diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index f28d73ede..e862470f7 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -42,7 +42,7 @@ jobs: - name: Install dependencies run: | poetry install --with dev - poetry run pip install -U langchain + poetry run pip install -U langchain langchain_anthropic langchain_openai rapidfuzz - name: Run Python integration tests uses: ./.github/actions/python-integration-tests with: diff --git a/.github/workflows/js_test.yml b/.github/workflows/js_test.yml index 172ed9034..1778178cc 100644 --- a/.github/workflows/js_test.yml +++ b/.github/workflows/js_test.yml @@ -81,7 +81,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - node-version: [18.x, 19.x, 20.x, 21.x, 22.x] + node-version: [18.x, 20.x, "22.4.1"] # See Node.js release schedule at https://nodejs.org/en/about/releases/ include: - os: windows-latest @@ -107,4 +107,4 @@ jobs: - name: Check version run: yarn run check-version - name: Test - run: yarn run test \ No newline at end of file + run: yarn run test diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 188d89b08..98020f18e 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -20,6 +20,8 @@ jobs: matrix: python-version: - "3.8" + - "3.9" + - "3.10" - "3.11" - "3.12" defaults: @@ -42,7 +44,7 @@ jobs: - name: Install dependencies run: | poetry install --with dev,lint - poetry run pip install -U langchain + poetry run pip install -U langchain langchain-core langchain_anthropic langchain_openai - name: Build ${{ matrix.python-version }} run: poetry build - name: Lint ${{ matrix.python-version }} diff --git a/.github/workflows/release_js.yml b/.github/workflows/release_js.yml index 5f2ac7294..4f1aee583 100644 --- a/.github/workflows/release_js.yml +++ b/.github/workflows/release_js.yml @@ -1,6 +1,11 @@ name: JS Release on: + push: + branches: + - main + paths: + - "js/package.json" workflow_dispatch: jobs: @@ -11,33 +16,34 @@ jobs: permissions: contents: write id-token: write + defaults: + run: + working-directory: "js" steps: - uses: actions/checkout@v3 # JS Build - - name: Use Node.js ${{ matrix.node-version }} + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: ${{ matrix.node-version }} + node-version: 20.x cache: "yarn" cache-dependency-path: "js/yarn.lock" - name: Install dependencies - run: cd js && yarn install --immutable + run: yarn install --immutable - name: Build - run: cd js && yarn run build + run: yarn run build - name: Check version - run: cd js && yarn run check-version + run: yarn run check-version - name: Check NPM version id: check_npm_version run: | - cd js if yarn run check-npm-version; then - echo "::set-output name=should_publish::true" + echo "should_publish=true" >> $GITHUB_OUTPUT else - echo "::set-output name=should_publish::false" + echo "should_publish=false" >> $GITHUB_OUTPUT fi - name: Publish package to NPM if: steps.check_npm_version.outputs.should_publish == 'true' run: | - cd js echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > .npmrc yarn publish --non-interactive diff --git a/_scripts/_fetch_schema.py b/_scripts/_fetch_schema.py index 741e12a9c..ba8c171bd 100644 --- a/_scripts/_fetch_schema.py +++ b/_scripts/_fetch_schema.py @@ -1,4 +1,5 @@ """Fetch and prune the Langsmith spec.""" + import argparse from pathlib import Path @@ -19,7 +20,9 @@ def process_schema(sub_schema): get_dependencies(schema, 
sub_schema["$ref"].split("/")[-1], new_components) else: if "items" in sub_schema and "$ref" in sub_schema["items"]: - get_dependencies(schema, sub_schema["items"]["$ref"].split("/")[-1], new_components) + get_dependencies( + schema, sub_schema["items"]["$ref"].split("/")[-1], new_components + ) for keyword in ["anyOf", "oneOf", "allOf"]: if keyword in sub_schema: for item in sub_schema[keyword]: @@ -38,8 +41,6 @@ def process_schema(sub_schema): process_schema(item) - - def _extract_langsmith_routes_and_properties(schema, operation_ids): new_paths = {} new_components = {"schemas": {}} @@ -98,20 +99,25 @@ def test_openapi_specification(spec: dict): assert errors is None, f"OpenAPI validation failed: {errors}" -def main(out_file: str = "openapi.yaml", url: str = "https://web.smith.langchain.com/openapi.json"): +def main( + out_file: str = "openapi.yaml", + url: str = "https://web.smith.langchain.com/openapi.json", +): langsmith_schema = get_langsmith_runs_schema(url=url) parent_dir = Path(__file__).parent.parent test_openapi_specification(langsmith_schema) with (parent_dir / "openapi" / out_file).open("w") as f: # Sort the schema keys so the openapi version and info come at the top - for key in ['openapi', 'info', 'paths', 'components']: + for key in ["openapi", "info", "paths", "components"]: langsmith_schema[key] = langsmith_schema.pop(key) f.write(yaml.dump(langsmith_schema, sort_keys=False)) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("--url", type=str, default="https://web.smith.langchain.com/openapi.json") + parser.add_argument( + "--url", type=str, default="https://web.smith.langchain.com/openapi.json" + ) parser.add_argument("--output", type=str, default="openapi.yaml") args = parser.parse_args() main(args.output, url=args.url) diff --git a/js/.eslintrc.cjs b/js/.eslintrc.cjs index a870c9f5a..da4c3ecb4 100644 --- a/js/.eslintrc.cjs +++ b/js/.eslintrc.cjs @@ -14,6 +14,7 @@ module.exports = { ignorePatterns: [ ".eslintrc.cjs", "scripts", + "src/utils/lodash/*", "node_modules", "dist", "dist-cjs", diff --git a/js/.gitignore b/js/.gitignore index bf7ee31ef..e758389d2 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -47,18 +47,38 @@ Chinook_Sqlite.sql /evaluation.js /evaluation.d.ts /evaluation.d.cts +/evaluation/langchain.cjs +/evaluation/langchain.js +/evaluation/langchain.d.ts +/evaluation/langchain.d.cts /schemas.cjs /schemas.js /schemas.d.ts /schemas.d.cts +/langchain.cjs +/langchain.js +/langchain.d.ts +/langchain.d.cts /wrappers.cjs /wrappers.js /wrappers.d.ts /wrappers.d.cts +/anonymizer.cjs +/anonymizer.js +/anonymizer.d.ts +/anonymizer.d.cts /wrappers/openai.cjs /wrappers/openai.js /wrappers/openai.d.ts /wrappers/openai.d.cts +/wrappers/vercel.cjs +/wrappers/vercel.js +/wrappers/vercel.d.ts +/wrappers/vercel.d.cts +/singletons/traceable.cjs +/singletons/traceable.js +/singletons/traceable.d.ts +/singletons/traceable.d.cts /index.cjs /index.js /index.d.ts diff --git a/js/README.md b/js/README.md index 9eba64647..7aa73a1c9 100644 --- a/js/README.md +++ b/js/README.md @@ -53,6 +53,7 @@ Tracing can be activated by setting the following environment variables or by ma ```typescript process.env["LANGSMITH_TRACING"] = "true"; process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; +// process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region process.env["LANGCHAIN_API_KEY"] = ""; // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set ``` 
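The README change above documents tracing configuration via environment variables, including the new EU-region endpoint. For illustration only, the same configuration can also be passed when constructing the client directly; this is a minimal sketch, not part of the patch, and it assumes the `apiUrl`/`apiKey` fields of the `ClientConfig` interface touched later in this diff and an EU-region key stored in `LANGCHAIN_API_KEY`.

```typescript
import { Client } from "langsmith";

// Minimal sketch: point the client at the EU region explicitly instead of
// relying on the LANGCHAIN_ENDPOINT environment variable.
// Assumes LANGCHAIN_API_KEY holds an EU-region API key.
const client = new Client({
  apiUrl: "https://eu.api.smith.langchain.com",
  apiKey: process.env.LANGCHAIN_API_KEY,
});
```

The environment-variable route shown in the README remains the simpler option when tracing is enabled globally, since wrappers pick those values up without explicit client wiring.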
diff --git a/js/package.json b/js/package.json index 09191b908..3f230796c 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.23", + "version": "0.1.42", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -21,18 +21,38 @@ "evaluation.js", "evaluation.d.ts", "evaluation.d.cts", + "evaluation/langchain.cjs", + "evaluation/langchain.js", + "evaluation/langchain.d.ts", + "evaluation/langchain.d.cts", "schemas.cjs", "schemas.js", "schemas.d.ts", "schemas.d.cts", + "langchain.cjs", + "langchain.js", + "langchain.d.ts", + "langchain.d.cts", "wrappers.cjs", "wrappers.js", "wrappers.d.ts", "wrappers.d.cts", + "anonymizer.cjs", + "anonymizer.js", + "anonymizer.d.ts", + "anonymizer.d.cts", "wrappers/openai.cjs", "wrappers/openai.js", "wrappers/openai.d.ts", "wrappers/openai.d.cts", + "wrappers/vercel.cjs", + "wrappers/vercel.js", + "wrappers/vercel.d.ts", + "wrappers/vercel.d.cts", + "singletons/traceable.cjs", + "singletons/traceable.js", + "singletons/traceable.d.ts", + "singletons/traceable.d.cts", "index.cjs", "index.js", "index.d.ts", @@ -81,17 +101,22 @@ "commander": "^10.0.1", "p-queue": "^6.6.2", "p-retry": "4", + "semver": "^7.6.3", "uuid": "^9.0.0" }, "devDependencies": { + "@ai-sdk/openai": "^0.0.40", "@babel/preset-env": "^7.22.4", + "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", - "@langchain/core": "^0.1.32", - "@langchain/langgraph": "^0.0.8", + "@langchain/core": "^0.2.17", + "@langchain/langgraph": "^0.0.29", + "@langchain/openai": "^0.2.5", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", "@typescript-eslint/parser": "^5.59.8", + "ai": "^3.2.37", "babel-jest": "^29.5.0", "cross-env": "^7.0.3", "dotenv": "^16.1.3", @@ -101,18 +126,28 @@ "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", + "langchain": "^0.2.10", "openai": "^4.38.5", "prettier": "^2.8.8", "ts-jest": "^29.1.0", "ts-node": "^10.9.1", - "typescript": "^5.4.5" + "typescript": "^5.4.5", + "zod": "^3.23.8" }, "peerDependencies": { + "@langchain/core": "*", + "langchain": "*", "openai": "*" }, "peerDependenciesMeta": { "openai": { "optional": true + }, + "langchain": { + "optional": true + }, + "@langchain/core": { + "optional": true } }, "lint-staged": { @@ -167,6 +202,15 @@ "import": "./evaluation.js", "require": "./evaluation.cjs" }, + "./evaluation/langchain": { + "types": { + "import": "./evaluation/langchain.d.ts", + "require": "./evaluation/langchain.d.cts", + "default": "./evaluation/langchain.d.ts" + }, + "import": "./evaluation/langchain.js", + "require": "./evaluation/langchain.cjs" + }, "./schemas": { "types": { "import": "./schemas.d.ts", @@ -176,6 +220,15 @@ "import": "./schemas.js", "require": "./schemas.cjs" }, + "./langchain": { + "types": { + "import": "./langchain.d.ts", + "require": "./langchain.d.cts", + "default": "./langchain.d.ts" + }, + "import": "./langchain.js", + "require": "./langchain.cjs" + }, "./wrappers": { "types": { "import": "./wrappers.d.ts", @@ -185,6 +238,15 @@ "import": "./wrappers.js", "require": "./wrappers.cjs" }, + "./anonymizer": { + "types": { + "import": "./anonymizer.d.ts", + "require": "./anonymizer.d.cts", + "default": "./anonymizer.d.ts" + }, + "import": "./anonymizer.js", + "require": "./anonymizer.cjs" + }, "./wrappers/openai": { "types": { "import": "./wrappers/openai.d.ts", @@ -194,6 +256,24 @@ "import": 
"./wrappers/openai.js", "require": "./wrappers/openai.cjs" }, + "./wrappers/vercel": { + "types": { + "import": "./wrappers/vercel.d.ts", + "require": "./wrappers/vercel.d.cts", + "default": "./wrappers/vercel.d.ts" + }, + "import": "./wrappers/vercel.js", + "require": "./wrappers/vercel.cjs" + }, + "./singletons/traceable": { + "types": { + "import": "./singletons/traceable.d.ts", + "require": "./singletons/traceable.d.cts", + "default": "./singletons/traceable.d.ts" + }, + "import": "./singletons/traceable.js", + "require": "./singletons/traceable.cjs" + }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 3f6307303..a3487f756 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -11,10 +11,16 @@ const entrypoints = { run_trees: "run_trees", traceable: "traceable", evaluation: "evaluation/index", + "evaluation/langchain": "evaluation/langchain", schemas: "schemas", + langchain: "langchain", wrappers: "wrappers/index", + anonymizer: "anonymizer/index", "wrappers/openai": "wrappers/openai", + "wrappers/vercel": "wrappers/vercel", + "singletons/traceable": "singletons/traceable", }; + const updateJsonFile = (relativePath, updateFunction) => { const contents = fs.readFileSync(relativePath).toString(); const res = updateFunction(JSON.parse(contents)); diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts new file mode 100644 index 000000000..dc360a3c4 --- /dev/null +++ b/js/src/anonymizer/index.ts @@ -0,0 +1,129 @@ +import set from "../utils/lodash/set.js"; + +export interface StringNode { + value: string; + path: string; +} + +function extractStringNodes(data: unknown, options: { maxDepth?: number }) { + const parsedOptions = { ...options, maxDepth: options.maxDepth ?? 10 }; + + const queue: [value: unknown, depth: number, path: string][] = [ + [data, 0, ""], + ]; + + const result: StringNode[] = []; + while (queue.length > 0) { + const task = queue.shift(); + if (task == null) continue; + const [value, depth, path] = task; + if (typeof value === "object" && value != null) { + if (depth >= parsedOptions.maxDepth) continue; + for (const [key, nestedValue] of Object.entries(value)) { + queue.push([nestedValue, depth + 1, path ? `${path}.${key}` : key]); + } + } else if (Array.isArray(value)) { + if (depth >= parsedOptions.maxDepth) continue; + for (let i = 0; i < value.length; i++) { + queue.push([value[i], depth + 1, `${path}[${i}]`]); + } + } else if (typeof value === "string") { + result.push({ value, path }); + } + } + + return result; +} + +function deepClone(data: T): T { + return JSON.parse(JSON.stringify(data)); +} + +export interface StringNodeProcessor { + maskNodes: (nodes: StringNode[]) => StringNode[]; +} + +export interface StringNodeRule { + type?: "pattern"; + pattern: RegExp | string; + replace?: string; +} + +export type ReplacerType = + | ((value: string, path?: string) => string) + | StringNodeRule[] + | StringNodeProcessor; + +export function createAnonymizer( + replacer: ReplacerType, + options?: { maxDepth?: number } +) { + return (data: T): T => { + let mutateValue = deepClone(data); + const nodes = extractStringNodes(mutateValue, { + maxDepth: options?.maxDepth, + }); + + const processor: StringNodeProcessor = Array.isArray(replacer) + ? 
(() => { + const replacers: [regex: RegExp, replace: string][] = replacer.map( + ({ pattern, type, replace }) => { + if (type != null && type !== "pattern") + throw new Error("Invalid anonymizer type"); + return [ + typeof pattern === "string" + ? new RegExp(pattern, "g") + : pattern, + replace ?? "[redacted]", + ]; + } + ); + + if (replacers.length === 0) throw new Error("No replacers provided"); + return { + maskNodes: (nodes: StringNode[]) => { + return nodes.reduce((memo, item) => { + const newValue = replacers.reduce((value, [regex, replace]) => { + const result = value.replace(regex, replace); + + // make sure we reset the state of regex + regex.lastIndex = 0; + + return result; + }, item.value); + + if (newValue !== item.value) { + memo.push({ value: newValue, path: item.path }); + } + + return memo; + }, []); + }, + }; + })() + : typeof replacer === "function" + ? { + maskNodes: (nodes: StringNode[]) => + nodes.reduce((memo, item) => { + const newValue = replacer(item.value, item.path); + if (newValue !== item.value) { + memo.push({ value: newValue, path: item.path }); + } + + return memo; + }, []), + } + : replacer; + + const toUpdate = processor.maskNodes(nodes); + for (const node of toUpdate) { + if (node.path === "") { + mutateValue = node.value as unknown as T; + } else { + set(mutateValue as unknown as object, node.path, node.value); + } + } + + return mutateValue; + }; +} diff --git a/js/src/client.ts b/js/src/client.ts index 05b1a602b..1b7759f1b 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -10,11 +10,19 @@ import { Example, ExampleCreate, ExampleUpdate, + ExampleUpdateWithId, Feedback, FeedbackConfig, FeedbackIngestToken, KVMap, LangChainBaseMessage, + LangSmithSettings, + LikePromptResponse, + ListCommitsResponse, + ListPromptsResponse, + Prompt, + PromptCommit, + PromptSortField, Run, RunCreate, RunUpdate, @@ -29,8 +37,8 @@ import { isLangChainMessage, } from "./utils/messages.js"; import { - getEnvironmentVariable, getLangChainEnvVarsMetadata, + getLangSmithEnvironmentVariable, getRuntimeEnvironment, } from "./utils/env.js"; @@ -41,6 +49,11 @@ import { } from "./evaluation/evaluator.js"; import { __version__ } from "./index.js"; import { assertUuid } from "./utils/_uuid.js"; +import { warnOnce } from "./utils/warn.js"; +import { + isVersionGreaterOrEqual, + parsePromptIdentifier, +} from "./utils/prompts.js"; export interface ClientConfig { apiUrl?: string; @@ -48,8 +61,9 @@ export interface ClientConfig { callerOptions?: AsyncCallerParams; timeout_ms?: number; webUrl?: string; - hideInputs?: boolean; - hideOutputs?: boolean; + anonymizer?: (values: KVMap) => KVMap; + hideInputs?: boolean | ((inputs: KVMap) => KVMap); + hideOutputs?: boolean | ((outputs: KVMap) => KVMap); autoBatchTracing?: boolean; pendingAutoBatchedRunLimit?: number; fetchOptions?: RequestInit; @@ -239,6 +253,7 @@ export type CreateExampleOptions = { exampleId?: string; metadata?: KVMap; + split?: string | string[]; }; type AutoBatchQueueItem = { @@ -271,8 +286,8 @@ async function mergeRuntimeEnvIntoRunCreates(runs: RunCreate[]) { } const getTracingSamplingRate = () => { - const samplingRateStr = getEnvironmentVariable( - "LANGCHAIN_TRACING_SAMPLING_RATE" + const samplingRateStr = getLangSmithEnvironmentVariable( + "TRACING_SAMPLING_RATE" ); if (samplingRateStr === undefined) { return undefined; @@ -280,7 +295,7 @@ const getTracingSamplingRate = () => { const samplingRate = parseFloat(samplingRateStr); if (samplingRate < 0 || samplingRate > 1) { throw new Error( - 
`LANGCHAIN_TRACING_SAMPLING_RATE must be between 0 and 1 if set. Got: ${samplingRate}` + `LANGSMITH_TRACING_SAMPLING_RATE must be between 0 and 1 if set. Got: ${samplingRate}` ); } return samplingRate; @@ -414,6 +429,8 @@ export class Client { private fetchOptions: RequestInit; + private settings: Promise | null; + constructor(config: ClientConfig = {}) { const defaultConfig = Client.getDefaultClientConfig(); @@ -427,8 +444,12 @@ export class Client { ...(config.callerOptions ?? {}), onFailedResponseHook: handle429, }); - this.hideInputs = config.hideInputs ?? defaultConfig.hideInputs; - this.hideOutputs = config.hideOutputs ?? defaultConfig.hideOutputs; + + this.hideInputs = + config.hideInputs ?? config.anonymizer ?? defaultConfig.hideInputs; + this.hideOutputs = + config.hideOutputs ?? config.anonymizer ?? defaultConfig.hideOutputs; + this.autoBatchTracing = config.autoBatchTracing ?? this.autoBatchTracing; this.pendingAutoBatchedRunLimit = config.pendingAutoBatchedRunLimit ?? this.pendingAutoBatchedRunLimit; @@ -442,14 +463,14 @@ export class Client { hideInputs?: boolean; hideOutputs?: boolean; } { - const apiKey = getEnvironmentVariable("LANGCHAIN_API_KEY"); + const apiKey = getLangSmithEnvironmentVariable("API_KEY"); const apiUrl = - getEnvironmentVariable("LANGCHAIN_ENDPOINT") ?? + getLangSmithEnvironmentVariable("ENDPOINT") ?? "https://api.smith.langchain.com"; const hideInputs = - getEnvironmentVariable("LANGCHAIN_HIDE_INPUTS") === "true"; + getLangSmithEnvironmentVariable("HIDE_INPUTS") === "true"; const hideOutputs = - getEnvironmentVariable("LANGCHAIN_HIDE_OUTPUTS") === "true"; + getLangSmithEnvironmentVariable("HIDE_OUTPUTS") === "true"; return { apiUrl: apiUrl, apiKey: apiKey, @@ -459,12 +480,12 @@ export class Client { }; } - private getHostUrl(): string { + public getHostUrl(): string { if (this.webUrl) { return this.webUrl; } else if (isLocalhost(this.apiUrl)) { - this.webUrl = "http://localhost"; - return "http://localhost"; + this.webUrl = "http://localhost:3000"; + return this.webUrl; } else if ( this.apiUrl.includes("/api") && !this.apiUrl.split(".", 1)[0].endsWith("api") @@ -473,10 +494,13 @@ export class Client { return this.webUrl; } else if (this.apiUrl.split(".", 1)[0].includes("dev")) { this.webUrl = "https://dev.smith.langchain.com"; - return "https://dev.smith.langchain.com"; + return this.webUrl; + } else if (this.apiUrl.split(".", 1)[0].includes("eu")) { + this.webUrl = "https://eu.smith.langchain.com"; + return this.webUrl; } else { this.webUrl = "https://smith.langchain.com"; - return "https://smith.langchain.com"; + return this.webUrl; } } @@ -558,9 +582,10 @@ export class Client { const response = await this._getResponse(path, queryParams); return response.json() as T; } - private async *_getPaginated( + private async *_getPaginated( path: string, - queryParams: URLSearchParams = new URLSearchParams() + queryParams: URLSearchParams = new URLSearchParams(), + transform?: (data: TResponse) => T[] ): AsyncIterable { let offset = Number(queryParams.get("offset")) || 0; const limit = Number(queryParams.get("limit")) || 100; @@ -580,7 +605,10 @@ export class Client { `Failed to fetch ${path}: ${response.status} ${response.statusText}` ); } - const items: T[] = await response.json(); + + const items: T[] = transform + ? 
transform(await response.json()) + : await response.json(); if (items.length === 0) { break; @@ -735,6 +763,14 @@ export class Client { return true; } + protected async _getSettings() { + if (!this.settings) { + this.settings = this._get("/settings"); + } + + return await this.settings; + } + public async createRun(run: CreateRunParams): Promise { if (!this._filterForSampling([run]).length) { return; @@ -981,7 +1017,7 @@ export class Client { sessionId = projectOpts?.projectId; } else { const project = await this.readProject({ - projectName: getEnvironmentVariable("LANGCHAIN_PROJECT") || "default", + projectName: getLangSmithEnvironmentVariable("PROJECT") || "default", }); sessionId = project.id; } @@ -1196,14 +1232,116 @@ export class Client { is_root: isRoot, }; + let runsYielded = 0; for await (const runs of this._getCursorPaginatedList( "/runs/query", body )) { - yield* runs; + if (limit) { + if (runsYielded >= limit) { + break; + } + if (runs.length + runsYielded > limit) { + const newRuns = runs.slice(0, limit - runsYielded); + yield* newRuns; + break; + } + runsYielded += runs.length; + yield* runs; + } else { + yield* runs; + } } } + public async getRunStats({ + id, + trace, + parentRun, + runType, + projectNames, + projectIds, + referenceExampleIds, + startTime, + endTime, + error, + query, + filter, + traceFilter, + treeFilter, + isRoot, + dataSourceType, + }: { + id?: string[]; + trace?: string; + parentRun?: string; + runType?: string; + projectNames?: string[]; + projectIds?: string[]; + referenceExampleIds?: string[]; + startTime?: string; + endTime?: string; + error?: boolean; + query?: string; + filter?: string; + traceFilter?: string; + treeFilter?: string; + isRoot?: boolean; + dataSourceType?: string; + }): Promise { + let projectIds_ = projectIds || []; + if (projectNames) { + projectIds_ = [ + ...(projectIds || []), + ...(await Promise.all( + projectNames.map((name) => + this.readProject({ projectName: name }).then( + (project) => project.id + ) + ) + )), + ]; + } + + const payload = { + id, + trace, + parent_run: parentRun, + run_type: runType, + session: projectIds_, + reference_example: referenceExampleIds, + start_time: startTime, + end_time: endTime, + error, + query, + filter, + trace_filter: traceFilter, + tree_filter: treeFilter, + is_root: isRoot, + data_source_type: dataSourceType, + }; + + // Remove undefined values from the payload + const filteredPayload = Object.fromEntries( + Object.entries(payload).filter(([_, value]) => value !== undefined) + ); + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/runs/stats`, + { + method: "POST", + headers: this.headers, + body: JSON.stringify(filteredPayload), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const result = await response.json(); + return result; + } + public async shareRun( runId: string, { shareId }: { shareId?: string } = {} @@ -1568,6 +1706,36 @@ export class Client { return result; } + public async getProjectUrl({ + projectId, + projectName, + }: { + projectId?: string; + projectName?: string; + }) { + if (projectId === undefined && projectName === undefined) { + throw new Error("Must provide either projectName or projectId"); + } + const project = await this.readProject({ projectId, projectName }); + const tenantId = await this._getTenantId(); + return `${this.getHostUrl()}/o/${tenantId}/projects/p/${project.id}`; + } + + public async getDatasetUrl({ + datasetId, + datasetName, + }: { + datasetId?: string; + datasetName?: string; + }) { + 
if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide either datasetName or datasetId"); + } + const dataset = await this.readDataset({ datasetId, datasetName }); + const tenantId = await this._getTenantId(); + return `${this.getHostUrl()}/o/${tenantId}/datasets/${dataset.id}`; + } + private async _getTenantId(): Promise { if (this._tenantId !== null) { return this._tenantId; @@ -1590,6 +1758,7 @@ export class Client { referenceDatasetId, referenceDatasetName, referenceFree, + metadata, }: { projectIds?: string[]; name?: string; @@ -1597,6 +1766,7 @@ export class Client { referenceDatasetId?: string; referenceDatasetName?: string; referenceFree?: boolean; + metadata?: RecordStringAny; } = {}): AsyncIterable { const params = new URLSearchParams(); if (projectIds !== undefined) { @@ -1621,6 +1791,9 @@ export class Client { if (referenceFree !== undefined) { params.append("reference_free", referenceFree.toString()); } + if (metadata !== undefined) { + params.append("metadata", JSON.stringify(metadata)); + } for await (const projects of this._getPaginated( "/sessions", params @@ -1901,6 +2074,45 @@ export class Client { } } + /** + * Update a dataset + * @param props The dataset details to update + * @returns The updated dataset + */ + public async updateDataset(props: { + datasetId?: string; + datasetName?: string; + name?: string; + description?: string; + }): Promise { + const { datasetId, datasetName, ...update } = props; + + if (!datasetId && !datasetName) { + throw new Error("Must provide either datasetName or datasetId"); + } + const _datasetId = + datasetId ?? (await this.readDataset({ datasetName })).id; + assertUuid(_datasetId); + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/datasets/${_datasetId}`, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(update), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + if (!response.ok) { + throw new Error( + `Failed to update dataset ${_datasetId}: ${response.status} ${response.statusText}` + ); + } + return (await response.json()) as Dataset; + } + public async deleteDataset({ datasetId, datasetName, @@ -1945,6 +2157,7 @@ export class Client { createdAt, exampleId, metadata, + split, }: CreateExampleOptions ): Promise { let datasetId_ = datasetId; @@ -1965,6 +2178,7 @@ export class Client { created_at: createdAt_?.toISOString(), id: exampleId, metadata, + split, }; const response = await this.caller.call(fetch, `${this.apiUrl}/examples`, { @@ -1989,6 +2203,7 @@ export class Client { inputs: Array; outputs?: Array; metadata?: Array; + splits?: Array>; sourceRunIds?: Array; exampleIds?: Array; datasetId?: string; @@ -2019,6 +2234,7 @@ export class Client { inputs: input, outputs: outputs ? outputs[idx] : undefined, metadata: metadata ? metadata[idx] : undefined, + split: props.splits ? props.splits[idx] : undefined, id: exampleIds ? exampleIds[idx] : undefined, source_run_id: sourceRunIds ? 
sourceRunIds[idx] : undefined, }; @@ -2086,15 +2302,23 @@ export class Client { datasetName, exampleIds, asOf, + splits, inlineS3Urls, metadata, + limit, + offset, + filter, }: { datasetId?: string; datasetName?: string; exampleIds?: string[]; asOf?: string | Date; + splits?: string[]; inlineS3Urls?: boolean; metadata?: KVMap; + limit?: number; + offset?: number; + filter?: string; } = {}): AsyncIterable { let datasetId_; if (datasetId !== undefined && datasetName !== undefined) { @@ -2123,15 +2347,36 @@ export class Client { params.append("id", id_); } } + if (splits !== undefined) { + for (const split of splits) { + params.append("splits", split); + } + } if (metadata !== undefined) { const serializedMetadata = JSON.stringify(metadata); params.append("metadata", serializedMetadata); } + if (limit !== undefined) { + params.append("limit", limit.toString()); + } + if (offset !== undefined) { + params.append("offset", offset.toString()); + } + if (filter !== undefined) { + params.append("filter", filter); + } + let i = 0; for await (const examples of this._getPaginated( "/examples", params )) { - yield* examples; + for (const example of examples) { + yield example; + i++; + } + if (limit !== undefined && i >= limit) { + break; + } } } @@ -2177,6 +2422,121 @@ export class Client { return result; } + public async updateExamples(update: ExampleUpdateWithId[]): Promise { + const response = await this.caller.call( + fetch, + `${this.apiUrl}/examples/bulk`, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(update), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + if (!response.ok) { + throw new Error( + `Failed to update examples: ${response.status} ${response.statusText}` + ); + } + const result = await response.json(); + return result; + } + + public async listDatasetSplits({ + datasetId, + datasetName, + asOf, + }: { + datasetId?: string; + datasetName?: string; + asOf?: string | Date; + }): Promise { + let datasetId_: string; + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide dataset name or ID"); + } else if (datasetId !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; + } else { + datasetId_ = datasetId; + } + + assertUuid(datasetId_); + + const params = new URLSearchParams(); + const dataset_version = asOf + ? typeof asOf === "string" + ? 
asOf + : asOf?.toISOString() + : undefined; + if (dataset_version) { + params.append("as_of", dataset_version); + } + + const response = await this._get( + `/datasets/${datasetId_}/splits`, + params + ); + return response; + } + + public async updateDatasetSplits({ + datasetId, + datasetName, + splitName, + exampleIds, + remove = false, + }: { + datasetId?: string; + datasetName?: string; + splitName: string; + exampleIds: string[]; + remove?: boolean; + }): Promise { + let datasetId_: string; + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide dataset name or ID"); + } else if (datasetId !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; + } else { + datasetId_ = datasetId; + } + + assertUuid(datasetId_); + + const data = { + split_name: splitName, + examples: exampleIds.map((id) => { + assertUuid(id); + return id; + }), + remove, + }; + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/datasets/${datasetId_}/splits`, + { + method: "PUT", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "update dataset splits"); + } + + /** + * @deprecated This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead. + */ public async evaluateRun( run: Run | string, evaluator: RunEvaluator, @@ -2190,6 +2550,9 @@ export class Client { referenceExample?: Example; } = { loadChildRuns: false } ): Promise { + warnOnce( + "This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead." + ); let run_: Run; if (typeof run === "string") { run_ = await this.readRun(run, { loadChildRuns }); @@ -2204,21 +2567,15 @@ export class Client { ) { referenceExample = await this.readExample(run_.reference_example_id); } + const feedbackResult = await evaluator.evaluateRun(run_, referenceExample); - let sourceInfo_ = sourceInfo ?? {}; - if (feedbackResult.evaluatorInfo) { - sourceInfo_ = { ...sourceInfo_, ...feedbackResult.evaluatorInfo }; - } - const runId = feedbackResult.targetRunId ?? 
run_.id; - return await this.createFeedback(runId, feedbackResult.key, { - score: feedbackResult?.score, - value: feedbackResult?.value, - comment: feedbackResult?.comment, - correction: feedbackResult?.correction, - sourceInfo: sourceInfo_, - feedbackSourceType: "model", - sourceRunId: feedbackResult?.sourceRunId, - }); + const [_, feedbacks] = await this._logEvaluationFeedback( + feedbackResult, + run_, + sourceInfo + ); + + return feedbacks[0]; } public async createFeedback( @@ -2543,14 +2900,17 @@ export class Client { return results_; } - public async logEvaluationFeedback( + async _logEvaluationFeedback( evaluatorResponse: EvaluationResult | EvaluationResults, run?: Run, sourceInfo?: { [key: string]: any } - ): Promise { - const results: Array = + ): Promise<[results: EvaluationResult[], feedbacks: Feedback[]]> { + const evalResults: Array = this._selectEvalResults(evaluatorResponse); - for (const res of results) { + + const feedbacks: Feedback[] = []; + + for (const res of evalResults) { let sourceInfo_ = sourceInfo || {}; if (res.evaluatorInfo) { sourceInfo_ = { ...res.evaluatorInfo, ...sourceInfo_ }; @@ -2562,17 +2922,531 @@ export class Client { runId_ = run.id; } - await this.createFeedback(runId_, res.key, { - score: res.score, - value: res.value, - comment: res.comment, - correction: res.correction, - sourceInfo: sourceInfo_, - sourceRunId: res.sourceRunId, - feedbackConfig: res.feedbackConfig as FeedbackConfig | undefined, - feedbackSourceType: "model", - }); + feedbacks.push( + await this.createFeedback(runId_, res.key, { + score: res.score, + value: res.value, + comment: res.comment, + correction: res.correction, + sourceInfo: sourceInfo_, + sourceRunId: res.sourceRunId, + feedbackConfig: res.feedbackConfig as FeedbackConfig | undefined, + feedbackSourceType: "model", + }) + ); } + + return [evalResults, feedbacks]; + } + + public async logEvaluationFeedback( + evaluatorResponse: EvaluationResult | EvaluationResults, + run?: Run, + sourceInfo?: { [key: string]: any } + ): Promise { + const [results] = await this._logEvaluationFeedback( + evaluatorResponse, + run, + sourceInfo + ); return results; } + + protected async _currentTenantIsOwner(owner: string): Promise { + const settings = await this._getSettings(); + return owner == "-" || settings.tenant_handle === owner; + } + + protected async _ownerConflictError( + action: string, + owner: string + ): Promise { + const settings = await this._getSettings(); + return new Error( + `Cannot ${action} for another tenant.\n + Current tenant: ${settings.tenant_handle}\n + Requested tenant: ${owner}` + ); + } + + protected async _getLatestCommitHash( + promptOwnerAndName: string + ): Promise { + const res = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${1}&offset=${0}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const json = await res.json(); + if (!res.ok) { + const detail = + typeof json.detail === "string" + ? 
json.detail + : JSON.stringify(json.detail); + const error = new Error( + `Error ${res.status}: ${res.statusText}\n${detail}` + ); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (error as any).statusCode = res.status; + throw error; + } + + if (json.commits.length === 0) { + return undefined; + } + + return json.commits[0].commit_hash; + } + + protected async _likeOrUnlikePrompt( + promptIdentifier: string, + like: boolean + ): Promise { + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const response = await this.caller.call( + fetch, + `${this.apiUrl}/likes/${owner}/${promptName}`, + { + method: "POST", + body: JSON.stringify({ like: like }), + headers: { ...this.headers, "Content-Type": "application/json" }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `Failed to ${like ? "like" : "unlike"} prompt: ${ + response.status + } ${await response.text()}` + ); + } + + return await response.json(); + } + + protected async _getPromptUrl(promptIdentifier: string): Promise { + const [owner, promptName, commitHash] = + parsePromptIdentifier(promptIdentifier); + if (!(await this._currentTenantIsOwner(owner))) { + if (commitHash !== "latest") { + return `${this.getHostUrl()}/hub/${owner}/${promptName}/${commitHash.substring( + 0, + 8 + )}`; + } else { + return `${this.getHostUrl()}/hub/${owner}/${promptName}`; + } + } else { + const settings = await this._getSettings(); + if (commitHash !== "latest") { + return `${this.getHostUrl()}/prompts/${promptName}/${commitHash.substring( + 0, + 8 + )}?organizationId=${settings.id}`; + } else { + return `${this.getHostUrl()}/prompts/${promptName}?organizationId=${ + settings.id + }`; + } + } + } + + public async promptExists(promptIdentifier: string): Promise { + const prompt = await this.getPrompt(promptIdentifier); + return !!prompt; + } + + public async likePrompt( + promptIdentifier: string + ): Promise { + return this._likeOrUnlikePrompt(promptIdentifier, true); + } + + public async unlikePrompt( + promptIdentifier: string + ): Promise { + return this._likeOrUnlikePrompt(promptIdentifier, false); + } + + public async *listCommits( + promptOwnerAndName: string + ): AsyncIterableIterator { + for await (const commits of this._getPaginated< + PromptCommit, + ListCommitsResponse + >( + `/commits/${promptOwnerAndName}/`, + {} as URLSearchParams, + (res) => res.commits + )) { + yield* commits; + } + } + + public async *listPrompts(options?: { + isPublic?: boolean; + isArchived?: boolean; + sortField?: PromptSortField; + query?: string; + }): AsyncIterableIterator { + const params = new URLSearchParams(); + params.append("sort_field", options?.sortField ?? 
"updated_at"); + params.append("sort_direction", "desc"); + params.append("is_archived", (!!options?.isArchived).toString()); + + if (options?.isPublic !== undefined) { + params.append("is_public", options.isPublic.toString()); + } + + if (options?.query) { + params.append("query", options.query); + } + + for await (const prompts of this._getPaginated( + "/repos", + params, + (res) => res.repos + )) { + yield* prompts; + } + } + + public async getPrompt(promptIdentifier: string): Promise { + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (response.status === 404) { + return null; + } + + if (!response.ok) { + throw new Error( + `Failed to get prompt: ${response.status} ${await response.text()}` + ); + } + + const result = await response.json(); + if (result.repo) { + return result.repo as Prompt; + } else { + return null; + } + } + + public async createPrompt( + promptIdentifier: string, + options?: { + description?: string; + readme?: string; + tags?: string[]; + isPublic?: boolean; + } + ): Promise { + const settings = await this._getSettings(); + if (options?.isPublic && !settings.tenant_handle) { + throw new Error( + `Cannot create a public prompt without first\n + creating a LangChain Hub handle. + You can add a handle by creating a public prompt at:\n + https://smith.langchain.com/prompts` + ); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + if (!(await this._currentTenantIsOwner(owner))) { + throw await this._ownerConflictError("create a prompt", owner); + } + + const data = { + repo_handle: promptName, + ...(options?.description && { description: options.description }), + ...(options?.readme && { readme: options.readme }), + ...(options?.tags && { tags: options.tags }), + is_public: !!options?.isPublic, + }; + + const response = await this.caller.call(fetch, `${this.apiUrl}/repos/`, { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + }); + + if (!response.ok) { + throw new Error( + `Failed to create prompt: ${response.status} ${await response.text()}` + ); + } + + const { repo } = await response.json(); + return repo as Prompt; + } + + public async createCommit( + promptIdentifier: string, + object: any, + options?: { + parentCommitHash?: string; + } + ): Promise { + if (!(await this.promptExists(promptIdentifier))) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const resolvedParentCommitHash = + options?.parentCommitHash === "latest" || !options?.parentCommitHash + ? 
await this._getLatestCommitHash(`${owner}/${promptName}`) + : options?.parentCommitHash; + + const payload = { + manifest: JSON.parse(JSON.stringify(object)), + parent_commit: resolvedParentCommitHash, + }; + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${owner}/${promptName}`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(payload), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `Failed to create commit: ${response.status} ${await response.text()}` + ); + } + + const result = await response.json(); + return this._getPromptUrl( + `${owner}/${promptName}${ + result.commit_hash ? `:${result.commit_hash}` : "" + }` + ); + } + + public async updatePrompt( + promptIdentifier: string, + options?: { + description?: string; + readme?: string; + tags?: string[]; + isPublic?: boolean; + isArchived?: boolean; + } + ): Promise> { + if (!(await this.promptExists(promptIdentifier))) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName] = parsePromptIdentifier(promptIdentifier); + + if (!(await this._currentTenantIsOwner(owner))) { + throw await this._ownerConflictError("update a prompt", owner); + } + + const payload: Record = {}; + + if (options?.description !== undefined) + payload.description = options.description; + if (options?.readme !== undefined) payload.readme = options.readme; + if (options?.tags !== undefined) payload.tags = options.tags; + if (options?.isPublic !== undefined) payload.is_public = options.isPublic; + if (options?.isArchived !== undefined) + payload.is_archived = options.isArchived; + + // Check if payload is empty + if (Object.keys(payload).length === 0) { + throw new Error("No valid update options provided"); + } + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "PATCH", + body: JSON.stringify(payload), + headers: { + ...this.headers, + "Content-Type": "application/json", + }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `HTTP Error: ${response.status} - ${await response.text()}` + ); + } + + return response.json(); + } + + public async deletePrompt(promptIdentifier: string): Promise { + if (!(await this.promptExists(promptIdentifier))) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + + if (!(await this._currentTenantIsOwner(owner))) { + throw await this._ownerConflictError("delete a prompt", owner); + } + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "DELETE", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + return await response.json(); + } + + public async pullPromptCommit( + promptIdentifier: string, + options?: { + includeModel?: boolean; + } + ): Promise { + const [owner, promptName, commitHash] = + parsePromptIdentifier(promptIdentifier); + const serverInfo = await this._getServerInfo(); + const useOptimization = isVersionGreaterOrEqual( + serverInfo.version, + "0.5.23" + ); + + let passedCommitHash = commitHash; + + if (!useOptimization && commitHash === "latest") { + const latestCommitHash = await this._getLatestCommitHash( + `${owner}/${promptName}` + ); + if 
(!latestCommitHash) { + throw new Error("No commits found"); + } else { + passedCommitHash = latestCommitHash; + } + } + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${owner}/${promptName}/${passedCommitHash}${ + options?.includeModel ? "?include_model=true" : "" + }`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `Failed to pull prompt commit: ${response.status} ${response.statusText}` + ); + } + + const result = await response.json(); + + return { + owner, + repo: promptName, + commit_hash: result.commit_hash, + manifest: result.manifest, + examples: result.examples, + }; + } + + /** + * + * This method should not be used directly, use `import { pull } from "langchain/hub"` instead. + * Using this method directly returns the JSON string of the prompt rather than a LangChain object. + * @private + * + */ + public async _pullPrompt( + promptIdentifier: string, + options?: { + includeModel?: boolean; + } + ): Promise { + const promptObject = await this.pullPromptCommit(promptIdentifier, { + includeModel: options?.includeModel, + }); + const prompt = JSON.stringify(promptObject.manifest); + return prompt; + } + + public async pushPrompt( + promptIdentifier: string, + options?: { + object?: any; + parentCommitHash?: string; + isPublic?: boolean; + description?: string; + readme?: string; + tags?: string[]; + } + ): Promise { + // Create or update prompt metadata + if (await this.promptExists(promptIdentifier)) { + if (options && Object.keys(options).some((key) => key !== "object")) { + await this.updatePrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); + } + } else { + await this.createPrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); + } + + if (!options?.object) { + return await this._getPromptUrl(promptIdentifier); + } + + // Create a commit with the new manifest + const url = await this.createCommit(promptIdentifier, options?.object, { + parentCommitHash: options?.parentCommitHash, + }); + return url; + } } diff --git a/js/src/env.ts b/js/src/env.ts new file mode 100644 index 000000000..9d04037a5 --- /dev/null +++ b/js/src/env.ts @@ -0,0 +1,11 @@ +import { getLangSmithEnvironmentVariable } from "./utils/env.js"; + +export const isTracingEnabled = (tracingEnabled?: boolean): boolean => { + if (tracingEnabled !== undefined) { + return tracingEnabled; + } + const envVars = ["TRACING_V2", "TRACING"]; + return !!envVars.find( + (envVar) => getLangSmithEnvironmentVariable(envVar) === "true" + ); +}; diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 9bc850c5e..acdb0db9b 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -1,4 +1,5 @@ import { Client, RunTree, RunTreeConfig } from "../index.js"; +import { getLangchainCallbacks } from "../langchain.js"; import { BaseRun, Example, KVMap, Run, TracerSession } from "../schemas.js"; import { traceable } from "../traceable.js"; import { getDefaultRevisionId, getGitInfo } from "../utils/_git.js"; @@ -6,6 +7,7 @@ import { assertUuid } from "../utils/_uuid.js"; import { AsyncCaller } from "../utils/async_caller.js"; import { atee } from "../utils/atee.js"; import { getLangChainEnvVarsMetadata } from "../utils/env.js"; +import { 
printErrorStackTrace } from "../utils/error.js"; import { randomName } from "./_random_name.js"; import { EvaluationResult, @@ -22,11 +24,6 @@ type TargetT = | { invoke: (input: TInput, config?: KVMap) => TOutput } | { invoke: (input: TInput, config?: KVMap) => Promise }; -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type TargetNoInvoke = - | ((input: TInput, config?: KVMap) => Promise) - | ((input: TInput, config?: KVMap) => TOutput); - // Data format: dataset-name, dataset_id, or examples type DataT = string | AsyncIterable | Example[]; @@ -45,8 +42,11 @@ type SummaryEvaluatorT = // Row-level evaluator type EvaluatorT = | RunEvaluator - | ((run: Run, example?: Example) => EvaluationResult) - | ((run: Run, example?: Example) => Promise); + | ((run: Run, example?: Example) => EvaluationResult | EvaluationResults) + | (( + run: Run, + example?: Example + ) => Promise); interface _ForwardResults { run: Run; @@ -66,6 +66,7 @@ interface _ExperimentManagerArgs { unknown >; examples?: Example[]; + numRepetitions?: number; _runsArray?: Run[]; } @@ -109,6 +110,12 @@ export interface EvaluateOptions { * @default undefined */ client?: Client; + /** + * The number of repetitions to perform. Each example + * will be run this many times. + * @default 1 + */ + numRepetitions?: number; } export function evaluate( @@ -148,6 +155,8 @@ class _ExperimentManager { _examples?: Example[]; + _numRepetitions?: number; + _runsArray?: Run[]; client: Client; @@ -182,7 +191,15 @@ class _ExperimentManager { for await (const example of unresolvedData) { exs.push(example); } - this.setExamples(exs); + if (this._numRepetitions && this._numRepetitions > 0) { + const repeatedExamples = []; + for (let i = 0; i < this._numRepetitions; i++) { + repeatedExamples.push(...exs); + } + this.setExamples(repeatedExamples); + } else { + this.setExamples(exs); + } } return this._examples; } @@ -263,6 +280,7 @@ class _ExperimentManager { this._evaluationResults = args.evaluationResults; this._summaryResults = args.summaryResults; + this._numRepetitions = args.numRepetitions; } _getExperiment(): TracerSession { @@ -305,6 +323,7 @@ class _ExperimentManager { metadata: projectMetadata, description: this._description, }); + this._experiment = project; } catch (e) { if (String(e).includes("already exists")) { throw e; @@ -319,16 +338,25 @@ class _ExperimentManager { return project; } - _printExperimentStart(): void { - // @TODO log with experiment URL + protected async _printExperimentStart(): Promise { console.log(`Starting evaluation of experiment: ${this.experimentName}`); + + const firstExample = this._examples?.[0]; + const datasetId = firstExample?.dataset_id; + if (!datasetId || !this._experiment) return; + + const datasetUrl = await this.client.getDatasetUrl({ datasetId }); + const compareUrl = `${datasetUrl}/compare?selectedSessions=${this._experiment.id}`; + + console.log(`View results at ${compareUrl}`); } async start(): Promise<_ExperimentManager> { const examples = await this.getExamples(); const firstExample = examples[0]; const project = await this._getProject(firstExample); - this._printExperimentStart(); + await this._printExperimentStart(); + this._metadata["num_repetitions"] = this._numRepetitions; return new _ExperimentManager({ examples, experiment: project, @@ -340,7 +368,7 @@ class _ExperimentManager { } async withPredictions( - target: TargetNoInvoke, + target: TargetT, options?: { maxConcurrency?: number; } @@ -453,13 +481,13 @@ class _ExperimentManager { // Private methods /** - * Run the target 
function on the examples. - * @param {TargetNoInvoke} target The target function to evaluate. + * Run the target function or runnable on the examples. + * @param {TargetT} target The target function or runnable to evaluate. * @param options * @returns {AsyncGenerator<_ForwardResults>} An async generator of the results. */ async *_predict( - target: TargetNoInvoke, + target: TargetT, options?: { maxConcurrency?: number; } @@ -510,7 +538,6 @@ class _ExperimentManager { evaluators: Array, currentResults: ExperimentResultRow, fields: { - experimentName: string; client: Client; } ): Promise { @@ -519,7 +546,7 @@ class _ExperimentManager { try { const options = { reference_example_id: example.id, - project_name: fields.experimentName, + project_name: "evaluators", metadata: { example_version: example.modified_at ? new Date(example.modified_at).toISOString() @@ -539,6 +566,7 @@ class _ExperimentManager { console.error( `Error running evaluator ${evaluator.evaluateRun.name} on run ${run.id}: ${e}` ); + printErrorStackTrace(e); } } @@ -567,7 +595,6 @@ class _ExperimentManager { if (maxConcurrency === 0) { for await (const currentResults of this.getResults()) { yield this._runEvaluators(evaluators, currentResults, { - experimentName: this.experimentName, client: this.client, }); } @@ -579,7 +606,6 @@ class _ExperimentManager { for await (const currentResults of this.getResults()) { futures.push( caller.call(this._runEvaluators, evaluators, currentResults, { - experimentName: this.experimentName, client: this.client, }) ); @@ -638,6 +664,7 @@ class _ExperimentManager { evaluator.name }: ${JSON.stringify(e, null, 2)}` ); + printErrorStackTrace(e); } } @@ -686,6 +713,21 @@ class _ExperimentManager { ).date; } + async _getDatasetSplits(): Promise { + const examples = await this.getExamples(); + const allSplits = examples.reduce((acc, ex) => { + if (ex.metadata && ex.metadata.dataset_split) { + if (Array.isArray(ex.metadata.dataset_split)) { + ex.metadata.dataset_split.forEach((split) => acc.add(split)); + } else if (typeof ex.metadata.dataset_split === "string") { + acc.add(ex.metadata.dataset_split); + } + } + return acc; + }, new Set()); + return allSplits.size ? Array.from(allSplits) : undefined; + } + async _end(): Promise { const experiment = this._experiment; if (!experiment) { @@ -693,6 +735,7 @@ class _ExperimentManager { } const projectMetadata = await this._getExperimentMetadata(); projectMetadata["dataset_version"] = await this._getDatasetVersion(); + projectMetadata["dataset_splits"] = await this._getDatasetSplits(); // Update revision_id if not already set if (!projectMetadata["revision_id"]) { projectMetadata["revision_id"] = await getDefaultRevisionId(); @@ -752,13 +795,6 @@ class ExperimentResults implements AsyncIterableIterator { } } -function convertInvokeToTopLevel(fn: TargetT): TargetNoInvoke { - if ("invoke" in fn) { - return fn.invoke.bind(fn); - } - return fn; -} - async function _evaluate( target: TargetT | AsyncGenerator, fields: EvaluateOptions & { experiment?: TracerSession } @@ -778,13 +814,13 @@ async function _evaluate( metadata: fields.metadata, experiment: experiment_ ?? fields.experimentPrefix, runs: newRuns ?? undefined, + numRepetitions: fields.numRepetitions ?? 
1, }).start(); if (_isCallable(target)) { - manager = await manager.withPredictions( - convertInvokeToTopLevel(target as TargetT), - { maxConcurrency: fields.maxConcurrency } - ); + manager = await manager.withPredictions(target, { + maxConcurrency: fields.maxConcurrency, + }); } if (fields.evaluators) { @@ -801,10 +837,8 @@ async function _evaluate( return results; } -type ForwardFn = ((...args: any[]) => Promise) | ((...args: any[]) => any); - async function _forward( - fn: ForwardFn, + fn: TargetT, example: Example, experimentName: string, metadata: KVMap, @@ -827,23 +861,28 @@ async function _forward( : new Date(example.created_at).toISOString(), }, client, + tracingEnabled: true, }; - const wrappedFn = traceable(fn, { - ...options, - tracingEnabled: true, - }) as ReturnType; + const wrappedFn = + "invoke" in fn + ? traceable(async (inputs) => { + const callbacks = await getLangchainCallbacks(); + return fn.invoke(inputs, { callbacks }); + }, options) + : traceable(fn, options); try { await wrappedFn(example.inputs); } catch (e) { console.error(`Error running target function: ${e}`); + printErrorStackTrace(e); } if (!run) { throw new Error(`Run not created by target function. This is most likely due to tracing not being enabled.\n -Try setting "LANGCHAIN_TRACING_V2=true" in your environment.`); +Try setting "LANGSMITH_TRACING=true" in your environment.`); } return { @@ -976,7 +1015,7 @@ async function _resolveExperiment( return [undefined, undefined]; } -function _isCallable(target: TargetT | AsyncGenerator): boolean { +function _isCallable(target: TargetT | AsyncGenerator): target is TargetT { return Boolean( typeof target === "function" || ("invoke" in target && typeof target.invoke === "function") diff --git a/js/src/evaluation/evaluate_comparative.ts b/js/src/evaluation/evaluate_comparative.ts index bb58f582f..5a67ee9f5 100644 --- a/js/src/evaluation/evaluate_comparative.ts +++ b/js/src/evaluation/evaluate_comparative.ts @@ -1,13 +1,33 @@ import { v4 as uuid4, validate } from "uuid"; import { Client } from "../index.js"; -import { ComparisonEvaluationResult, Example, Run } from "../schemas.js"; +import { + ComparisonEvaluationResult as ComparisonEvaluationResultRow, + Example, + Run, +} from "../schemas.js"; import { shuffle } from "../utils/shuffle.js"; +import { AsyncCaller } from "../utils/async_caller.js"; +import { evaluate } from "./index.js"; +import pRetry from "p-retry"; +import { getCurrentRunTree, traceable } from "../traceable.js"; + +type ExperimentResults = Awaited>; + +function isExperimentResultsList( + value: ExperimentResults[] | string[] +): value is ExperimentResults[] { + return value.some((x) => typeof x !== "string"); +} + +async function loadExperiment( + client: Client, + experiment: string | ExperimentResults +) { + const value = + typeof experiment === "string" ? experiment : experiment.experimentName; -function loadExperiment(client: Client, experiment: string) { return client.readProject( - validate(experiment) - ? { projectId: experiment } - : { projectName: experiment } + validate(value) ? 
{ projectId: value } : { projectName: value } ); } @@ -57,7 +77,7 @@ export interface EvaluateComparativeOptions { ( runs: Run[], example: Example - ) => ComparisonEvaluationResult | Promise + ) => ComparisonEvaluationResultRow | Promise >; /** * Randomize the order of outputs for each evaluation @@ -97,11 +117,14 @@ export interface EvaluateComparativeOptions { } export interface ComparisonEvaluationResults { - results: ComparisonEvaluationResult[]; + experimentName: string; + results: ComparisonEvaluationResultRow[]; } export async function evaluateComparative( - experiments: Array, + experiments: + | Array + | Array | ExperimentResults>, options: EvaluateComparativeOptions ): Promise { if (experiments.length < 2) { @@ -119,10 +142,34 @@ export async function evaluateComparative( } const client = options.client ?? new Client(); + const resolvedExperiments = await Promise.all(experiments); + + const projects = await (() => { + if (!isExperimentResultsList(resolvedExperiments)) { + return Promise.all( + resolvedExperiments.map((experiment) => + loadExperiment(client, experiment) + ) + ); + } - const projects = await Promise.all( - experiments.map((experiment) => loadExperiment(client, experiment)) - ); + // if we know the number of runs beforehand, check if the + // number of runs in the project matches the expected number of runs + return Promise.all( + resolvedExperiments.map((experiment) => + pRetry( + async () => { + const project = await loadExperiment(client, experiment); + if (project.run_count !== experiment?.results.length) { + throw new Error("Experiment is missing runs. Retrying."); + } + return project; + }, + { factor: 2, minTimeout: 1000, retries: 10 } + ) + ) + ); + })(); if (new Set(projects.map((p) => p.reference_dataset_id)).size > 1) { throw new Error("All experiments must have the same reference dataset."); @@ -146,7 +193,7 @@ export async function evaluateComparative( const datasetVersion = projects.at(0)?.extra?.metadata?.dataset_version; const id = uuid4(); - const name = (() => { + const experimentName = (() => { if (!options.experimentPrefix) { const names = projects .map((p) => p.name) @@ -159,17 +206,46 @@ export async function evaluateComparative( })(); // TODO: add URL to the comparative experiment - console.log(`Starting pairwise evaluation of: ${name}`); + console.log(`Starting pairwise evaluation of: ${experimentName}`); const comparativeExperiment = await client.createComparativeExperiment({ id, - name, + name: experimentName, experimentIds: projects.map((p) => p.id), description: options.description, metadata: options.metadata, referenceDatasetId: projects.at(0)?.reference_dataset_id, }); + const viewUrl = await (async () => { + const projectId = projects.at(0)?.id ?? 
projects.at(1)?.id; + const datasetId = comparativeExperiment?.reference_dataset_id; + + if (projectId && datasetId) { + const hostUrl = (await client.getProjectUrl({ projectId })) + .split("/projects/p/") + .at(0); + + const result = new URL(`${hostUrl}/datasets/${datasetId}/compare`); + result.searchParams.set( + "selectedSessions", + projects.map((p) => p.id).join(",") + ); + + result.searchParams.set( + "comparativeExperiment", + comparativeExperiment.id + ); + return result.toString(); + } + + return null; + })(); + + if (viewUrl != null) { + console.log(`View results at: ${viewUrl}`); + } + const experimentRuns = await Promise.all( projects.map((p) => loadTraces(client, p.id, { loadNested: !!options.loadNested }) @@ -225,41 +301,79 @@ export async function evaluateComparative( } } - const results: ComparisonEvaluationResult[] = []; + const caller = new AsyncCaller({ maxConcurrency: options.maxConcurrency }); - // TODO: handle maxConcurrency - for (const [exampleId, runs] of Object.entries(runMapByExampleId)) { - const example = exampleMap[exampleId]; - if (!example) throw new Error(`Example ${exampleId} not found.`); + async function evaluateAndSubmitFeedback( + runs: Run[], + example: Example, + evaluator: ( + runs: Run[], + example: Example + ) => ComparisonEvaluationResultRow | Promise + ) { + const expectedRunIds = new Set(runs.map((r) => r.id)); + const result = await evaluator( + options.randomizeOrder ? shuffle(runs) : runs, + example + ); - for (const evaluator of options.evaluators) { - const expectedRunIds = new Set(runs.map((r) => r.id)); + for (const [runId, score] of Object.entries(result.scores)) { + // validate if the run id + if (!expectedRunIds.has(runId)) { + throw new Error(`Returning an invalid run id ${runId} from evaluator.`); + } - if (options.randomizeOrder) { - runs.sort(() => Math.random() - 0.5); + await client.createFeedback(runId, result.key, { + score, + sourceRunId: result.source_run_id, + comparativeExperimentId: comparativeExperiment.id, + }); + } + + return result; + } + + const tracedEvaluators = options.evaluators.map((evaluator) => + traceable( + async ( + runs: Run[], + example: Example + ): Promise => { + const evaluatorRun = getCurrentRunTree(); + const result = await evaluator(runs, example); + + // sanitise the payload before sending to LangSmith + evaluatorRun.inputs = { runs: runs, example: example }; + evaluatorRun.outputs = result; + + return { + ...result, + source_run_id: result.source_run_id ?? evaluatorRun.id, + }; + }, + { + project_name: "evaluators", + name: evaluator.name || "evaluator", } - const result = await evaluator( - options.randomizeOrder ? 
shuffle(runs) : runs, - example + ) + ); + + const promises = Object.entries(runMapByExampleId).flatMap( + ([exampleId, runs]) => { + const example = exampleMap[exampleId]; + if (!example) throw new Error(`Example ${exampleId} not found.`); + + return tracedEvaluators.map((evaluator) => + caller.call( + evaluateAndSubmitFeedback, + runs, + exampleMap[exampleId], + evaluator + ) ); - results.push(result); - - for (const [runId, score] of Object.entries(result.scores)) { - // validate if the run id - if (!expectedRunIds.has(runId)) { - throw new Error( - `Returning an invalid run id ${runId} from evaluator.` - ); - } - - await client.createFeedback(runId, result.key, { - score, - sourceRunId: result.source_run_id, - comparativeExperimentId: comparativeExperiment.id, - }); - } } - } + ); - return { results }; + const results: ComparisonEvaluationResultRow[] = await Promise.all(promises); + return { experimentName, results }; } diff --git a/js/src/evaluation/evaluator.ts b/js/src/evaluation/evaluator.ts index 4e07fa9dc..92777082b 100644 --- a/js/src/evaluation/evaluator.ts +++ b/js/src/evaluation/evaluator.ts @@ -87,7 +87,7 @@ export interface RunEvaluator { run: Run, example?: Example, options?: Partial - ): Promise; + ): Promise; } export type RunEvaluatorLike = @@ -114,12 +114,26 @@ export class DynamicRunEvaluator any> }) as Func; } + private isEvaluationResults(x: unknown): x is EvaluationResults { + return ( + typeof x === "object" && + x != null && + "results" in x && + Array.isArray(x.results) && + x.results.length > 0 + ); + } + private coerceEvaluationResults( results: Record | EvaluationResults, sourceRunId: string - ): EvaluationResult { - if ("results" in results) { - throw new Error("EvaluationResults not supported yet."); + ): EvaluationResult | EvaluationResults { + if (this.isEvaluationResults(results)) { + return { + results: results.results.map((r) => + this.coerceEvaluationResult(r, sourceRunId, false) + ), + }; } return this.coerceEvaluationResult( @@ -162,7 +176,7 @@ export class DynamicRunEvaluator any> run: Run, example?: Example, options?: Partial - ): Promise { + ): Promise { const sourceRunId = uuidv4(); const metadata: Record = { targetRunId: run.id, @@ -177,7 +191,12 @@ export class DynamicRunEvaluator any> const wrappedTraceableFunc: TraceableFunction = traceable( this.func, - { project_name: "evaluators", name: "evaluator", ...options } + { + project_name: "evaluators", + name: "evaluator", + id: sourceRunId, + ...options, + } ); const result = (await wrappedTraceableFunc( diff --git a/js/src/evaluation/langchain.ts b/js/src/evaluation/langchain.ts new file mode 100644 index 000000000..87010c7ec --- /dev/null +++ b/js/src/evaluation/langchain.ts @@ -0,0 +1,69 @@ +import type { Run, Example } from "../schemas.js"; +import { type LoadEvaluatorOptions, loadEvaluator } from "langchain/evaluation"; +import { getLangchainCallbacks } from "../langchain.js"; + +function isStringifiable( + value: unknown +): value is string | number | boolean | bigint { + return ( + typeof value === "string" || + typeof value === "number" || + typeof value === "boolean" || + typeof value === "bigint" + ); +} + +// utility methods for extracting stringified values +// from unknown inputs and records +function getPrimitiveValue(value: unknown) { + if (isStringifiable(value)) return String(value); + if (!Array.isArray(value) && typeof value === "object" && value != null) { + const values = Object.values(value); + if (values.length === 1 && isStringifiable(values[0])) { + return 
String(values[0]); + } + } + return undefined; +} + +/** + * This utility function loads a LangChain string evaluator and returns a function + * which can be used by newer `evaluate` function. + * + * @param type Type of string evaluator, one of "criteria" or "labeled_criteria + * @param options Options for loading the evaluator + * @returns Evaluator consumable by `evaluate` + */ +export async function getLangchainStringEvaluator( + type: "criteria" | "labeled_criteria", + options: LoadEvaluatorOptions & { + formatEvaluatorInputs?: ( + run: Run, + example: Example + ) => { prediction: string; reference?: string; input?: string }; + } +) { + const evaluator = await loadEvaluator(type, options); + const feedbackKey = getPrimitiveValue(options.criteria) ?? type; + + const formatEvaluatorInputs = + options.formatEvaluatorInputs ?? + ((run: Run, example: Example) => { + const prediction = getPrimitiveValue(run.outputs); + const reference = getPrimitiveValue(example.outputs); + const input = getPrimitiveValue(example.inputs); + + if (prediction == null) throw new Error("Missing prediction"); + if (type === "criteria") return { prediction, input }; + return { prediction, reference, input }; + }); + + return async (run: Run, example: Example) => { + const score = await evaluator.evaluateStrings( + formatEvaluatorInputs(run, example), + { callbacks: await getLangchainCallbacks() } + ); + + return { key: feedbackKey, ...score }; + }; +} diff --git a/js/src/index.ts b/js/src/index.ts index 9aab556f7..7f8cf8520 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -6,9 +6,10 @@ export type { TracerSession, Run, Feedback, + RetrieverOutput, } from "./schemas.js"; export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.23"; +export const __version__ = "0.1.42"; diff --git a/js/src/langchain.ts b/js/src/langchain.ts new file mode 100644 index 000000000..6eca684e7 --- /dev/null +++ b/js/src/langchain.ts @@ -0,0 +1,167 @@ +import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; +import { + Runnable, + RunnableConfig, + patchConfig, + getCallbackManagerForConfig, +} from "@langchain/core/runnables"; + +import { RunTree } from "./run_trees.js"; +import { Run } from "./schemas.js"; +import { + TraceableFunction, + getCurrentRunTree, + isTraceableFunction, +} from "./traceable.js"; +import { isAsyncIterable, isIteratorLike } from "./utils/asserts.js"; + +/** + * Converts the current run tree active within a traceable-wrapped function + * into a LangChain compatible callback manager. This is useful to handoff tracing + * from LangSmith to LangChain Runnables and LLMs. + * + * @param {RunTree | undefined} currentRunTree Current RunTree from within a traceable-wrapped function. If not provided, the current run tree will be inferred from AsyncLocalStorage. + * @returns {CallbackManager | undefined} Callback manager used by LangChain Runnable objects. + */ +export async function getLangchainCallbacks( + currentRunTree?: RunTree | undefined +) { + const runTree: RunTree | undefined = currentRunTree ?? getCurrentRunTree(); + if (!runTree) return undefined; + + // TODO: CallbackManager.configure() is only async due to LangChainTracer + // factory being unnecessarily async. 
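(For orientation, the handoff this helper enables looks roughly like the sketch below; the `langsmith/traceable` and `langsmith/langchain` entry points and the `chain` runnable are assumptions for illustration, not lines from this diff.)

```typescript
import type { Runnable } from "@langchain/core/runnables";
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";

// A traceable-wrapped function that hands its active run tree off to a
// LangChain runnable, mirroring the pattern `_forward` uses above.
export function makeTracedInvoke(chain: Runnable) {
  return traceable(
    async (inputs: { question: string }) => {
      // Convert the current LangSmith run tree into LangChain callbacks.
      const callbacks = await getLangchainCallbacks();
      return chain.invoke(inputs, { callbacks });
    },
    { name: "traced-invoke" }
  );
}
```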
+ let callbacks = await CallbackManager.configure(); + if (!callbacks && runTree.tracingEnabled) { + callbacks = new CallbackManager(); + } + + let langChainTracer = callbacks?.handlers.find( + (handler): handler is LangChainTracer => + handler?.name === "langchain_tracer" + ); + + if (!langChainTracer && runTree.tracingEnabled) { + langChainTracer = new LangChainTracer(); + callbacks?.addHandler(langChainTracer); + } + + const runMap = new Map(); + + // find upward root run + let rootRun = runTree; + const rootVisited = new Set(); + while (rootRun.parent_run) { + if (rootVisited.has(rootRun.id)) break; + rootVisited.add(rootRun.id); + rootRun = rootRun.parent_run; + } + + const queue = [rootRun]; + const visited = new Set(); + + while (queue.length > 0) { + const current = queue.shift(); + if (!current || visited.has(current.id)) continue; + visited.add(current.id); + + runMap.set(current.id, current); + if (current.child_runs) { + queue.push(...current.child_runs); + } + } + + if (callbacks != null) { + Object.assign(callbacks, { _parentRunId: runTree.id }); + } + + if (langChainTracer != null) { + if ( + "updateFromRunTree" in langChainTracer && + typeof langChainTracer === "function" + ) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore @langchain/core can use a different version of LangSmith + langChainTracer.updateFromRunTree(runTree); + } else { + Object.assign(langChainTracer, { + runMap, + client: runTree.client, + projectName: runTree.project_name || langChainTracer.projectName, + exampleId: runTree.reference_example_id || langChainTracer.exampleId, + }); + } + } + + return callbacks; +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>; + +/** + * RunnableTraceable is a Runnable that wraps a traceable function. + * This allows adding Langsmith traced functions into LangChain sequences. + */ +export class RunnableTraceable extends Runnable< + RunInput, + RunOutput +> { + lc_serializable = false; + + lc_namespace = ["langchain_core", "runnables"]; + + protected func: AnyTraceableFunction; + + constructor(fields: { func: AnyTraceableFunction }) { + super(fields); + + if (!isTraceableFunction(fields.func)) { + throw new Error( + "RunnableTraceable requires a function that is wrapped in traceable higher-order function" + ); + } + + this.func = fields.func; + } + + async invoke(input: RunInput, options?: Partial) { + const [config] = this._getOptionsList(options ?? 
{}, 1); + const callbacks = await getCallbackManagerForConfig(config); + + return (await this.func( + patchConfig(config, { callbacks }), + input + )) as RunOutput; + } + + async *_streamIterator( + input: RunInput, + options?: Partial + ): AsyncGenerator { + const result = await this.invoke(input, options); + + if (isAsyncIterable(result)) { + for await (const item of result) { + yield item as RunOutput; + } + return; + } + + if (isIteratorLike(result)) { + while (true) { + const state: IteratorResult = result.next(); + if (state.done) break; + yield state.value as RunOutput; + } + return; + } + + yield result; + } + + static from(func: AnyTraceableFunction) { + return new RunnableTraceable({ func }); + } +} diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 267b22c5e..4427305e0 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -6,15 +6,8 @@ import { getRuntimeEnvironment, } from "./utils/env.js"; import { Client } from "./client.js"; - -const warnedMessages: Record = {}; - -function warnOnce(message: string): void { - if (!warnedMessages[message]) { - console.warn(message); - warnedMessages[message] = true; - } -} +import { isTracingEnabled } from "./env.js"; +import { warnOnce } from "./utils/warn.js"; function stripNonAlphanumeric(input: string) { return input.replace(/[-:.]/g, ""); @@ -58,6 +51,9 @@ export interface RunTreeConfig { on_end?: (runTree: RunTree) => void; execution_order?: number; child_execution_order?: number; + + trace_id?: string; + dotted_order?: string; } export interface RunnableConfigLike { @@ -84,15 +80,69 @@ export interface RunnableConfigLike { interface CallbackManagerLike { handlers: TracerLike[]; getParentRunId?: () => string | undefined; + copy?: () => CallbackManagerLike; } interface TracerLike { name: string; } + interface LangChainTracerLike extends TracerLike { name: "langchain_tracer"; projectName: string; getRun?: (id: string) => RunTree | undefined; + client: Client; + updateFromRunTree?: (runTree: RunTree) => void; +} + +interface HeadersLike { + get(name: string): string | null; + set(name: string, value: string): void; +} + +/** + * Baggage header information + */ +class Baggage { + metadata: KVMap | undefined; + tags: string[] | undefined; + + constructor(metadata: KVMap | undefined, tags: string[] | undefined) { + this.metadata = metadata; + this.tags = tags; + } + + static fromHeader(value: string) { + const items = value.split(","); + let metadata: KVMap = {}; + let tags: string[] = []; + for (const item of items) { + const [key, uriValue] = item.split("="); + const value = decodeURIComponent(uriValue); + if (key === "langsmith-metadata") { + metadata = JSON.parse(value); + } else if (key === "langsmith-tags") { + tags = value.split(","); + } + } + + return new Baggage(metadata, tags); + } + + toHeader(): string { + const items = []; + if (this.metadata && Object.keys(this.metadata).length > 0) { + items.push( + `langsmith-metadata=${encodeURIComponent( + JSON.stringify(this.metadata) + )}` + ); + } + if (this.tags && this.tags.length > 0) { + items.push(`langsmith-tags=${encodeURIComponent(this.tags.join(","))}`); + } + return items.join(","); + } } export class RunTree implements BaseRun { @@ -156,47 +206,6 @@ export class RunTree implements BaseRun { } } - static fromRunnableConfig( - config: RunnableConfigLike, - props: { - name: string; - tags?: string[]; - metadata?: KVMap; - } - ): RunTree { - // We only handle the callback manager case for now - const callbackManager = config?.callbacks as - | CallbackManagerLike 
- | undefined; - let parentRun: RunTree | undefined; - let projectName: string | undefined; - if (callbackManager) { - const parentRunId = callbackManager?.getParentRunId?.() ?? ""; - const langChainTracer = callbackManager?.handlers?.find( - (handler: TracerLike) => handler?.name == "langchain_tracer" - ) as LangChainTracerLike | undefined; - parentRun = langChainTracer?.getRun?.(parentRunId); - projectName = langChainTracer?.projectName; - } - const dedupedTags = [ - ...new Set((parentRun?.tags ?? []).concat(config?.tags ?? [])), - ]; - const dedupedMetadata = { - ...parentRun?.extra?.metadata, - ...config?.metadata, - }; - const rt = new RunTree({ - name: props?.name ?? "", - parent_run: parentRun, - tags: dedupedTags, - extra: { - metadata: dedupedMetadata, - }, - project_name: projectName, - }); - return rt; - } - private static getDefaultConfig(): object { return { id: uuid.v4(), @@ -230,6 +239,36 @@ export class RunTree implements BaseRun { child_execution_order: child_execution_order, }); + type ExtraWithSymbol = Record; + const LC_CHILD = Symbol.for("lc:child_config"); + + const presentConfig = + (config.extra as ExtraWithSymbol | undefined)?.[LC_CHILD] ?? + (this.extra as ExtraWithSymbol)[LC_CHILD]; + + // tracing for LangChain is defined by the _parentRunId and runMap of the tracer + if (isRunnableConfigLike(presentConfig)) { + const newConfig: RunnableConfigLike = { ...presentConfig }; + const callbacks: CallbackManagerLike | unknown[] | undefined = + isCallbackManagerLike(newConfig.callbacks) + ? newConfig.callbacks.copy?.() + : undefined; + + if (callbacks) { + // update the parent run id + Object.assign(callbacks, { _parentRunId: child.id }); + + // only populate if we're in a newer LC.JS version + callbacks.handlers + ?.find(isLangChainTracerLike) + ?.updateFromRunTree?.(child); + + newConfig.callbacks = callbacks; + } + + (child.extra as ExtraWithSymbol)[LC_CHILD] = newConfig; + } + // propagate child_execution_order upwards const visited = new Set(); let current: RunTree | undefined = this as RunTree; @@ -343,6 +382,122 @@ export class RunTree implements BaseRun { toJSON() { return this._convertToCreate(this, undefined, false); } + + static fromRunnableConfig( + parentConfig: RunnableConfigLike, + props: RunTreeConfig + ): RunTree { + // We only handle the callback manager case for now + const callbackManager = parentConfig?.callbacks as + | CallbackManagerLike + | undefined; + let parentRun: RunTree | undefined; + let projectName: string | undefined; + let client: Client | undefined; + + let tracingEnabled = isTracingEnabled(); + + if (callbackManager) { + const parentRunId = callbackManager?.getParentRunId?.() ?? ""; + const langChainTracer = callbackManager?.handlers?.find( + (handler: TracerLike) => handler?.name == "langchain_tracer" + ) as LangChainTracerLike | undefined; + + parentRun = langChainTracer?.getRun?.(parentRunId); + projectName = langChainTracer?.projectName; + client = langChainTracer?.client; + tracingEnabled = tracingEnabled || !!langChainTracer; + } + + if (!parentRun) { + return new RunTree({ + ...props, + client, + tracingEnabled, + project_name: projectName, + }); + } + + const parentRunTree = new RunTree({ + name: parentRun.name, + id: parentRun.id, + client, + tracingEnabled, + project_name: projectName, + tags: [ + ...new Set((parentRun?.tags ?? []).concat(parentConfig?.tags ?? 
[])), + ], + extra: { + metadata: { + ...parentRun?.extra?.metadata, + ...parentConfig?.metadata, + }, + }, + }); + + return parentRunTree.createChild(props); + } + + static fromDottedOrder(dottedOrder: string): RunTree | undefined { + return this.fromHeaders({ "langsmith-trace": dottedOrder }); + } + + static fromHeaders( + headers: Record | HeadersLike, + inheritArgs?: RunTreeConfig + ): RunTree | undefined { + const rawHeaders: Record = + "get" in headers && typeof headers.get === "function" + ? { + "langsmith-trace": headers.get("langsmith-trace"), + baggage: headers.get("baggage"), + } + : (headers as Record); + + const headerTrace = rawHeaders["langsmith-trace"]; + if (!headerTrace || typeof headerTrace !== "string") return undefined; + + const parentDottedOrder = headerTrace.trim(); + const parsedDottedOrder = parentDottedOrder.split(".").map((part) => { + const [strTime, uuid] = part.split("Z"); + return { strTime, time: Date.parse(strTime + "Z"), uuid }; + }); + + const traceId = parsedDottedOrder[0].uuid; + + const config: RunTreeConfig = { + ...inheritArgs, + name: inheritArgs?.["name"] ?? "parent", + run_type: inheritArgs?.["run_type"] ?? "chain", + start_time: inheritArgs?.["start_time"] ?? Date.now(), + id: parsedDottedOrder.at(-1)?.uuid, + trace_id: traceId, + dotted_order: parentDottedOrder, + }; + + if (rawHeaders["baggage"] && typeof rawHeaders["baggage"] === "string") { + const baggage = Baggage.fromHeader(rawHeaders["baggage"]); + config.metadata = baggage.metadata; + config.tags = baggage.tags; + } + + return new RunTree(config); + } + + toHeaders(headers?: HeadersLike) { + const result = { + "langsmith-trace": this.dotted_order, + baggage: new Baggage(this.extra?.metadata, this.tags).toHeader(), + }; + + if (headers) { + for (const [key, value] of Object.entries(result)) { + headers.set(key, value); + } + } + + return result; + } } export function isRunTree(x?: unknown): x is RunTree { @@ -353,15 +508,26 @@ export function isRunTree(x?: unknown): x is RunTree { ); } -function containsLangChainTracerLike(x?: unknown): x is LangChainTracerLike[] { +function isLangChainTracerLike(x: unknown): x is LangChainTracerLike { return ( - Array.isArray(x) && - x.some((callback: unknown) => { - return ( - typeof (callback as LangChainTracerLike).name === "string" && - (callback as LangChainTracerLike).name === "langchain_tracer" - ); - }) + typeof x === "object" && + x != null && + typeof (x as LangChainTracerLike).name === "string" && + (x as LangChainTracerLike).name === "langchain_tracer" + ); +} + +function containsLangChainTracerLike(x: unknown): x is LangChainTracerLike[] { + return ( + Array.isArray(x) && x.some((callback) => isLangChainTracerLike(callback)) + ); +} + +function isCallbackManagerLike(x: unknown): x is CallbackManagerLike { + return ( + typeof x === "object" && + x != null && + Array.isArray((x as CallbackManagerLike).handlers) ); } diff --git a/js/src/schemas.ts b/js/src/schemas.ts index ca53e301b..5692b8a86 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -229,6 +229,7 @@ export interface RunUpdate { export interface ExampleCreate extends BaseExample { id?: string; created_at?: string; + split?: string | string[]; } export interface Example extends BaseExample { @@ -244,6 +245,11 @@ export interface ExampleUpdate { inputs?: KVMap; outputs?: KVMap; metadata?: KVMap; + split?: string | string[]; +} + +export interface ExampleUpdateWithId extends ExampleUpdate { + id: string; } export interface BaseDataset { name: string; @@ -380,3 +386,83 @@ export 
interface ComparativeExperiment { experiments_info?: Array>; feedback_stats?: Record; } + +/** + * Represents the expected output schema returned by traceable + * or by run tree output for LangSmith to correctly display + * documents in the UI + */ +export type RetrieverOutput = Array<{ + page_content: string; + type: "Document"; + metadata?: KVMap; +}>; + +export interface InvocationParamsSchema { + ls_provider?: string; + ls_model_name?: string; + ls_model_type: "chat" | "text"; + ls_temperature?: number; + ls_max_tokens?: number; + ls_stop?: string[]; +} + +export interface PromptCommit { + owner: string; + repo: string; + commit_hash: string; + manifest: Record; + examples: Array>; +} + +export interface Prompt { + repo_handle: string; + description?: string; + readme?: string; + id: string; + tenant_id: string; + created_at: string; + updated_at: string; + is_public: boolean; + is_archived: boolean; + tags: string[]; + original_repo_id?: string; + upstream_repo_id?: string; + owner?: string; + full_name: string; + num_likes: number; + num_downloads: number; + num_views: number; + liked_by_auth_user: boolean; + last_commit_hash?: string; + num_commits: number; + original_repo_full_name?: string; + upstream_repo_full_name?: string; +} + +export interface ListPromptsResponse { + repos: Prompt[]; + total: number; +} + +export interface ListCommitsResponse { + commits: PromptCommit[]; + total: number; +} + +export type PromptSortField = + | "num_downloads" + | "num_views" + | "updated_at" + | "num_likes"; + +export interface LikePromptResponse { + likes: number; +} + +export interface LangSmithSettings { + id: string; + display_name: string; + created_at: string; + tenant_handle?: string; +} diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts new file mode 100644 index 000000000..0cdd1f936 --- /dev/null +++ b/js/src/singletons/traceable.ts @@ -0,0 +1,86 @@ +import { RunTree } from "../run_trees.js"; +import { TraceableFunction } from "./types.js"; + +interface AsyncLocalStorageInterface { + getStore: () => RunTree | undefined; + + run: (context: RunTree | undefined, fn: () => void) => void; +} + +class MockAsyncLocalStorage implements AsyncLocalStorageInterface { + getStore() { + return undefined; + } + + run(_: RunTree | undefined, callback: () => void): void { + return callback(); + } +} + +const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage"); + +const mockAsyncLocalStorage = new MockAsyncLocalStorage(); + +class AsyncLocalStorageProvider { + getInstance(): AsyncLocalStorageInterface { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return (globalThis as any)[TRACING_ALS_KEY] ?? mockAsyncLocalStorage; + } + + initializeGlobalInstance(instance: AsyncLocalStorageInterface) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + if ((globalThis as any)[TRACING_ALS_KEY] === undefined) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (globalThis as any)[TRACING_ALS_KEY] = instance; + } + } +} + +export const AsyncLocalStorageProviderSingleton = + new AsyncLocalStorageProvider(); + +/** + * Return the current run tree from within a traceable-wrapped function. + * Will throw an error if called outside of a traceable function. + * + * @returns The run tree for the given context. 
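 *
 * A minimal sketch of the intended call pattern (names here are assumed,
 * for illustration only):
 *
 *   const summarize = traceable(async (text: string) => {
 *     const runTree = getCurrentRunTree();
 *     // e.g. trim what gets recorded as this run's inputs
 *     runTree.inputs = { length: text.length };
 *     return text.slice(0, 100);
 *   });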
+ */ +export const getCurrentRunTree = () => { + const runTree = AsyncLocalStorageProviderSingleton.getInstance().getStore(); + if (runTree === undefined) { + throw new Error( + [ + "Could not get the current run tree.", + "", + "Please make sure you are calling this method within a traceable function or the tracing is enabled.", + ].join("\n") + ); + } + + return runTree; +}; + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function withRunTree any>( + runTree: RunTree, + fn: Fn +): Promise>> { + const storage = AsyncLocalStorageProviderSingleton.getInstance(); + return new Promise>>((resolve, reject) => { + storage.run( + runTree, + () => void Promise.resolve(fn()).then(resolve).catch(reject) + ); + }); +} + +export const ROOT = Symbol.for("langsmith:traceable:root"); + +export function isTraceableFunction( + x: unknown + // eslint-disable-next-line @typescript-eslint/no-explicit-any +): x is TraceableFunction { + return typeof x === "function" && "langsmith:traceable" in x; +} + +export type { TraceableFunction } from "./types.js"; diff --git a/js/src/singletons/types.ts b/js/src/singletons/types.ts new file mode 100644 index 000000000..dd7efabf3 --- /dev/null +++ b/js/src/singletons/types.ts @@ -0,0 +1,77 @@ +import { RunTree, RunnableConfigLike } from "../run_trees.js"; +import { ROOT } from "./traceable.js"; + +type SmartPromise = T extends AsyncGenerator + ? T + : T extends Promise + ? T + : Promise; +type WrapArgReturnPair = Pair extends [ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + infer Args extends any[], + infer Return +] + ? Args extends [RunTree, ...infer RestArgs] + ? { + ( + runTree: RunTree | typeof ROOT, + ...args: RestArgs + ): SmartPromise; + (config: RunnableConfigLike, ...args: RestArgs): SmartPromise; + } + : { + (...args: Args): SmartPromise; + (runTree: RunTree, ...rest: Args): SmartPromise; + (config: RunnableConfigLike, ...args: Args): SmartPromise; + } + : never; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( + x: infer I +) => void + ? I + : never; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export type TraceableFunction any> = + // function overloads are represented as intersections rather than unions + // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 + (Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + (...args: infer A4): infer R4; + (...args: infer A5): infer R5; + } + ? UnionToIntersection< + WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]> + > + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + (...args: infer A4): infer R4; + } + ? UnionToIntersection< + WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]> + > + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + } + ? UnionToIntersection> + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + } + ? UnionToIntersection> + : Func extends { + (...args: infer A1): infer R1; + } + ? 
UnionToIntersection> + : never) & { + // Other properties of Func + [K in keyof Func]: Func[K]; + }; + +export type RunTreeLike = RunTree; diff --git a/js/src/tests/anonymizer.test.ts b/js/src/tests/anonymizer.test.ts new file mode 100644 index 000000000..880d4d16c --- /dev/null +++ b/js/src/tests/anonymizer.test.ts @@ -0,0 +1,131 @@ +import { StringNodeRule, createAnonymizer } from "../anonymizer/index.js"; +import { v4 as uuid } from "uuid"; +import { traceable } from "../traceable.js"; +import { BaseMessage, SystemMessage } from "@langchain/core/messages"; +import { mockClient } from "./utils/mock_client.js"; +import { getAssumedTreeFromCalls } from "./utils/tree.js"; + +const EMAIL_REGEX = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+.[a-zA-Z]{2,}/g; +const UUID_REGEX = + /[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}/g; + +describe("replacer", () => { + const replacer = (text: string) => + text.replace(EMAIL_REGEX, "[email address]").replace(UUID_REGEX, "[uuid]"); + + test("object", () => { + expect( + createAnonymizer(replacer)({ + message: "Hello, this is my email: hello@example.com", + metadata: uuid(), + }) + ).toEqual({ + message: "Hello, this is my email: [email address]", + metadata: "[uuid]", + }); + }); + + test("array", () => { + expect(createAnonymizer(replacer)(["human", "hello@example.com"])).toEqual([ + "human", + "[email address]", + ]); + }); + + test("string", () => { + expect(createAnonymizer(replacer)("hello@example.com")).toEqual( + "[email address]" + ); + }); +}); + +describe("declared", () => { + const replacers: StringNodeRule[] = [ + { pattern: EMAIL_REGEX, replace: "[email address]" }, + { pattern: UUID_REGEX, replace: "[uuid]" }, + ]; + + test("object", () => { + expect( + createAnonymizer(replacers)({ + message: "Hello, this is my email: hello@example.com", + metadata: uuid(), + }) + ).toEqual({ + message: "Hello, this is my email: [email address]", + metadata: "[uuid]", + }); + }); + + test("array", () => { + expect(createAnonymizer(replacers)(["human", "hello@example.com"])).toEqual( + ["human", "[email address]"] + ); + }); + + test("string", () => { + expect(createAnonymizer(replacers)("hello@example.com")).toEqual( + "[email address]" + ); + }); +}); + +describe("client", () => { + test("messages", async () => { + const anonymizer = createAnonymizer([ + { pattern: EMAIL_REGEX, replace: "[email]" }, + { pattern: UUID_REGEX, replace: "[uuid]" }, + ]); + + const { client, callSpy } = mockClient({ anonymizer }); + + const id = uuid(); + const child = traceable( + (value: { messages: BaseMessage[]; values: Record }) => { + return [ + ...value.messages.map((message) => message.content.toString()), + ...Object.entries(value.values).map((lst) => lst.join(": ")), + ].join("\n"); + }, + { name: "child" } + ); + + const evaluate = traceable( + (values: Record) => { + const messages = [new SystemMessage(`UUID: ${id}`)]; + return child({ messages, values }); + }, + { client, name: "evaluate", tracingEnabled: true } + ); + + const result = await evaluate({ email: "hello@example.com" }); + + expect(result).toEqual( + [`UUID: ${id}`, `email: hello@example.com`].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["evaluate:0", "child:1"], + data: { + "evaluate:0": { + inputs: { email: "[email]" }, + outputs: { outputs: [`UUID: [uuid]`, `email: [email]`].join("\n") }, + }, + "child:1": { + inputs: { + messages: [ + { + lc: 1, + type: "constructor", + id: ["langchain_core", "messages", "SystemMessage"], + 
kwargs: { content: "UUID: [uuid]" }, + }, + ], + values: { email: "[email]" }, + }, + outputs: { outputs: [`UUID: [uuid]`, `email: [email]`].join("\n") }, + }, + }, + }); + }); +}); diff --git a/js/src/tests/batch_client.int.test.ts b/js/src/tests/batch_client.int.test.ts index 4715e80a8..4705fae3c 100644 --- a/js/src/tests/batch_client.int.test.ts +++ b/js/src/tests/batch_client.int.test.ts @@ -1,7 +1,11 @@ import { Client } from "../client.js"; import { RunTree, convertToDottedOrderFormat } from "../run_trees.js"; import { v4 as uuidv4 } from "uuid"; -import { deleteProject, waitUntilRunFound } from "./utils.js"; +import { + deleteProject, + waitUntilProjectFound, + waitUntilRunFound, +} from "./utils.js"; test.concurrent( "Test persist update run", @@ -11,7 +15,8 @@ test.concurrent( callerOptions: { maxRetries: 2 }, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_batch_1"; + const projectName = + "__test_persist_update_run_batch_1" + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const runId = uuidv4(); @@ -34,7 +39,12 @@ test.concurrent( dotted_order: dottedOrder, trace_id: runId, }); - await waitUntilRunFound(langchainClient, runId, true); + + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); + const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); await langchainClient.deleteProject({ projectName }); @@ -51,7 +61,9 @@ test.concurrent( pendingAutoBatchedRunLimit: 2, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_batch_above_bs_limit"; + const projectName = + "__test_persist_update_run_batch_above_bs_limit" + + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const createRun = async () => { @@ -76,7 +88,11 @@ test.concurrent( trace_id: runId, end_time: Math.floor(new Date().getTime() / 1000), }); - await waitUntilRunFound(langchainClient, runId, true); + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); + const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); }; @@ -96,7 +112,8 @@ test.concurrent( callerOptions: { maxRetries: 2 }, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_batch_with_delay"; + const projectName = + "__test_persist_update_run_batch_with_delay" + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const runId = uuidv4(); @@ -121,7 +138,10 @@ test.concurrent( trace_id: runId, end_time: Math.floor(new Date().getTime() / 1000), }); - await waitUntilRunFound(langchainClient, runId, true); + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); await langchainClient.deleteProject({ projectName }); @@ -137,7 +157,8 @@ test.concurrent( callerOptions: { maxRetries: 2 }, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_tree"; + const projectName = + "__test_persist_update_run_tree" + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const runId = uuidv4(); const runTree = new RunTree({ @@ -150,7 +171,10 @@ test.concurrent( await runTree.postRun(); await runTree.end({ output: "foo2" }); await runTree.patchRun(); - await waitUntilRunFound(langchainClient, runId, 
true); + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); expect(storedRun.dotted_order).toEqual(runTree.dotted_order); diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index eaf49b975..ee940c6fe 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -1,9 +1,23 @@ -import { Dataset, Run } from "../schemas.js"; -import { FunctionMessage, HumanMessage } from "@langchain/core/messages"; +import { Dataset, Run, TracerSession } from "../schemas.js"; +import { + FunctionMessage, + HumanMessage, + SystemMessage, +} from "@langchain/core/messages"; import { Client } from "../client.js"; import { v4 as uuidv4 } from "uuid"; -import { deleteDataset, deleteProject, toArray, waitUntil } from "./utils.js"; +import { + createRunsFactory, + deleteDataset, + deleteProject, + toArray, + waitUntil, +} from "./utils.js"; +import { ChatPromptTemplate, PromptTemplate } from "@langchain/core/prompts"; +import { ChatOpenAI } from "@langchain/openai"; +import { RunnableSequence } from "@langchain/core/runnables"; +import { load } from "langchain/load"; type CheckOutputsType = boolean | ((run: Run) => boolean); async function waitUntilRunFound( @@ -70,7 +84,7 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { const example = await client.createExample( { col1: "addedExampleCol1" }, { col2: "addedExampleCol2" }, - { datasetId: newDataset.id } + { datasetId: newDataset.id, split: "my_split" } ); const exampleValue = await client.readExample(example.id); expect(exampleValue.inputs.col1).toBe("addedExampleCol1"); @@ -82,18 +96,70 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { expect(examples.length).toBe(2); expect(examples.map((e) => e.id)).toContain(example.id); + const _examples = await toArray( + client.listExamples({ datasetId: newDataset.id, splits: ["my_split"] }) + ); + expect(_examples.length).toBe(1); + expect(_examples.map((e) => e.id)).toContain(example.id); + await client.updateExample(example.id, { inputs: { col1: "updatedExampleCol1" }, outputs: { col2: "updatedExampleCol2" }, + split: ["my_split2"], }); // Says 'example updated' or something similar const newExampleValue = await client.readExample(example.id); expect(newExampleValue.inputs.col1).toBe("updatedExampleCol1"); + expect(newExampleValue.metadata?.dataset_split).toStrictEqual(["my_split2"]); + + await client.updateExample(example.id, { + inputs: { col1: "updatedExampleCol3" }, + outputs: { col2: "updatedExampleCol4" }, + split: "my_split3", + }); + // Says 'example updated' or something similar + const newExampleValue2 = await client.readExample(example.id); + expect(newExampleValue2.inputs.col1).toBe("updatedExampleCol3"); + expect(newExampleValue2.metadata?.dataset_split).toStrictEqual(["my_split3"]); + + const newExample = await client.createExample( + { col1: "newAddedExampleCol1" }, + { col2: "newAddedExampleCol2" }, + { datasetId: newDataset.id } + ); + const newExampleValue_ = await client.readExample(newExample.id); + expect(newExampleValue_.inputs.col1).toBe("newAddedExampleCol1"); + expect(newExampleValue_.outputs?.col2).toBe("newAddedExampleCol2"); + + await client.updateExamples([ + { + id: newExample.id, + inputs: { col1: "newUpdatedExampleCol1" }, + outputs: { col2: "newUpdatedExampleCol2" }, + metadata: { foo: "baz" }, + }, + { + id: example.id, + 
inputs: { col1: "newNewUpdatedExampleCol" }, + outputs: { col2: "newNewUpdatedExampleCol2" }, + metadata: { foo: "qux" }, + }, + ]); + const updatedExample = await client.readExample(newExample.id); + expect(updatedExample.inputs.col1).toBe("newUpdatedExampleCol1"); + expect(updatedExample.outputs?.col2).toBe("newUpdatedExampleCol2"); + expect(updatedExample.metadata?.foo).toBe("baz"); + + const updatedExample2 = await client.readExample(example.id); + expect(updatedExample2.inputs.col1).toBe("newNewUpdatedExampleCol"); + expect(updatedExample2.outputs?.col2).toBe("newNewUpdatedExampleCol2"); + expect(updatedExample2.metadata?.foo).toBe("qux"); + await client.deleteExample(example.id); const examples2 = await toArray( client.listExamples({ datasetId: newDataset.id }) ); - expect(examples2.length).toBe(1); + expect(examples2.length).toBe(2); await client.deleteDataset({ datasetId }); const rawDataset = await client.createDataset(fileName, { @@ -475,6 +541,7 @@ test.concurrent( { output: "hi there 3" }, ], metadata: [{ key: "value 1" }, { key: "value 2" }, { key: "value 3" }], + splits: ["train", "test", ["train", "validation"]], datasetId: dataset.id, }); const initialExamplesList = await toArray( @@ -485,6 +552,22 @@ test.concurrent( client.listExamples({ datasetId: dataset.id }) ); expect(examplesList.length).toEqual(4); + + const examplesListLimited = await toArray( + client.listExamples({ datasetId: dataset.id, limit: 2 }) + ); + expect(examplesListLimited.length).toEqual(2); + + const examplesListOffset = await toArray( + client.listExamples({ datasetId: dataset.id, offset: 2 }) + ); + expect(examplesListOffset.length).toEqual(2); + + const examplesListLimitedOffset = await toArray( + client.listExamples({ datasetId: dataset.id, limit: 1, offset: 2 }) + ); + expect(examplesListLimitedOffset.length).toEqual(1); + await client.deleteExample(example.id); const examplesList2 = await toArray( client.listExamples({ datasetId: dataset.id }) @@ -505,16 +588,20 @@ test.concurrent( ); expect(example1?.outputs?.output).toEqual("hi there 1"); expect(example1?.metadata?.key).toEqual("value 1"); + expect(example1?.metadata?.dataset_split).toEqual(["train"]); const example2 = examplesList2.find( (e) => e.inputs.input === "hello world 2" ); expect(example2?.outputs?.output).toEqual("hi there 2"); expect(example2?.metadata?.key).toEqual("value 2"); + expect(example2?.metadata?.dataset_split).toEqual(["test"]); const example3 = examplesList2.find( (e) => e.inputs.input === "hello world 3" ); expect(example3?.outputs?.output).toEqual("hi there 3"); expect(example3?.metadata?.key).toEqual("value 3"); + expect(example3?.metadata?.dataset_split).toContain("train"); + expect(example3?.metadata?.dataset_split).toContain("validation"); await client.createExample( { input: "hello world" }, @@ -554,7 +641,436 @@ test.concurrent( expect(examplesList3[0].metadata?.foo).toEqual("bar"); expect(examplesList3[0].metadata?.baz).toEqual("qux"); + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'exists(metadata, "baz")', + }) + ); + expect(examplesList3.length).toEqual(1); + expect(examplesList3[0].metadata?.foo).toEqual("bar"); + expect(examplesList3[0].metadata?.baz).toEqual("qux"); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'has("metadata", \'{"foo": "bar"}\')', + }) + ); + expect(examplesList3.length).toEqual(1); + expect(examplesList3[0].metadata?.foo).toEqual("bar"); + expect(examplesList3[0].metadata?.baz).toEqual("qux"); 
+ + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'exists(metadata, "bazzz")', + }) + ); + expect(examplesList3.length).toEqual(0); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + splits: ["train"], + }) + ); + expect(examplesList3.length).toEqual(2); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + splits: ["test"], + }) + ); + expect(examplesList3.length).toEqual(1); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + splits: ["train", "test"], + }) + ); + expect(examplesList3.length).toEqual(3); + await client.deleteDataset({ datasetId: dataset.id }); }, 180_000 ); + +test.concurrent("list runs limit arg works", async () => { + const client = new Client(); + + const projectName = `test-limit-runs-${uuidv4().substring(0, 4)}`; + const limit = 6; + + // delete the project just in case + if (await client.hasProject({ projectName })) { + await client.deleteProject({ projectName }); + } + + try { + const runsArr: Array = []; + // create a fresh project with 10 runs --default amount created by createRunsFactory + await client.createProject({ projectName }); + await Promise.all( + createRunsFactory(projectName).map(async (payload) => { + if (!payload.id) payload.id = uuidv4(); + await client.createRun(payload); + await waitUntilRunFound(client, payload.id); + }) + ); + + let iters = 0; + for await (const run of client.listRuns({ limit, projectName })) { + expect(run).toBeDefined(); + runsArr.push(run); + iters += 1; + if (iters > limit) { + throw new Error( + `More runs returned than expected.\nExpected: ${limit}\nReceived: ${iters}` + ); + } + } + + expect(runsArr.length).toBe(limit); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } catch (e: any) { + if (e.message.startsWith("More runs returned than expected.")) { + throw e; + } else { + console.error(e); + } + } finally { + if (await client.hasProject({ projectName })) { + await client.deleteProject({ projectName }); + } + } +}); + +test.concurrent("Test run stats", async () => { + const client = new Client(); + const stats = await client.getRunStats({ + projectNames: ["default"], + runType: "llm", + }); + expect(stats).toBeDefined(); +}); + +test("Test list prompts", async () => { + const client = new Client(); + const uid = uuidv4(); + // push 3 prompts + const promptName1 = `test_prompt_${uid}__0`; + const promptName2 = `test_prompt_${uid}__1`; + const promptName3 = `test_prompt_${uid}__2`; + + await client.pushPrompt(promptName1, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + isPublic: true, + }); + await client.pushPrompt(promptName2, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + await client.pushPrompt(promptName3, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + // expect at least one of the prompts to have promptName1 + const response = client.listPrompts({ isPublic: true, query: uid }); + let found = false; + expect(response).toBeDefined(); + for await (const prompt of response) { + expect(prompt).toBeDefined(); + 
if (prompt.repo_handle === promptName1) { + found = true; + } + } + expect(found).toBe(true); + + // expect the prompts to be sorted by updated_at + const response2 = client.listPrompts({ sortField: "updated_at", query: uid }); + expect(response2).toBeDefined(); + let lastUpdatedAt: number | undefined; + for await (const prompt of response2) { + expect(prompt.updated_at).toBeDefined(); + const currentUpdatedAt = new Date(prompt.updated_at).getTime(); + if (lastUpdatedAt !== undefined) { + expect(currentUpdatedAt).toBeLessThanOrEqual(lastUpdatedAt); + } + lastUpdatedAt = currentUpdatedAt; + } + expect(lastUpdatedAt).toBeDefined(); +}); + +test("Test get prompt", async () => { + const client = new Client(); + const promptName = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + + const url = await client.pushPrompt(promptName, { object: promptTemplate }); + expect(url).toBeDefined(); + + const prompt = await client.getPrompt(promptName); + expect(prompt).toBeDefined(); + expect(prompt?.repo_handle).toBe(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test prompt exists", async () => { + const client = new Client(); + const nonExistentPrompt = `non_existent_${uuidv4().slice(0, 8)}`; + expect(await client.promptExists(nonExistentPrompt)).toBe(false); + + const existentPrompt = `existent_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(existentPrompt, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + expect(await client.promptExists(existentPrompt)).toBe(true); + + await client.deletePrompt(existentPrompt); +}); + +test("Test update prompt", async () => { + const client = new Client(); + + const promptName = `test_update_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + const updatedData = await client.updatePrompt(promptName, { + description: "Updated description", + isPublic: true, + tags: ["test", "update"], + }); + + expect(updatedData).toBeDefined(); + + const updatedPrompt = await client.getPrompt(promptName); + expect(updatedPrompt?.description).toBe("Updated description"); + expect(updatedPrompt?.is_public).toBe(true); + expect(updatedPrompt?.tags).toEqual( + expect.arrayContaining(["test", "update"]) + ); + + await client.deletePrompt(promptName); +}); + +test("Test delete prompt", async () => { + const client = new Client(); + + const promptName = `test_delete_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + expect(await client.promptExists(promptName)).toBe(true); + await client.deletePrompt(promptName); + expect(await client.promptExists(promptName)).toBe(false); +}); + +test("test listing projects by metadata", async () => { + const client = new Client(); + const uid = uuidv4(); + const projectName = `my_metadata_project_${uid}`; + + await client.createProject({ + projectName: 
projectName, + metadata: { + foobar: uid, + baz: "barfooqux", + }, + }); + + const projects = await client.listProjects({ metadata: { foobar: uid } }); + + let myProject: TracerSession | null = null; + for await (const project of projects) { + myProject = project; + } + expect(myProject?.name).toEqual(projectName); + + await client.deleteProject({ projectName: projectName }); +}); + +test("Test create commit", async () => { + const client = new Client(); + + const promptName = `test_create_commit_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + const newTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: {{question}}" }), + ], + { templateFormat: "mustache" } + ); + const commitUrl = await client.createCommit(promptName, newTemplate); + + expect(commitUrl).toBeDefined(); + expect(commitUrl).toContain(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test like and unlike prompt", async () => { + const client = new Client(); + + const promptName = `test_like_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + await client.likePrompt(promptName); + let prompt = await client.getPrompt(promptName); + expect(prompt?.num_likes).toBe(1); + + await client.unlikePrompt(promptName); + prompt = await client.getPrompt(promptName); + expect(prompt?.num_likes).toBe(0); + + await client.deletePrompt(promptName); +}); + +test("Test pull prompt commit", async () => { + const client = new Client(); + + const promptName = `test_pull_commit_${uuidv4().slice(0, 8)}`; + const initialTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + await client.pushPrompt(promptName, { object: initialTemplate }); + + const promptCommit = await client.pullPromptCommit(promptName); + expect(promptCommit).toBeDefined(); + expect(promptCommit.repo).toBe(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test push and pull prompt", async () => { + const client = new Client(); + + const promptName = `test_push_pull_${uuidv4().slice(0, 8)}`; + const template = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + const template2 = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: {{question}}" }), + ], + { templateFormat: "mustache" } + ); + + await client.pushPrompt(promptName, { + object: template, + description: "Test description", + readme: "Test readme", + tags: ["test", "tag"], + }); + + // test you can push an updated manifest without any other options + await client.pushPrompt(promptName, { + object: template2, + }); + + const pulledPrompt = await client._pullPrompt(promptName); + expect(pulledPrompt).toBeDefined(); + + const promptInfo = await client.getPrompt(promptName); + 
expect(promptInfo?.description).toBe("Test description"); + expect(promptInfo?.readme).toBe("Test readme"); + expect(promptInfo?.tags).toEqual(expect.arrayContaining(["test", "tag"])); + expect(promptInfo?.is_public).toBe(false); + + await client.deletePrompt(promptName); +}); + +test("Test pull prompt include model", async () => { + const client = new Client(); + const model = new ChatOpenAI({}); + const promptTemplate = PromptTemplate.fromTemplate( + "Tell me a joke about {topic}" + ); + const promptWithModel = promptTemplate.pipe(model); + + const promptName = `test_prompt_with_model_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: promptWithModel }); + + const pulledPrompt = await client._pullPrompt(promptName, { + includeModel: true, + }); + const rs: RunnableSequence = await load(pulledPrompt); + expect(rs).toBeDefined(); + expect(rs).toBeInstanceOf(RunnableSequence); + + await client.deletePrompt(promptName); +}); diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index d1b822d9e..694fb1e3c 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -6,6 +6,10 @@ import { getLangChainEnvVars, getLangChainEnvVarsMetadata, } from "../utils/env.js"; +import { + isVersionGreaterOrEqual, + parsePromptIdentifier, +} from "../utils/prompts.js"; describe("Client", () => { describe("createLLMExample", () => { @@ -91,10 +95,10 @@ describe("Client", () => { expect(result).toBe("http://example.com"); }); - it("should return 'http://localhost' if apiUrl is localhost", () => { + it("should return 'http://localhost:3000' if apiUrl is localhost", () => { const client = new Client({ apiUrl: "http://localhost/api" }); const result = (client as any).getHostUrl(); - expect(result).toBe("http://localhost"); + expect(result).toBe("http://localhost:3000"); }); it("should return the webUrl without '/api' if apiUrl contains '/api'", () => { @@ -115,6 +119,15 @@ describe("Client", () => { expect(result).toBe("https://dev.smith.langchain.com"); }); + it("should return 'https://eu.smith.langchain.com' if apiUrl contains 'eu'", () => { + const client = new Client({ + apiUrl: "https://eu.smith.langchain.com/api", + apiKey: "test-api-key", + }); + const result = (client as any).getHostUrl(); + expect(result).toBe("https://eu.smith.langchain.com"); + }); + it("should return 'https://smith.langchain.com' for any other apiUrl", () => { const client = new Client({ apiUrl: "https://smith.langchain.com/api", @@ -166,4 +179,62 @@ describe("Client", () => { }); }); }); + + describe("isVersionGreaterOrEqual", () => { + it("should return true if the version is greater or equal", () => { + // Test versions equal to 0.5.23 + expect(isVersionGreaterOrEqual("0.5.23", "0.5.23")).toBe(true); + + // Test versions greater than 0.5.23 + expect(isVersionGreaterOrEqual("0.5.24", "0.5.23")); + expect(isVersionGreaterOrEqual("0.6.0", "0.5.23")); + expect(isVersionGreaterOrEqual("1.0.0", "0.5.23")); + + // Test versions less than 0.5.23 + expect(isVersionGreaterOrEqual("0.5.22", "0.5.23")).toBe(false); + expect(isVersionGreaterOrEqual("0.5.0", "0.5.23")).toBe(false); + expect(isVersionGreaterOrEqual("0.4.99", "0.5.23")).toBe(false); + }); + }); + + describe("parsePromptIdentifier", () => { + it("should parse valid identifiers correctly", () => { + expect(parsePromptIdentifier("name")).toEqual(["-", "name", "latest"]); + expect(parsePromptIdentifier("owner/name")).toEqual([ + "owner", + "name", + "latest", + ]); + expect(parsePromptIdentifier("owner/name:commit")).toEqual([ + 
"owner", + "name", + "commit", + ]); + expect(parsePromptIdentifier("name:commit")).toEqual([ + "-", + "name", + "commit", + ]); + }); + + it("should throw an error for invalid identifiers", () => { + const invalidIdentifiers = [ + "", + "/", + ":", + "owner/", + "/name", + "owner//name", + "owner/name/", + "owner/name/extra", + ":commit", + ]; + + invalidIdentifiers.forEach((identifier) => { + expect(() => parsePromptIdentifier(identifier)).toThrowError( + `Invalid identifier format: ${identifier}` + ); + }); + }); + }); }); diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 198f66473..98ab6c6c8 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -1,27 +1,33 @@ -import { EvaluationResult } from "../evaluation/evaluator.js"; +import { + EvaluationResult, + EvaluationResults, +} from "../evaluation/evaluator.js"; import { evaluate } from "../evaluation/_runner.js"; -import { Example, Run } from "../schemas.js"; +import { Example, Run, TracerSession } from "../schemas.js"; import { Client } from "../index.js"; import { afterAll, beforeAll } from "@jest/globals"; -import { RunnableLambda } from "@langchain/core/runnables"; - -const TESTING_DATASET_NAME = "test_dataset_js_evaluate_123"; +import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables"; +import { v4 as uuidv4 } from "uuid"; +const TESTING_DATASET_NAME = `test_dataset_js_evaluate_${uuidv4()}`; +const TESTING_DATASET_NAME2 = `my_splits_ds_${uuidv4()}`; beforeAll(async () => { const client = new Client(); - // create a new dataset - await client.createDataset(TESTING_DATASET_NAME, { - description: - "For testing purposed. Is created & deleted for each test run.", - }); - // create examples - const res = await client.createExamples({ - inputs: [{ input: 1 }, { input: 2 }], - outputs: [{ output: 2 }, { output: 3 }], - datasetName: TESTING_DATASET_NAME, - }); - if (res.length !== 2) { - throw new Error("Failed to create examples"); + if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) { + // create a new dataset + await client.createDataset(TESTING_DATASET_NAME, { + description: + "For testing purposed. 
Is created & deleted for each test run.", + }); + // create examples + const res = await client.createExamples({ + inputs: [{ input: 1 }, { input: 2 }], + outputs: [{ output: 2 }, { output: 3 }], + datasetName: TESTING_DATASET_NAME, + }); + if (res.length !== 2) { + throw new Error("Failed to create examples"); + } } }); @@ -30,11 +36,17 @@ afterAll(async () => { await client.deleteDataset({ datasetName: TESTING_DATASET_NAME, }); + try { + await client.deleteDataset({ + datasetName: "my_splits_ds2", + }); + } catch { + //pass + } }); test("evaluate can evaluate", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -70,6 +82,34 @@ test("evaluate can evaluate", async () => { expect(secondRunResults.results).toHaveLength(0); }); +test("evaluate can repeat", async () => { + const targetFunc = (input: Record) => { + return { + foo: input.input + 1, + }; + }; + + const evalRes = await evaluate(targetFunc, { + data: TESTING_DATASET_NAME, + description: "Experiment from evaluate can evaluate integration test", + numRepetitions: 3, + }); + expect(evalRes.results).toHaveLength(6); + + for (let i = 0; i < 6; i++) { + expect(evalRes.results[i].run).toBeDefined(); + expect(evalRes.results[i].example).toBeDefined(); + expect(evalRes.results[i].evaluationResults).toBeDefined(); + const currRun = evalRes.results[i].run; + // The examples are not always in the same order, so it should always be 2 or 3 + expect(currRun.outputs?.foo).toBeGreaterThanOrEqual(2); + expect(currRun.outputs?.foo).toBeLessThanOrEqual(3); + + const firstRunResults = evalRes.results[i].evaluationResults; + expect(firstRunResults.results).toHaveLength(0); + } +}); + test("evaluate can evaluate with RunEvaluator evaluators", async () => { const targetFunc = (input: { input: number }) => { return { foo: input.input + 1 }; @@ -143,7 +183,6 @@ test("evaluate can evaluate with RunEvaluator evaluators", async () => { test("evaluate can evaluate with custom evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -215,7 +254,6 @@ test("evaluate can evaluate with custom evaluators", async () => { test("evaluate can evaluate with summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -273,7 +311,6 @@ test("evaluate can evaluate with summary evaluators", async () => { test.skip("can iterate over evaluate results", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -302,7 +339,6 @@ test.skip("can iterate over evaluate results", async () => { test("can pass multiple evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -323,12 +359,8 @@ test("can pass multiple evaluators", async () => { }); }; const evaluators = [ - { - evaluateRun: customEvaluatorOne, - }, - { - evaluateRun: customEvaluatorTwo, - }, + { evaluateRun: customEvaluatorOne }, + { evaluateRun: customEvaluatorTwo }, ]; const evalRes = await evaluate(targetFunc, { data: TESTING_DATASET_NAME, @@ -351,9 +383,89 @@ test("can pass multiple evaluators", async () => { ); }); +test("split info saved correctly", async () => { + const client = new Client(); + // create a new dataset + await client.createDataset(TESTING_DATASET_NAME2, { + description: + "For testing purposed. 
Is created & deleted for each test run.", + }); + // create examples + await client.createExamples({ + inputs: [{ input: 1 }, { input: 2 }, { input: 3 }], + outputs: [{ output: 2 }, { output: 3 }, { output: 4 }], + splits: [["test"], ["train"], ["validation", "test"]], + datasetName: TESTING_DATASET_NAME2, + }); + + const targetFunc = (input: Record) => { + return { + foo: input.input + 1, + }; + }; + await evaluate(targetFunc, { + data: client.listExamples({ datasetName: TESTING_DATASET_NAME2 }), + description: "splits info saved correctly", + }); + + const exp = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); + let myExp: TracerSession | null = null; + for await (const session of exp) { + myExp = session; + } + expect(myExp?.extra?.metadata?.dataset_splits.sort()).toEqual( + ["test", "train", "validation"].sort() + ); + + await evaluate(targetFunc, { + data: client.listExamples({ + datasetName: TESTING_DATASET_NAME2, + splits: ["test"], + }), + description: "splits info saved correctly", + }); + + const exp2 = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); + let myExp2: TracerSession | null = null; + for await (const session of exp2) { + if (myExp2 === null || session.start_time > myExp2.start_time) { + myExp2 = session; + } + } + + expect(myExp2?.extra?.metadata?.dataset_splits.sort()).toEqual( + ["test", "validation"].sort() + ); + + await evaluate(targetFunc, { + data: client.listExamples({ + datasetName: TESTING_DATASET_NAME2, + splits: ["train"], + }), + description: "splits info saved correctly", + }); + + const exp3 = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); + let myExp3: TracerSession | null = null; + for await (const session of exp3) { + if (myExp3 === null || session.start_time > myExp3.start_time) { + myExp3 = session; + } + } + + expect(myExp3?.extra?.metadata?.dataset_splits.sort()).toEqual( + ["train"].sort() + ); +}); + test("can pass multiple summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -404,7 +516,6 @@ test("can pass AsyncIterable of Example's to evaluator instead of dataset name", }); const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -438,7 +549,6 @@ test("can pass AsyncIterable of Example's to evaluator instead of dataset name", test("max concurrency works with custom evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -474,7 +584,6 @@ test("max concurrency works with custom evaluators", async () => { test("max concurrency works with summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -517,14 +626,14 @@ test("max concurrency works with summary evaluators", async () => { }); test("Target func can be a runnable", async () => { - const targetFunc = new RunnableLambda({ - func: (input: Record) => { - console.log("__input__", input); - return { - foo: input.input + 1, - }; - }, - }); + const targetFunc = RunnableSequence.from([ + RunnableLambda.from((input: Record) => ({ + foo: input.input + 1, + })).withConfig({ runName: "First Step" }), + RunnableLambda.from((input: { foo: number }) => ({ + foo: input.foo + 1, + })).withConfig({ runName: "Second Step" }), + ]); const customEvaluator = async (run: Run, example?: Example) => { return 
Promise.resolve({ @@ -560,6 +669,24 @@ test("Target func can be a runnable", async () => { expect(firstEvalResults.results).toHaveLength(1); expect(firstEvalResults.results[0].key).toEqual("key"); expect(firstEvalResults.results[0].score).toEqual(1); + + // check if the evaluated function has valid children + const gatheredChildRunNames = []; + const queue = [firstRun]; + const visited = new Set(); + while (queue.length > 0) { + const current = queue.shift(); + if (!current || visited.has(current.id)) continue; + visited.add(current.id); + if (current.child_runs) { + gatheredChildRunNames.push(...current.child_runs.map((run) => run.name)); + queue.push(...current.child_runs); + } + } + + expect(gatheredChildRunNames).toEqual( + expect.arrayContaining(["RunnableSequence", "First Step", "Second Step"]) + ); }); test("evaluate can accept array of examples", async () => { @@ -573,7 +700,6 @@ test("evaluate can accept array of examples", async () => { } const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -604,3 +730,43 @@ test("evaluate can accept array of examples", async () => { expect(firstEvalResults.evaluationResults.results).toHaveLength(1); expect(receivedCommentStrings).toEqual(expectedCommentStrings); }); + +test("evaluate accepts evaluators which return multiple feedback keys", async () => { + const targetFunc = (input: Record) => { + return { foo: input.input + 1 }; + }; + + const customEvaluator = ( + run: Run, + example?: Example + ): Promise => { + return Promise.resolve({ + results: [ + { + key: "first-key", + score: 1, + comment: `Run: ${run.id} Example: ${example?.id}`, + }, + { + key: "second-key", + score: 2, + comment: `Run: ${run.id} Example: ${example?.id}`, + }, + ], + }); + }; + + const evalRes = await evaluate(targetFunc, { + data: TESTING_DATASET_NAME, + evaluators: [customEvaluator], + description: "evaluate can evaluate with custom evaluators", + }); + + expect(evalRes.results).toHaveLength(2); + + const comment = `Run: ${evalRes.results[0].run.id} Example: ${evalRes.results[0].example.id}`; + expect(evalRes.results[0].evaluationResults.results).toMatchObject([ + { key: "first-key", score: 1, comment }, + { key: "second-key", score: 2, comment }, + ]); +}); diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 99058e77f..5b18884bb 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -1,6 +1,7 @@ import { evaluate } from "../evaluation/_runner.js"; import { evaluateComparative } from "../evaluation/evaluate_comparative.js"; import { Client } from "../index.js"; +import { waitUntilRunFound } from "./utils.js"; const TESTING_DATASET_NAME = "test_evaluate_comparative_js"; @@ -27,6 +28,8 @@ afterAll(async () => { describe("evaluate comparative", () => { test("basic", async () => { + const client = new Client(); + const firstEval = await evaluate( (input) => ({ foo: `first:${input.input}` }), { data: TESTING_DATASET_NAME } @@ -37,6 +40,12 @@ describe("evaluate comparative", () => { { data: TESTING_DATASET_NAME } ); + await Promise.all( + [firstEval, secondEval].flatMap(({ results }) => + results.flatMap(({ run }) => waitUntilRunFound(client, run.id)) + ) + ); + const pairwise = await evaluateComparative( [firstEval.experimentName, secondEval.experimentName], { @@ -49,7 +58,29 @@ describe("evaluate comparative", () => { } ); - // TODO: we should a) wait for runs to be persisted, b) allow passing 
runnables / traceables directly - expect(pairwise.results.length).toBeGreaterThanOrEqual(1); + expect(pairwise.results.length).toEqual(2); + }); + + test("pass directly", async () => { + const pairwise = await evaluateComparative( + [ + evaluate((input) => ({ foo: `first:${input.input}` }), { + data: TESTING_DATASET_NAME, + }), + evaluate((input) => ({ foo: `second:${input.input}` }), { + data: TESTING_DATASET_NAME, + }), + ], + { + evaluators: [ + (runs) => ({ + key: "latter_precedence", + scores: Object.fromEntries(runs.map((run, i) => [run.id, i % 2])), + }), + ], + } + ); + + expect(pairwise.results.length).toEqual(2); }); }); diff --git a/js/src/tests/lcls_handoff.int.test.ts b/js/src/tests/lcls_handoff.int.test.ts index ff8382a75..3a064a07f 100644 --- a/js/src/tests/lcls_handoff.int.test.ts +++ b/js/src/tests/lcls_handoff.int.test.ts @@ -35,19 +35,18 @@ test.concurrent( }; // Define the two nodes we will cycle between - workflow.addNode( - "agent", - new RunnableLambda({ - func: async () => new HumanMessage({ content: "Hello!" }), - }) - ); - workflow.addNode("action", new RunnableLambda({ func: myFunc })); + workflow + .addNode( + "agent", + new RunnableLambda({ + func: async () => new HumanMessage({ content: "Hello!" }), + }) + ) + .addNode("action", new RunnableLambda({ func: myFunc })) + .addEdge("__start__", "agent") + .addEdge("agent", "action") + .addEdge("action", "__end__"); - // Set the entrypoint as `agent` - // This means that this node is the first one called - workflow.setEntryPoint("agent"); - workflow.addEdge("agent", "action"); - workflow.setFinishPoint("action"); const app = workflow.compile(); const tracer = new LangChainTracer({ projectName }); const client = new Client({ diff --git a/js/src/tests/run_trees.test.ts b/js/src/tests/run_trees.test.ts index 9567ed832..c9d7ea49e 100644 --- a/js/src/tests/run_trees.test.ts +++ b/js/src/tests/run_trees.test.ts @@ -89,3 +89,26 @@ test("serializing run tree", () => { ], }); }); + +test("distributed", () => { + const parent = new RunTree({ + name: "parent_1", + id: "00000000-0000-0000-0000-00000000000", + start_time: Date.parse("2021-05-03T00:00:00.000Z"), + }); + + const serialized = parent.toHeaders(); + + const child2 = RunTree.fromHeaders(serialized)?.createChild({ + name: "child_2", + id: "00000000-0000-0000-0000-00000000001", + start_time: Date.parse("2021-05-03T00:00:01.000Z"), + }); + + expect(JSON.parse(JSON.stringify(child2))).toMatchObject({ + name: "child_2", + run_type: "chain", + dotted_order: + "20210503T000000000001Z00000000-0000-0000-0000-00000000000.20210503T000001000002Z00000000-0000-0000-0000-00000000001", + }); +}); diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 94fb50367..4c755c31e 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -1,10 +1,7 @@ -import type { RunTree, RunTreeConfig } from "../run_trees.js"; -import { ROOT, traceable } from "../traceable.js"; +import { RunTree, RunTreeConfig } from "../run_trees.js"; +import { ROOT, traceable, withRunTree } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; -import { FakeChatModel } from "@langchain/core/utils/testing"; -import { ChatPromptTemplate } from "@langchain/core/prompts"; -import { StringOutputParser } from "@langchain/core/output_parsers"; test("basic traceable implementation", async () => { const { client, callSpy } = mockClient(); @@ -118,6 +115,117 @@ test("passing run tree manually", 
async () => { }); }); +describe("distributed tracing", () => { + it("default", async () => { + const { client, callSpy } = mockClient(); + const child = traceable( + async (depth = 0): Promise => { + if (depth < 2) return child(depth + 1); + return 3; + }, + { name: "child" } + ); + + const parent = traceable(async function parent() { + const first = await child(); + const second = await child(); + return first + second; + }); + + const clientRunTree = new RunTree({ + name: "client", + client, + tracingEnabled: true, + }); + await clientRunTree.postRun(); + + // do nothing with the client run tree + + await clientRunTree.patchRun(); + + const response = await withRunTree(clientRunTree, () => parent()); + expect(response).toBe(6); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "client:0", + "parent:1", + "child:2", + "child:3", + "child:4", + "child:5", + "child:6", + "child:7", + ], + edges: [ + ["client:0", "parent:1"], + ["parent:1", "child:2"], + ["child:2", "child:3"], + ["child:3", "child:4"], + ["parent:1", "child:5"], + ["child:5", "child:6"], + ["child:6", "child:7"], + ], + }); + }); + + it("sync function", async () => { + const { client, callSpy } = mockClient(); + const child = traceable( + async (depth = 0): Promise => { + if (depth < 2) return child(depth + 1); + return 3; + }, + { name: "child" } + ); + + const parent = traceable(async function parent() { + const first = await child(); + const second = await child(); + return first + second; + }); + + const clientRunTree = new RunTree({ + name: "client", + client, + tracingEnabled: true, + }); + await clientRunTree.postRun(); + await clientRunTree.patchRun(); + + let promiseOutside: Promise = Promise.resolve(); + + const response = await withRunTree(clientRunTree, () => { + promiseOutside = parent(); + }); + + expect(response).toBeUndefined(); + await promiseOutside; + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "client:0", + "parent:1", + "child:2", + "child:3", + "child:4", + "child:5", + "child:6", + "child:7", + ], + edges: [ + ["client:0", "parent:1"], + ["parent:1", "child:2"], + ["child:2", "child:3"], + ["child:3", "child:4"], + ["parent:1", "child:5"], + ["child:5", "child:6"], + ["child:6", "child:7"], + ], + }); + }); +}); + describe("async generators", () => { test("success", async () => { const { client, callSpy } = mockClient(); @@ -405,6 +513,44 @@ describe("async generators", () => { }, }); }); + + test("iterable with props", async () => { + const { client, callSpy } = mockClient(); + + const iterableTraceable = traceable( + function iterableWithProps() { + return { + *[Symbol.asyncIterator]() { + yield 0; + }, + prop: "value", + }; + }, + { + client, + tracingEnabled: true, + } + ); + + const numbers: number[] = []; + const iterableWithProps = await iterableTraceable(); + for await (const num of iterableWithProps) { + numbers.push(num); + } + + expect(numbers).toEqual([0]); + + expect(iterableWithProps.prop).toBe("value"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["iterableWithProps:0"], + edges: [], + data: { + "iterableWithProps:0": { + outputs: { outputs: [0] }, + }, + }, + }); + }); }); describe("deferred input", () => { @@ -648,41 +794,6 @@ describe("deferred input", () => { }); }); -describe("langchain", () => { - test.skip("bound", async () => { - const { client, callSpy } = mockClient(); - - const llm = new FakeChatModel({}); - const prompt = ChatPromptTemplate.fromMessages<{ text: string 
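// A minimal sketch (not part of this patch, and assuming the package's "langsmith/run_trees"
// and "langsmith/traceable" subpath exports) of the distributed tracing handoff exercised by
// the RunTree.toHeaders/fromHeaders and withRunTree tests in this change set: one service
// serializes its RunTree into HTTP headers, the receiving service rebuilds the tree and nests
// its own traceables under it. The service URL and function names are illustrative assumptions.
import { RunTree } from "langsmith/run_trees";
import { traceable, withRunTree } from "langsmith/traceable";

// Service A: start a root run and propagate its context via headers.
export async function callServiceB() {
  const rootRun = new RunTree({ name: "service-a-handler" });
  await rootRun.postRun();
  await fetch("https://service-b.example.com/work", { headers: rootRun.toHeaders() });
  await rootRun.patchRun();
}

// Service B: rebuild the run tree from the incoming headers and run traceables under it.
const doWork = traceable(async () => "done", { name: "service-b-work" });

export async function handleRequest(incomingHeaders: Record<string, string>) {
  const parentRunTree = RunTree.fromHeaders(incomingHeaders);
  return parentRunTree ? withRunTree(parentRunTree, () => doWork()) : doWork();
}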
}>([ - ["human", "{text}"], - ]); - const parser = new StringOutputParser(); - const chain = prompt.pipe(llm).pipe(parser); - - const main = traceable(chain.invoke.bind(chain), { - client, - tracingEnabled: true, - }); - - const result = await main({ text: "Hello world" }); - expect(result).toEqual("Hello world"); - - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "bound invoke:0", - "ChatPromptTemplate:1", - "FakeChatModel:2", - "StringOutputParser:3", - ], - edges: [ - ["bound invoke:0", "ChatPromptTemplate:1"], - ["ChatPromptTemplate:1", "FakeChatModel:2"], - ["FakeChatModel:2", "StringOutputParser:3"], - ], - }); - }); -}); - describe("generator", () => { function gatherAll(iterator: Iterator) { const chunks: unknown[] = []; diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts new file mode 100644 index 000000000..45986dfe6 --- /dev/null +++ b/js/src/tests/traceable_langchain.test.ts @@ -0,0 +1,517 @@ +import { traceable } from "../traceable.js"; +import { getAssumedTreeFromCalls } from "./utils/tree.js"; +import { mockClient } from "./utils/mock_client.js"; +import { FakeChatModel } from "@langchain/core/utils/testing"; +import { ChatPromptTemplate } from "@langchain/core/prompts"; +import { StringOutputParser } from "@langchain/core/output_parsers"; +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; +import { BaseMessage, HumanMessage } from "@langchain/core/messages"; +import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; +import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; +import { RunnableLambda } from "@langchain/core/runnables"; + +describe("to langchain", () => { + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + const chain = prompt.pipe(llm).pipe(parser); + + test("invoke", async () => { + const { client, callSpy } = mockClient(); + + const main = traceable( + async (input: { text: string }) => { + return chain.invoke(input, { + callbacks: await getLangchainCallbacks(), + }); + }, + { + name: "main", + client, + tracingEnabled: true, + tags: ["welcome"], + metadata: { hello: "world" }, + } + ); + + const result = await main({ text: "Hello world" }); + expect(result).toEqual("Hello world"); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "RunnableSequence:1", + "ChatPromptTemplate:2", + "FakeChatModel:3", + "StrOutputParser:4", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["RunnableSequence:1", "ChatPromptTemplate:2"], + ["RunnableSequence:1", "FakeChatModel:3"], + ["RunnableSequence:1", "StrOutputParser:4"], + ], + data: { + "main:0": { + inputs: { text: "Hello world" }, + outputs: { outputs: "Hello world" }, + tags: ["welcome"], + extra: { metadata: { hello: "world" } }, + }, + }, + }); + }); + + test("stream", async () => { + const { client, callSpy } = mockClient(); + + const main = traceable( + async function* main(input: { text: string }) { + for await (const token of await chain.stream(input, { + callbacks: await getLangchainCallbacks(), + })) { + yield token; + } + }, + { client, tracingEnabled: true } + ); + + const result = []; + for await (const token of main({ text: "Hello world" })) { + result.push(token); + } + + expect(result).toEqual(["Hello world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + 
"main:0", + "RunnableSequence:1", + "ChatPromptTemplate:2", + "FakeChatModel:3", + "StrOutputParser:4", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["RunnableSequence:1", "ChatPromptTemplate:2"], + ["RunnableSequence:1", "FakeChatModel:3"], + ["RunnableSequence:1", "StrOutputParser:4"], + ], + }); + }); + + test("batch", async () => { + const { client, callSpy } = mockClient(); + + const main = traceable( + async (input: { texts: string[] }) => { + return chain.batch( + input.texts.map((text) => ({ text })), + { callbacks: await getLangchainCallbacks() } + ); + }, + { name: "main", client, tracingEnabled: true } + ); + + const result = await main({ texts: ["Hello world", "Who are you?"] }); + + expect(result).toEqual(["Hello world", "Who are you?"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "RunnableSequence:1", + "RunnableSequence:2", + "ChatPromptTemplate:3", + "ChatPromptTemplate:4", + "FakeChatModel:5", + "FakeChatModel:6", + "StrOutputParser:7", + "StrOutputParser:8", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["main:0", "RunnableSequence:2"], + ["RunnableSequence:1", "ChatPromptTemplate:3"], + ["RunnableSequence:2", "ChatPromptTemplate:4"], + ["RunnableSequence:1", "FakeChatModel:5"], + ["RunnableSequence:2", "FakeChatModel:6"], + ["RunnableSequence:1", "StrOutputParser:7"], + ["RunnableSequence:2", "StrOutputParser:8"], + ], + }); + }); +}); + +describe("to traceable", () => { + test("invoke", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + + const addValueTraceable = traceable( + (msg: BaseMessage) => + new HumanMessage({ content: msg.content + " world" }), + { name: "add_negligible_value" } + ); + + const chain = prompt + .pipe(llm) + .pipe(RunnableTraceable.from(addValueTraceable)) + .pipe(parser); + + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + const tracer = new LangChainTracer({ client }); + const response = await chain.invoke( + { text: "Hello" }, + { callbacks: [tracer] } + ); + + // callbacks are backgrounded by default + await awaitAllCallbacks(); + + expect(response).toEqual("Hello world"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "RunnableSequence:0", + "ChatPromptTemplate:1", + "FakeChatModel:2", + "add_negligible_value:3", + "StrOutputParser:4", + ], + edges: [ + ["RunnableSequence:0", "ChatPromptTemplate:1"], + ["RunnableSequence:0", "FakeChatModel:2"], + ["RunnableSequence:0", "add_negligible_value:3"], + ["RunnableSequence:0", "StrOutputParser:4"], + ], + }); + }); + + test("array stream", async () => { + const { client, callSpy } = mockClient(); + + const source = RunnableTraceable.from( + traceable(function (input: { text: string }) { + return input.text.split(" "); + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + { callbacks: [new LangChainTracer({ client })] } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual([["Hello", "world"]]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); + + test("generator stream", async () => { + const { 
client, callSpy } = mockClient(); + + const source = RunnableTraceable.from( + traceable(function* (input: { text: string }) { + const chunks = input.text.split(" "); + for (const chunk of chunks) { + yield chunk; + } + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + { callbacks: [new LangChainTracer({ client })] } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual(["Hello", "world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); + + test("readable stream", async () => { + const { client, callSpy } = mockClient(); + + const source = RunnableTraceable.from( + traceable(async function (input: { text: string }) { + const readStream = new ReadableStream({ + async pull(controller) { + for (const item of input.text.split(" ")) { + controller.enqueue(item); + } + controller.close(); + }, + }); + + return readStream; + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + { callbacks: [new LangChainTracer({ client })] } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual(["Hello", "world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); + + test("async generator stream", async () => { + const { client, callSpy } = mockClient(); + const source = RunnableTraceable.from( + traceable(async function* (input: { text: string }) { + const chunks = input.text.split(" "); + for (const chunk of chunks) { + yield chunk; + } + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + callbacks: [new LangChainTracer({ client })], + } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual(["Hello", "world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); +}); + +test("explicit nested", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + const chain = prompt.pipe(llm).pipe(parser).withConfig({ runName: "chain" }); + + const wrappedModel = new RunnableTraceable({ + func: traceable( + async (value: { input: string }) => { + const callbacks = await getLangchainCallbacks(); + + return chain.invoke( + { text: `Wrapped input: ${value.input}` }, + { callbacks } + ); + }, + { name: "wrappedModel" } + ), + }); + + const main = traceable( + async () => { + return { + response: [ + await wrappedModel.invoke( + { input: "Are you ready?" }, + { callbacks: await getLangchainCallbacks() } + ), + await wrappedModel.invoke( + { input: "I said, Are. You. Ready?" }, + { callbacks: await getLangchainCallbacks() } + ), + ], + }; + }, + { name: "main", client, tracingEnabled: true } + ); + + const result = await main(); + await awaitAllCallbacks(); + + expect(result).toEqual({ + response: [ + "Wrapped input: Are you ready?", + "Wrapped input: I said, Are. You. 
Ready?", + ], + }); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "wrappedModel:1", + "chain:2", + "ChatPromptTemplate:3", + "FakeChatModel:4", + "StrOutputParser:5", + "wrappedModel:6", + "chain:7", + "ChatPromptTemplate:8", + "FakeChatModel:9", + "StrOutputParser:10", + ], + edges: [ + ["main:0", "wrappedModel:1"], + ["wrappedModel:1", "chain:2"], + ["chain:2", "ChatPromptTemplate:3"], + ["chain:2", "FakeChatModel:4"], + ["chain:2", "StrOutputParser:5"], + ["main:0", "wrappedModel:6"], + ["wrappedModel:6", "chain:7"], + ["chain:7", "ChatPromptTemplate:8"], + ["chain:7", "FakeChatModel:9"], + ["chain:7", "StrOutputParser:10"], + ], + }); +}); + +// skip until the @langchain/core 0.2.17 is out +describe.skip("automatic tracing", () => { + it("root langchain", async () => { + const { callSpy, langChainTracer } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); + + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); + + const rootLC = RunnableLambda.from(async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }); + + expect( + await rootLC.invoke( + {}, + { callbacks: [langChainTracer], runName: "rootLC" } + ) + ).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLC:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLC:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLC:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + }); + + it("root traceable", async () => { + const { client, callSpy } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); + + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); + + const rootLS = traceable( + async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }, + { name: "rootLS", client, tracingEnabled: true } + ); + + expect(await rootLS()).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLS:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLS:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLS:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + }); +}); diff --git a/js/src/tests/utils.ts b/js/src/tests/utils.ts index 
a7e4d3197..faf458542 100644 --- a/js/src/tests/utils.ts +++ b/js/src/tests/utils.ts @@ -1,4 +1,8 @@ import { Client } from "../client.js"; +import { v4 as uuidv4 } from "uuid"; +// eslint-disable-next-line import/no-extraneous-dependencies +import { faker } from "@faker-js/faker"; +import { RunCreate } from "../schemas.js"; export async function toArray(iterable: AsyncIterable): Promise { const result: T[] = []; @@ -11,7 +15,8 @@ export async function toArray(iterable: AsyncIterable): Promise { export async function waitUntil( condition: () => Promise, timeout: number, - interval: number + interval: number, + prefix?: string ): Promise { const start = Date.now(); while (Date.now() - start < timeout) { @@ -25,7 +30,9 @@ export async function waitUntil( await new Promise((resolve) => setTimeout(resolve, interval)); } const elapsed = Date.now() - start; - throw new Error(`Timeout after ${elapsed / 1000}s`); + throw new Error( + [prefix, `Timeout after ${elapsed / 1000}s`].filter(Boolean).join(": ") + ); } export async function pollRunsUntilCount( @@ -93,7 +100,27 @@ export async function waitUntilRunFound( } }, 30_000, - 5_000 + 5_000, + `Waiting for run "${runId}"` + ); +} + +export async function waitUntilProjectFound( + client: Client, + projectName: string +) { + return waitUntil( + async () => { + try { + await client.readProject({ projectName }); + return true; + } catch (e) { + return false; + } + }, + 10_000, + 5_000, + `Waiting for project "${projectName}"` ); } @@ -112,3 +139,26 @@ export function sanitizePresignedUrls(payload: unknown) { return value; }); } + +/** + * Factory which returns a list of `RunCreate` objects. + * @param {number} count Number of runs to create (default: 10) + * @returns {Array} List of `RunCreate` objects + */ +export function createRunsFactory( + projectName: string, + count = 10 +): Array { + return Array.from({ length: count }).map((_, idx) => ({ + id: uuidv4(), + name: `${idx}-${faker.lorem.words()}`, + run_type: faker.helpers.arrayElement(["tool", "chain", "llm", "retriever"]), + inputs: { + question: faker.lorem.sentence(), + }, + outputs: { + answer: faker.lorem.sentence(), + }, + project_name: projectName, + })); +} diff --git a/js/src/tests/utils/mock_client.ts b/js/src/tests/utils/mock_client.ts index b46675865..2cf8bf9c6 100644 --- a/js/src/tests/utils/mock_client.ts +++ b/js/src/tests/utils/mock_client.ts @@ -1,12 +1,24 @@ // eslint-disable-next-line import/no-extraneous-dependencies import { jest } from "@jest/globals"; import { Client } from "../../index.js"; +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -export const mockClient = () => { - const client = new Client({ autoBatchTracing: false }); +type ClientParams = Exclude[0], undefined>; +export const mockClient = (config?: Omit) => { + const client = new Client({ + ...config, + apiKey: "MOCK", + autoBatchTracing: false, + }); const callSpy = jest .spyOn((client as any).caller, "call") .mockResolvedValue({ ok: true, text: () => "" }); - return { client, callSpy }; + const langChainTracer = new LangChainTracer({ + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore Overriden client + client, + }); + + return { client, callSpy, langChainTracer }; }; diff --git a/js/src/tests/wrapped_ai_sdk.int.test.ts b/js/src/tests/wrapped_ai_sdk.int.test.ts new file mode 100644 index 000000000..ddc221741 --- /dev/null +++ b/js/src/tests/wrapped_ai_sdk.int.test.ts @@ -0,0 +1,77 @@ +import { openai } from "@ai-sdk/openai"; +import { + generateObject, + 
generateText, + streamObject, + streamText, + tool, +} from "ai"; +import { z } from "zod"; +import { wrapAISDKModel } from "../wrappers/vercel.js"; + +test("AI SDK generateText", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { text } = await generateText({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + }); + console.log(text); +}); + +test("AI SDK generateText with a tool", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { text } = await generateText({ + model: modelWithTracing, + prompt: + "Write a vegetarian lasagna recipe for 4 people. Get ingredients first.", + tools: { + getIngredients: tool({ + description: "get a list of ingredients", + parameters: z.object({ + ingredients: z.array(z.string()), + }), + execute: async () => + JSON.stringify(["pasta", "tomato", "cheese", "onions"]), + }), + }, + maxToolRoundtrips: 2, + }); + console.log(text); +}); + +test("AI SDK generateObject", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { object } = await generateObject({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + schema: z.object({ + ingredients: z.array(z.string()), + }), + }); + console.log(object); +}); + +test("AI SDK streamText", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { textStream } = await streamText({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + }); + for await (const chunk of textStream) { + console.log(chunk); + } +}); + +test("AI SDK streamObject", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { partialObjectStream } = await streamObject({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + schema: z.object({ + ingredients: z.array(z.string()), + }), + }); + for await (const chunk of partialObjectStream) { + console.log(chunk); + } +}); diff --git a/js/src/tests/wrapped_openai.int.test.ts b/js/src/tests/wrapped_openai.int.test.ts index 95962b979..be80e895d 100644 --- a/js/src/tests/wrapped_openai.int.test.ts +++ b/js/src/tests/wrapped_openai.int.test.ts @@ -61,7 +61,7 @@ test.concurrent("chat.completions", async () => { stream: true, }); - const originalChoices = []; + const originalChoices: unknown[] = []; for await (const chunk of originalStream) { originalChoices.push(chunk.choices); } @@ -74,7 +74,7 @@ test.concurrent("chat.completions", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: unknown[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -126,7 +126,7 @@ test.concurrent("chat.completions", async () => { } ); - const patchedChoices2 = []; + const patchedChoices2: unknown[] = []; for await (const chunk of patchedStreamWithMetadata) { patchedChoices2.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -246,7 +246,7 @@ test.concurrent("chat completions with tool calling", async () => { stream: true, }); - const originalChoices = []; + const originalChoices: any[] = []; for await (const chunk of originalStream) { originalChoices.push(chunk.choices); } @@ -264,7 +264,7 @@ test.concurrent("chat completions with tool calling", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: any[] = []; for await (const chunk of 
patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -305,7 +305,7 @@ test.concurrent("chat completions with tool calling", async () => { } ); - const patchedChoices2 = []; + const patchedChoices2: any[] = []; for await (const chunk of patchedStream2) { patchedChoices2.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -322,6 +322,10 @@ test.concurrent("chat completions with tool calling", async () => { // eslint-disable-next-line @typescript-eslint/no-explicit-any expect(JSON.parse((call[2] as any).body).extra.metadata).toEqual({ thing1: "thing2", + ls_model_name: "gpt-3.5-turbo", + ls_model_type: "chat", + ls_provider: "openai", + ls_temperature: 0, }); } callSpy.mockClear(); @@ -364,7 +368,7 @@ test.concurrent("completions", async () => { stream: true, }); - const originalChoices = []; + const originalChoices: unknown[] = []; for await (const chunk of originalStream) { originalChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -380,7 +384,7 @@ test.concurrent("completions", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: unknown[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -411,7 +415,7 @@ test.concurrent("completions", async () => { } ); - const patchedChoices2 = []; + const patchedChoices2: unknown[] = []; for await (const chunk of patchedStream2) { patchedChoices2.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -441,7 +445,7 @@ test.skip("with initialization time config", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: unknown[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output diff --git a/js/src/traceable.ts b/js/src/traceable.ts index cfec78c76..dc43af0d3 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -1,4 +1,4 @@ -import { AsyncLocalStorage } from "async_hooks"; +import { AsyncLocalStorage } from "node:async_hooks"; import { RunTree, @@ -7,141 +7,26 @@ import { isRunTree, isRunnableConfigLike, } from "./run_trees.js"; -import { KVMap } from "./schemas.js"; -import { getEnvironmentVariable } from "./utils/env.js"; - -function isPromiseMethod( - x: string | symbol -): x is "then" | "catch" | "finally" { - if (x === "then" || x === "catch" || x === "finally") { - return true; - } - return false; -} - -const asyncLocalStorage = new AsyncLocalStorage(); - -export const ROOT = Symbol("langsmith:traceable:root"); - -export type RunTreeLike = RunTree; - -type SmartPromise = T extends AsyncGenerator - ? T - : T extends Promise - ? T - : Promise; - -type WrapArgReturnPair = Pair extends [ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - infer Args extends any[], - infer Return -] - ? Args extends [RunTreeLike, ...infer RestArgs] - ? { - ( - runTree: RunTreeLike | typeof ROOT, - ...args: RestArgs - ): SmartPromise; - (config: RunnableConfigLike, ...args: RestArgs): SmartPromise; - } - : { - (...args: Args): SmartPromise; - (runTree: RunTreeLike, ...rest: Args): SmartPromise; - (config: RunnableConfigLike, ...args: Args): SmartPromise; - } - : never; - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( - x: infer I -) => void - ? 
I - : never; - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export type TraceableFunction any> = - // function overloads are represented as intersections rather than unions - // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 - Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - (...args: infer A4): infer R4; - (...args: infer A5): infer R5; - } - ? UnionToIntersection< - WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]> - > - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - (...args: infer A4): infer R4; - } - ? UnionToIntersection< - WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]> - > - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - } - ? UnionToIntersection> - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - } - ? UnionToIntersection> - : Func extends { - (...args: infer A1): infer R1; - } - ? UnionToIntersection> - : never; - -const isAsyncIterable = (x: unknown): x is AsyncIterable => - x != null && - typeof x === "object" && - // eslint-disable-next-line @typescript-eslint/no-explicit-any - typeof (x as any)[Symbol.asyncIterator] === "function"; - -const GeneratorFunction = function* () {}.constructor; - -const isIteratorLike = (x: unknown): x is Iterator => - x != null && - typeof x === "object" && - "next" in x && - typeof x.next === "function"; - -const isGenerator = (x: unknown): x is Generator => - // eslint-disable-next-line no-instanceof/no-instanceof - x != null && typeof x === "function" && x instanceof GeneratorFunction; - -const isThenable = (x: unknown): x is Promise => - x != null && - typeof x === "object" && - "then" in x && - typeof x.then === "function"; - -const isReadableStream = (x: unknown): x is ReadableStream => - x != null && - typeof x === "object" && - "getReader" in x && - typeof x.getReader === "function"; - -const tracingIsEnabled = (tracingEnabled?: boolean): boolean => { - if (tracingEnabled !== undefined) { - return tracingEnabled; - } - const envVars = [ - "LANGSMITH_TRACING_V2", - "LANGCHAIN_TRACING_V2", - "LANGSMITH_TRACING", - "LANGCHAIN_TRACING", - ]; - return Boolean( - envVars.find((envVar) => getEnvironmentVariable(envVar) === "true") - ); -}; +import { InvocationParamsSchema, KVMap } from "./schemas.js"; +import { isTracingEnabled } from "./env.js"; +import { + ROOT, + AsyncLocalStorageProviderSingleton, +} from "./singletons/traceable.js"; +import { TraceableFunction } from "./singletons/types.js"; +import { + isKVMap, + isReadableStream, + isAsyncIterable, + isIteratorLike, + isThenable, + isGenerator, + isPromiseMethod, +} from "./utils/asserts.js"; + +AsyncLocalStorageProviderSingleton.initializeGlobalInstance( + new AsyncLocalStorage() +); const handleRunInputs = (rawInputs: unknown[]): KVMap => { const firstInput = rawInputs[0]; @@ -153,6 +38,7 @@ const handleRunInputs = (rawInputs: unknown[]): KVMap => { if (rawInputs.length > 1) { return { args: rawInputs }; } + if (isKVMap(firstInput)) { return firstInput; } @@ -167,16 +53,28 @@ const handleRunOutputs = (rawOutputs: unknown): KVMap => { return { outputs: rawOutputs }; }; -const getTracingRunTree = ( +const getTracingRunTree = ( runTree: RunTree, - inputs: unknown[] + inputs: Args, + getInvocationParams: + | ((...args: Args) => InvocationParamsSchema | 
undefined) + | undefined ): RunTree | undefined => { - const tracingEnabled_ = tracingIsEnabled(runTree.tracingEnabled); - if (!tracingEnabled_) { + if (!isTracingEnabled(runTree.tracingEnabled)) { return undefined; } runTree.inputs = handleRunInputs(inputs); + + const invocationParams = getInvocationParams?.(...inputs); + if (invocationParams != null) { + runTree.extra ??= {}; + runTree.extra.metadata = { + ...invocationParams, + ...runTree.extra.metadata, + }; + } + return runTree; }; @@ -381,13 +279,31 @@ export function traceable any>( // eslint-disable-next-line @typescript-eslint/no-explicit-any aggregator?: (args: any[]) => any; argsConfigPath?: [number] | [number, string]; + __finalTracedIteratorKey?: string; + + /** + * Extract invocation parameters from the arguments of the traced function. + * This is useful for LangSmith to properly track common metadata like + * provider, model name and temperature. + * + * @param args Arguments of the traced function + * @returns Key-value map of the invocation parameters, which will be merged with the existing metadata + */ + getInvocationParams?: ( + ...args: Parameters + ) => InvocationParamsSchema | undefined; } ) { type Inputs = Parameters; - const { aggregator, argsConfigPath, ...runTreeConfig } = config ?? {}; + const { + aggregator, + argsConfigPath, + __finalTracedIteratorKey, + ...runTreeConfig + } = config ?? {}; const traceableFunc = ( - ...args: Inputs | [RunTreeLike, ...Inputs] | [RunnableConfigLike, ...Inputs] + ...args: Inputs | [RunTree, ...Inputs] | [RunnableConfigLike, ...Inputs] ) => { let ensuredConfig: RunTreeConfig; try { @@ -440,6 +356,8 @@ export function traceable any>( }; } + const asyncLocalStorage = AsyncLocalStorageProviderSingleton.getInstance(); + // TODO: deal with possible nested promises and async iterables const processedArgs = args as unknown as Inputs; for (let i = 0; i < processedArgs.length; i++) { @@ -454,13 +372,14 @@ export function traceable any>( return [ getTracingRunTree( RunTree.fromRunnableConfig(firstArg, ensuredConfig), - restArgs + restArgs as Inputs, + config?.getInvocationParams ), restArgs as Inputs, ]; } - // legacy CallbackManagerRunTree used in runOnDataset + // deprecated: legacy CallbackManagerRunTree used in runOnDataset // override ALS and do not pass-through the run tree if ( isRunTree(firstArg) && @@ -477,7 +396,8 @@ export function traceable any>( firstArg === ROOT ? 
new RunTree(ensuredConfig) : firstArg.createChild(ensuredConfig), - restArgs + restArgs as Inputs, + config?.getInvocationParams ); return [currentRunTree, [currentRunTree, ...restArgs] as Inputs]; @@ -490,7 +410,8 @@ export function traceable any>( return [ getTracingRunTree( prevRunFromStore.createChild(ensuredConfig), - processedArgs + processedArgs, + config?.getInvocationParams ), processedArgs as Inputs, ]; @@ -498,7 +419,8 @@ export function traceable any>( const currentRunTree = getTracingRunTree( new RunTree(ensuredConfig), - processedArgs + processedArgs, + config?.getInvocationParams ); return [currentRunTree, processedArgs as Inputs]; })(); @@ -518,14 +440,54 @@ export function traceable any>( return chunks; } - async function* wrapAsyncGeneratorForTracing( - iterable: AsyncIterable, + function tapReadableStreamForTracing( + stream: ReadableStream, + snapshot: ReturnType | undefined + ) { + const reader = stream.getReader(); + let finished = false; + const chunks: unknown[] = []; + + const tappedStream = new ReadableStream({ + async start(controller) { + // eslint-disable-next-line no-constant-condition + while (true) { + const result = await (snapshot + ? snapshot(() => reader.read()) + : reader.read()); + if (result.done) { + finished = true; + await currentRunTree?.end( + handleRunOutputs(await handleChunks(chunks)) + ); + await handleEnd(); + controller.close(); + break; + } + chunks.push(result.value); + controller.enqueue(result.value); + } + }, + async cancel(reason) { + if (!finished) await currentRunTree?.end(undefined, "Cancelled"); + await currentRunTree?.end( + handleRunOutputs(await handleChunks(chunks)) + ); + await handleEnd(); + return reader.cancel(reason); + }, + }); + + return tappedStream; + } + + async function* wrapAsyncIteratorForTracing( + iterator: AsyncIterator, snapshot: ReturnType | undefined ) { let finished = false; const chunks: unknown[] = []; try { - const iterator = iterable[Symbol.asyncIterator](); while (true) { const { value, done } = await (snapshot ? 
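// A hedged sketch (not part of this patch) of how application code might use the
// `getInvocationParams` option documented above. The ls_* field names are taken from the
// metadata assertions in the wrapped OpenAI tests elsewhere in this change set; the traced
// function, its parameter shape, and the import path are illustrative assumptions.
import { traceable } from "langsmith/traceable";

export const tracedChatCall = traceable(
  async (params: { model: string; temperature?: number; messages: string[] }) => {
    // ... call the underlying chat model here ...
    return { content: "..." };
  },
  {
    name: "chat-call",
    // Merged into the run's metadata so LangSmith can track provider, model and temperature.
    getInvocationParams: (params) => ({
      ls_provider: "openai",
      ls_model_type: "chat",
      ls_model_name: params.model,
      ls_temperature: params.temperature,
    }),
  }
);
// Example call: await tracedChatCall({ model: "gpt-4o-mini", temperature: 0, messages: ["Hi"] });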
snapshot(() => iterator.next()) @@ -549,6 +511,19 @@ export function traceable any>( } } + function wrapAsyncGeneratorForTracing( + iterable: AsyncIterable, + snapshot: ReturnType | undefined + ) { + if (isReadableStream(iterable)) { + return tapReadableStreamForTracing(iterable, snapshot); + } + const iterator = iterable[Symbol.asyncIterator](); + const wrappedIterator = wrapAsyncIteratorForTracing(iterator, snapshot); + iterable[Symbol.asyncIterator] = () => wrappedIterator; + return iterable; + } + async function handleEnd() { const onEnd = config?.on_end; if (onEnd) { @@ -588,6 +563,25 @@ export function traceable any>( return wrapAsyncGeneratorForTracing(returnValue, snapshot); } + if ( + !Array.isArray(returnValue) && + typeof returnValue === "object" && + returnValue != null && + __finalTracedIteratorKey !== undefined && + isAsyncIterable( + (returnValue as Record)[__finalTracedIteratorKey] + ) + ) { + const snapshot = AsyncLocalStorage.snapshot(); + return { + ...returnValue, + [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing( + (returnValue as Record)[__finalTracedIteratorKey], + snapshot + ), + }; + } + const tracedPromise = new Promise((resolve, reject) => { Promise.resolve(returnValue) .then( @@ -599,6 +593,27 @@ export function traceable any>( ); } + if ( + !Array.isArray(rawOutput) && + typeof rawOutput === "object" && + rawOutput != null && + __finalTracedIteratorKey !== undefined && + isAsyncIterable( + (rawOutput as Record)[__finalTracedIteratorKey] + ) + ) { + const snapshot = AsyncLocalStorage.snapshot(); + return { + ...rawOutput, + [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing( + (rawOutput as Record)[ + __finalTracedIteratorKey + ], + snapshot + ), + }; + } + if (isGenerator(wrappedFunc) && isIteratorLike(rawOutput)) { const chunks = gatherAll(rawOutput); @@ -664,56 +679,11 @@ export function traceable any>( return traceableFunc as TraceableFunction; } -/** - * Return the current run tree from within a traceable-wrapped function. - * Will throw an error if called outside of a traceable function. - * - * @returns The run tree for the given context. 
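// A hedged sketch (not part of this patch) of the `__finalTracedIteratorKey` option handled
// above: when a traced function resolves to an object whose streamed output lives on a single
// property, that property's async iterable is tapped for tracing while the remaining
// properties pass through unchanged. The double-underscore prefix suggests the option is meant
// for internal SDK wrappers; the generator and property names below are illustrative assumptions.
import { traceable } from "langsmith/traceable";

async function* fakeTokenStream() {
  yield "Hello";
  yield " world";
}

export const wrappedStreamingCall = traceable(
  async () => ({
    requestId: "req_123", // plain properties are returned as-is
    stream: fakeTokenStream(), // this async iterable is wrapped so streamed chunks land in the trace
  }),
  { name: "streaming-call", __finalTracedIteratorKey: "stream" }
);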
- */ -export function getCurrentRunTree(): RunTree { - const runTree = asyncLocalStorage.getStore(); - if (runTree === undefined) { - throw new Error( - [ - "Could not get the current run tree.", - "", - "Please make sure you are calling this method within a traceable function.", - ].join("\n") - ); - } - return runTree; -} +export { + getCurrentRunTree, + isTraceableFunction, + withRunTree, + ROOT, +} from "./singletons/traceable.js"; -export function isTraceableFunction( - x: unknown - // eslint-disable-next-line @typescript-eslint/no-explicit-any -): x is TraceableFunction { - return typeof x === "function" && "langsmith:traceable" in x; -} - -function isKVMap(x: unknown): x is Record { - if (typeof x !== "object" || x == null) { - return false; - } - - const prototype = Object.getPrototypeOf(x); - return ( - (prototype === null || - prototype === Object.prototype || - Object.getPrototypeOf(prototype) === null) && - !(Symbol.toStringTag in x) && - !(Symbol.iterator in x) - ); -} - -export function wrapFunctionAndEnsureTraceable< - Func extends (...args: any[]) => any ->(target: Func, options: Partial, name = "target") { - if (typeof target === "function") { - return traceable(target, { - ...options, - name, - }); - } - throw new Error("Target must be runnable function"); -} +export type { RunTreeLike, TraceableFunction } from "./singletons/types.js"; diff --git a/js/src/utils/asserts.ts b/js/src/utils/asserts.ts new file mode 100644 index 000000000..55bc260db --- /dev/null +++ b/js/src/utils/asserts.ts @@ -0,0 +1,51 @@ +export function isPromiseMethod( + x: string | symbol +): x is "then" | "catch" | "finally" { + if (x === "then" || x === "catch" || x === "finally") { + return true; + } + return false; +} + +export function isKVMap(x: unknown): x is Record { + if (typeof x !== "object" || x == null) { + return false; + } + + const prototype = Object.getPrototypeOf(x); + return ( + (prototype === null || + prototype === Object.prototype || + Object.getPrototypeOf(prototype) === null) && + !(Symbol.toStringTag in x) && + !(Symbol.iterator in x) + ); +} +export const isAsyncIterable = (x: unknown): x is AsyncIterable => + x != null && + typeof x === "object" && + // eslint-disable-next-line @typescript-eslint/no-explicit-any + typeof (x as any)[Symbol.asyncIterator] === "function"; + +export const isIteratorLike = (x: unknown): x is Iterator => + x != null && + typeof x === "object" && + "next" in x && + typeof x.next === "function"; + +const GeneratorFunction = function* () {}.constructor; +export const isGenerator = (x: unknown): x is Generator => + // eslint-disable-next-line no-instanceof/no-instanceof + x != null && typeof x === "function" && x instanceof GeneratorFunction; + +export const isThenable = (x: unknown): x is Promise => + x != null && + typeof x === "object" && + "then" in x && + typeof x.then === "function"; + +export const isReadableStream = (x: unknown): x is ReadableStream => + x != null && + typeof x === "object" && + "getReader" in x && + typeof x.getReader === "function"; diff --git a/js/src/utils/env.ts b/js/src/utils/env.ts index 4c073a796..535ef2772 100644 --- a/js/src/utils/env.ts +++ b/js/src/utils/env.ts @@ -200,6 +200,15 @@ export function getEnvironmentVariable(name: string): string | undefined { } } +export function getLangSmithEnvironmentVariable( + name: string +): string | undefined { + return ( + getEnvironmentVariable(`LANGSMITH_${name}`) || + getEnvironmentVariable(`LANGCHAIN_${name}`) + ); +} + export function setEnvironmentVariable(name: string, 
value: string): void { if (typeof process !== "undefined") { // eslint-disable-next-line no-process-env diff --git a/js/src/utils/error.ts b/js/src/utils/error.ts new file mode 100644 index 000000000..8739cb091 --- /dev/null +++ b/js/src/utils/error.ts @@ -0,0 +1,23 @@ +function getErrorStackTrace(e: unknown) { + if (typeof e !== "object" || e == null) return undefined; + if (!("stack" in e) || typeof e.stack !== "string") return undefined; + + let stack = e.stack; + + const prevLine = `${e}`; + if (stack.startsWith(prevLine)) { + stack = stack.slice(prevLine.length); + } + + if (stack.startsWith("\n")) { + stack = stack.slice(1); + } + + return stack; +} + +export function printErrorStackTrace(e: unknown) { + const stack = getErrorStackTrace(e); + if (stack == null) return; + console.error(stack); +} diff --git a/js/src/utils/lodash/LICENSE b/js/src/utils/lodash/LICENSE new file mode 100644 index 000000000..5b807415b --- /dev/null +++ b/js/src/utils/lodash/LICENSE @@ -0,0 +1,49 @@ +The MIT License + +Copyright JS Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. \ No newline at end of file diff --git a/js/src/utils/lodash/assignValue.ts b/js/src/utils/lodash/assignValue.ts new file mode 100644 index 000000000..f02ed4991 --- /dev/null +++ b/js/src/utils/lodash/assignValue.ts @@ -0,0 +1,27 @@ +import baseAssignValue from "./baseAssignValue.js"; +import eq from "./eq.js"; + +/** Used to check objects for own properties. */ +const hasOwnProperty = Object.prototype.hasOwnProperty; + +/** + * Assigns `value` to `key` of `object` if the existing value is not equivalent. + * + * @private + * @param {Object} object The object to modify. 
+ * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. + */ +function assignValue(object: Record, key: string, value: any) { + const objValue = object[key]; + + if (!(hasOwnProperty.call(object, key) && eq(objValue, value))) { + if (value !== 0 || 1 / value === 1 / objValue) { + baseAssignValue(object, key, value); + } + } else if (value === undefined && !(key in object)) { + baseAssignValue(object, key, value); + } +} + +export default assignValue; diff --git a/js/src/utils/lodash/baseAssignValue.ts b/js/src/utils/lodash/baseAssignValue.ts new file mode 100644 index 000000000..5d1d70d16 --- /dev/null +++ b/js/src/utils/lodash/baseAssignValue.ts @@ -0,0 +1,23 @@ +/** + * The base implementation of `assignValue` and `assignMergeValue` without + * value checks. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. + */ +function baseAssignValue(object: Record, key: string, value: any) { + if (key === "__proto__") { + Object.defineProperty(object, key, { + configurable: true, + enumerable: true, + value: value, + writable: true, + }); + } else { + object[key] = value; + } +} + +export default baseAssignValue; diff --git a/js/src/utils/lodash/baseSet.ts b/js/src/utils/lodash/baseSet.ts new file mode 100644 index 000000000..5db4ddf76 --- /dev/null +++ b/js/src/utils/lodash/baseSet.ts @@ -0,0 +1,52 @@ +// @ts-nocheck + +import assignValue from "./assignValue.js"; +import castPath from "./castPath.js"; +import isIndex from "./isIndex.js"; +import isObject from "./isObject.js"; +import toKey from "./toKey.js"; + +/** + * The base implementation of `set`. + * + * @private + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @param {Function} [customizer] The function to customize path creation. + * @returns {Object} Returns `object`. + */ +function baseSet(object, path, value, customizer) { + if (!isObject(object)) { + return object; + } + path = castPath(path, object); + + const length = path.length; + const lastIndex = length - 1; + + let index = -1; + let nested = object; + + while (nested != null && ++index < length) { + const key = toKey(path[index]); + let newValue = value; + + if (index !== lastIndex) { + const objValue = nested[key]; + newValue = customizer ? customizer(objValue, key, nested) : undefined; + if (newValue === undefined) { + newValue = isObject(objValue) + ? objValue + : isIndex(path[index + 1]) + ? [] + : {}; + } + } + assignValue(nested, key, newValue); + nested = nested[key]; + } + return object; +} + +export default baseSet; diff --git a/js/src/utils/lodash/castPath.ts b/js/src/utils/lodash/castPath.ts new file mode 100644 index 000000000..4ae161c6f --- /dev/null +++ b/js/src/utils/lodash/castPath.ts @@ -0,0 +1,19 @@ +import isKey from "./isKey.js"; +import stringToPath from "./stringToPath.js"; + +/** + * Casts `value` to a path array if it's not one. + * + * @private + * @param {*} value The value to inspect. + * @param {Object} [object] The object to query keys on. + * @returns {Array} Returns the cast property path array. + */ +function castPath(value: any, object: Record) { + if (Array.isArray(value)) { + return value; + } + return isKey(value, object) ? 
[value] : stringToPath(value); +} + +export default castPath; diff --git a/js/src/utils/lodash/eq.ts b/js/src/utils/lodash/eq.ts new file mode 100644 index 000000000..11ece1229 --- /dev/null +++ b/js/src/utils/lodash/eq.ts @@ -0,0 +1,35 @@ +/** + * Performs a + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * comparison between two values to determine if they are equivalent. + * + * @since 4.0.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. + * @example + * + * const object = { 'a': 1 } + * const other = { 'a': 1 } + * + * eq(object, object) + * // => true + * + * eq(object, other) + * // => false + * + * eq('a', 'a') + * // => true + * + * eq('a', Object('a')) + * // => false + * + * eq(NaN, NaN) + * // => true + */ +function eq(value: any, other: any) { + return value === other || (value !== value && other !== other); +} + +export default eq; diff --git a/js/src/utils/lodash/getTag.ts b/js/src/utils/lodash/getTag.ts new file mode 100644 index 000000000..c616a26e0 --- /dev/null +++ b/js/src/utils/lodash/getTag.ts @@ -0,0 +1,19 @@ +// @ts-nocheck + +const toString = Object.prototype.toString; + +/** + * Gets the `toStringTag` of `value`. + * + * @private + * @param {*} value The value to query. + * @returns {string} Returns the `toStringTag`. + */ +function getTag(value) { + if (value == null) { + return value === undefined ? "[object Undefined]" : "[object Null]"; + } + return toString.call(value); +} + +export default getTag; diff --git a/js/src/utils/lodash/isIndex.ts b/js/src/utils/lodash/isIndex.ts new file mode 100644 index 000000000..eb956ca70 --- /dev/null +++ b/js/src/utils/lodash/isIndex.ts @@ -0,0 +1,30 @@ +// @ts-nocheck + +/** Used as references for various `Number` constants. */ +const MAX_SAFE_INTEGER = 9007199254740991; + +/** Used to detect unsigned integer values. */ +const reIsUint = /^(?:0|[1-9]\d*)$/; + +/** + * Checks if `value` is a valid array-like index. + * + * @private + * @param {*} value The value to check. + * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index. + * @returns {boolean} Returns `true` if `value` is a valid index, else `false`. + */ +function isIndex(value, length) { + const type = typeof value; + length = length == null ? MAX_SAFE_INTEGER : length; + + return ( + !!length && + (type === "number" || (type !== "symbol" && reIsUint.test(value))) && + value > -1 && + value % 1 === 0 && + value < length + ); +} + +export default isIndex; diff --git a/js/src/utils/lodash/isKey.ts b/js/src/utils/lodash/isKey.ts new file mode 100644 index 000000000..5c46772b9 --- /dev/null +++ b/js/src/utils/lodash/isKey.ts @@ -0,0 +1,36 @@ +// @ts-nocheck +import isSymbol from "./isSymbol.js"; + +/** Used to match property names within property paths. */ +const reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/; +const reIsPlainProp = /^\w*$/; + +/** + * Checks if `value` is a property name and not a property path. + * + * @private + * @param {*} value The value to check. + * @param {Object} [object] The object to query keys on. + * @returns {boolean} Returns `true` if `value` is a property name, else `false`. 
+ */ +function isKey(value, object) { + if (Array.isArray(value)) { + return false; + } + const type = typeof value; + if ( + type === "number" || + type === "boolean" || + value == null || + isSymbol(value) + ) { + return true; + } + return ( + reIsPlainProp.test(value) || + !reIsDeepProp.test(value) || + (object != null && value in Object(object)) + ); +} + +export default isKey; diff --git a/js/src/utils/lodash/isObject.ts b/js/src/utils/lodash/isObject.ts new file mode 100644 index 000000000..56c8930f8 --- /dev/null +++ b/js/src/utils/lodash/isObject.ts @@ -0,0 +1,31 @@ +// @ts-nocheck + +/** + * Checks if `value` is the + * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) + * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) + * + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an object, else `false`. + * @example + * + * isObject({}) + * // => true + * + * isObject([1, 2, 3]) + * // => true + * + * isObject(Function) + * // => true + * + * isObject(null) + * // => false + */ +function isObject(value) { + const type = typeof value; + return value != null && (type === "object" || type === "function"); +} + +export default isObject; diff --git a/js/src/utils/lodash/isSymbol.ts b/js/src/utils/lodash/isSymbol.ts new file mode 100644 index 000000000..94e65a60f --- /dev/null +++ b/js/src/utils/lodash/isSymbol.ts @@ -0,0 +1,28 @@ +// @ts-nocheck + +import getTag from "./getTag.js"; + +/** + * Checks if `value` is classified as a `Symbol` primitive or object. + * + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. + * @example + * + * isSymbol(Symbol.iterator) + * // => true + * + * isSymbol('abc') + * // => false + */ +function isSymbol(value) { + const type = typeof value; + return ( + type === "symbol" || + (type === "object" && value != null && getTag(value) === "[object Symbol]") + ); +} + +export default isSymbol; diff --git a/js/src/utils/lodash/memoizeCapped.ts b/js/src/utils/lodash/memoizeCapped.ts new file mode 100644 index 000000000..c4696ddd3 --- /dev/null +++ b/js/src/utils/lodash/memoizeCapped.ts @@ -0,0 +1,69 @@ +// @ts-nocheck + +/** + * Creates a function that memoizes the result of `func`. If `resolver` is + * provided, it determines the cache key for storing the result based on the + * arguments provided to the memoized function. By default, the first argument + * provided to the memoized function is used as the map cache key. The `func` + * is invoked with the `this` binding of the memoized function. + * + * **Note:** The cache is exposed as the `cache` property on the memoized + * function. Its creation may be customized by replacing the `memoize.Cache` + * constructor with one whose instances implement the + * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) + * method interface of `clear`, `delete`, `get`, `has`, and `set`. + * + * @since 0.1.0 + * @category Function + * @param {Function} func The function to have its output memoized. + * @param {Function} [resolver] The function to resolve the cache key. + * @returns {Function} Returns the new memoized function. 
+ * @example + * + * const object = { 'a': 1, 'b': 2 } + * const other = { 'c': 3, 'd': 4 } + * + * const values = memoize(values) + * values(object) + * // => [1, 2] + * + * values(other) + * // => [3, 4] + * + * object.a = 2 + * values(object) + * // => [1, 2] + * + * // Modify the result cache. + * values.cache.set(object, ['a', 'b']) + * values(object) + * // => ['a', 'b'] + * + * // Replace `memoize.Cache`. + * memoize.Cache = WeakMap + */ +function memoize(func, resolver) { + if ( + typeof func !== "function" || + (resolver != null && typeof resolver !== "function") + ) { + throw new TypeError("Expected a function"); + } + const memoized = function (...args) { + const key = resolver ? resolver.apply(this, args) : args[0]; + const cache = memoized.cache; + + if (cache.has(key)) { + return cache.get(key); + } + const result = func.apply(this, args); + memoized.cache = cache.set(key, result) || cache; + return result; + }; + memoized.cache = new (memoize.Cache || Map)(); + return memoized; +} + +memoize.Cache = Map; + +export default memoize; diff --git a/js/src/utils/lodash/set.ts b/js/src/utils/lodash/set.ts new file mode 100644 index 000000000..01f277ce4 --- /dev/null +++ b/js/src/utils/lodash/set.ts @@ -0,0 +1,39 @@ +// @ts-nocheck + +import baseSet from "./baseSet.js"; + +/** + * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, + * it's created. Arrays are created for missing index properties while objects + * are created for all other missing properties. Use `setWith` to customize + * `path` creation. + * + * **Note:** This method mutates `object`. + * + * Inlined to just use set functionality and patch vulnerabilities + * on existing isolated "lodash.set" package. + * + * @since 3.7.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @returns {Object} Returns `object`. + * @see has, hasIn, get, unset + * @example + * + * const object = { 'a': [{ 'b': { 'c': 3 } }] } + * + * set(object, 'a[0].b.c', 4) + * console.log(object.a[0].b.c) + * // => 4 + * + * set(object, ['x', '0', 'y', 'z'], 5) + * console.log(object.x[0].y.z) + * // => 5 + */ +function set(object, path, value) { + return object == null ? object : baseSet(object, path, value); +} + +export default set; diff --git a/js/src/utils/lodash/stringToPath.ts b/js/src/utils/lodash/stringToPath.ts new file mode 100644 index 000000000..d4e99ab9f --- /dev/null +++ b/js/src/utils/lodash/stringToPath.ts @@ -0,0 +1,49 @@ +// @ts-nocheck + +import memoizeCapped from "./memoizeCapped.js"; + +const charCodeOfDot = ".".charCodeAt(0); +const reEscapeChar = /\\(\\)?/g; +const rePropName = RegExp( + // Match anything that isn't a dot or bracket. + "[^.[\\]]+" + + "|" + + // Or match property names within brackets. + "\\[(?:" + + // Match a non-string expression. + "([^\"'][^[]*)" + + "|" + + // Or match strings (supports escaping characters). + "([\"'])((?:(?!\\2)[^\\\\]|\\\\.)*?)\\2" + + ")\\]" + + "|" + + // Or match "" as the space between consecutive dots or empty brackets. + "(?=(?:\\.|\\[\\])(?:\\.|\\[\\]|$))", + "g" +); + +/** + * Converts `string` to a property path array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the property path array. 
+ */
+const stringToPath = memoizeCapped((string: string) => {
+  const result = [];
+  if (string.charCodeAt(0) === charCodeOfDot) {
+    result.push("");
+  }
+  string.replace(rePropName, (match, expression, quote, subString) => {
+    let key = match;
+    if (quote) {
+      key = subString.replace(reEscapeChar, "$1");
+    } else if (expression) {
+      key = expression.trim();
+    }
+    result.push(key);
+  });
+  return result;
+});
+
+export default stringToPath;
diff --git a/js/src/utils/lodash/toKey.ts b/js/src/utils/lodash/toKey.ts
new file mode 100644
index 000000000..98b327455
--- /dev/null
+++ b/js/src/utils/lodash/toKey.ts
@@ -0,0 +1,23 @@
+// @ts-nocheck
+
+import isSymbol from "./isSymbol.js";
+
+/** Used as references for various `Number` constants. */
+const INFINITY = 1 / 0;
+
+/**
+ * Converts `value` to a string key if it's not a string or symbol.
+ *
+ * @private
+ * @param {*} value The value to inspect.
+ * @returns {string|symbol} Returns the key.
+ */
+function toKey(value) {
+  if (typeof value === "string" || isSymbol(value)) {
+    return value;
+  }
+  const result = `${value}`;
+  return result === "0" && 1 / value === -INFINITY ? "-0" : result;
+}
+
+export default toKey;
diff --git a/js/src/utils/prompts.ts b/js/src/utils/prompts.ts
new file mode 100644
index 000000000..53bbee3c4
--- /dev/null
+++ b/js/src/utils/prompts.ts
@@ -0,0 +1,45 @@
+import { parse as parseVersion } from "semver";
+
+export function isVersionGreaterOrEqual(
+  current_version: string,
+  target_version: string
+): boolean {
+  const current = parseVersion(current_version);
+  const target = parseVersion(target_version);
+
+  if (!current || !target) {
+    throw new Error("Invalid version format.");
+  }
+
+  return current.compare(target) >= 0;
+}
+
+export function parsePromptIdentifier(
+  identifier: string
+): [string, string, string] {
+  if (
+    !identifier ||
+    identifier.split("/").length > 2 ||
+    identifier.startsWith("/") ||
+    identifier.endsWith("/") ||
+    identifier.split(":").length > 2
+  ) {
+    throw new Error(`Invalid identifier format: ${identifier}`);
+  }
+
+  const [ownerNamePart, commitPart] = identifier.split(":");
+  const commit = commitPart || "latest";
+
+  if (ownerNamePart.includes("/")) {
+    const [owner, name] = ownerNamePart.split("/", 2);
+    if (!owner || !name) {
+      throw new Error(`Invalid identifier format: ${identifier}`);
+    }
+    return [owner, name, commit];
+  } else {
+    if (!ownerNamePart) {
+      throw new Error(`Invalid identifier format: ${identifier}`);
+    }
+    return ["-", ownerNamePart, commit];
+  }
+}
diff --git a/js/src/utils/warn.ts b/js/src/utils/warn.ts
new file mode 100644
index 000000000..b7bdc70f3
--- /dev/null
+++ b/js/src/utils/warn.ts
@@ -0,0 +1,8 @@
+const warnedMessages: Record<string, boolean> = {};
+
+export function warnOnce(message: string): void {
+  if (!warnedMessages[message]) {
+    console.warn(message);
+    warnedMessages[message] = true;
+  }
+}
diff --git a/js/src/wrappers/generic.ts b/js/src/wrappers/generic.ts
new file mode 100644
index 000000000..3b62bc0f8
--- /dev/null
+++ b/js/src/wrappers/generic.ts
@@ -0,0 +1,72 @@
+import type { RunTreeConfig } from "../index.js";
+import { traceable } from "../traceable.js";
+
+export const _wrapClient = <T extends object>(
+  sdk: T,
+  runName: string,
+  options?: Omit<RunTreeConfig, "name">
+): T => {
+  return new Proxy(sdk, {
+    get(target, propKey, receiver) {
+      const originalValue = target[propKey as keyof T];
+      if (typeof originalValue === "function") {
+        return traceable(originalValue.bind(target), {
+          run_type: "llm",
+          ...options,
+          name: [runName, propKey.toString()].join("."),
+        });
+      } else
if (
+        originalValue != null &&
+        !Array.isArray(originalValue) &&
+        // eslint-disable-next-line no-instanceof/no-instanceof
+        !(originalValue instanceof Date) &&
+        typeof originalValue === "object"
+      ) {
+        return _wrapClient(
+          originalValue,
+          [runName, propKey.toString()].join("."),
+          options
+        );
+      } else {
+        return Reflect.get(target, propKey, receiver);
+      }
+    },
+  });
+};
+
+type WrapSDKOptions = Partial<
+  RunTreeConfig & {
+    /**
+     * @deprecated Use `name` instead.
+     */
+    runName: string;
+  }
+>;
+
+/**
+ * Wrap an arbitrary SDK, enabling automatic LangSmith tracing.
+ * Method signatures are unchanged.
+ *
+ * Note that this will wrap and trace ALL SDK methods, not just
+ * LLM completion methods. If the passed SDK contains other methods,
+ * we recommend using the wrapped instance for LLM calls only.
+ * @param sdk An arbitrary SDK instance.
+ * @param options LangSmith options.
+ * @returns
+ */
+export const wrapSDK = <T extends object>(
+  sdk: T,
+  options?: WrapSDKOptions
+): T => {
+  const traceableOptions = options ? { ...options } : undefined;
+  if (traceableOptions != null) {
+    delete traceableOptions.runName;
+    delete traceableOptions.name;
+  }
+
+  return _wrapClient(
+    sdk,
+    options?.name ?? options?.runName ?? sdk.constructor?.name,
+    traceableOptions
+  );
+};
diff --git a/js/src/wrappers/index.ts b/js/src/wrappers/index.ts
index e8f265647..6ff1385b0 100644
--- a/js/src/wrappers/index.ts
+++ b/js/src/wrappers/index.ts
@@ -1 +1,2 @@
 export * from "./openai.js";
+export { wrapSDK } from "./generic.js";
diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts
index 03154d35a..05fae4d5d 100644
--- a/js/src/wrappers/openai.ts
+++ b/js/src/wrappers/openai.ts
@@ -1,6 +1,6 @@
 import { OpenAI } from "openai";
 import type { APIPromise } from "openai/core";
-import type { Client, RunTreeConfig } from "../index.js";
+import type { RunTreeConfig } from "../index.js";
 import { isTraceableFunction, traceable } from "../traceable.js";
 
 // Extra leniency around types in case multiple OpenAI SDK versions get installed
@@ -223,6 +223,24 @@ export const wrapOpenAI = (
       run_type: "llm",
       aggregator: chatAggregator,
       argsConfigPath: [1, "langsmithExtra"],
+      getInvocationParams: (payload: unknown) => {
+        if (typeof payload !== "object" || payload == null) return undefined;
+        // we can safely do so, as the types are not exported in TSC
+        const params = payload as OpenAI.ChatCompletionCreateParams;
+
+        const ls_stop =
+          (typeof params.stop === "string" ? [params.stop] : params.stop) ??
+          undefined;
+
+        return {
+          ls_provider: "openai",
+          ls_model_type: "chat",
+          ls_model_name: params.model,
+          ls_max_tokens: params.max_tokens ?? undefined,
+          ls_temperature: params.temperature ?? undefined,
+          ls_stop,
+        };
+      },
       ...options,
     }
   );
@@ -234,64 +252,27 @@ export const wrapOpenAI = (
       run_type: "llm",
       aggregator: textAggregator,
      argsConfigPath: [1, "langsmithExtra"],
+      getInvocationParams: (payload: unknown) => {
+        if (typeof payload !== "object" || payload == null) return undefined;
+        // we can safely do so, as the types are not exported in TSC
+        const params = payload as OpenAI.CompletionCreateParams;
+
+        const ls_stop =
+          (typeof params.stop === "string" ? [params.stop] : params.stop) ??
+          undefined;
+
+        return {
+          ls_provider: "openai",
+          ls_model_type: "text",
+          ls_model_name: params.model,
+          ls_max_tokens: params.max_tokens ?? undefined,
+          ls_temperature: params.temperature ??
undefined,
+          ls_stop,
+        };
+      },
       ...options,
     }
   );
 
   return openai as PatchedOpenAIClient<T>;
 };
-
-const _wrapClient = <T extends object>(
-  sdk: T,
-  runName: string,
-  options?: { client?: Client }
-): T => {
-  return new Proxy(sdk, {
-    get(target, propKey, receiver) {
-      const originalValue = target[propKey as keyof T];
-      if (typeof originalValue === "function") {
-        return traceable(
-          originalValue.bind(target),
-          Object.assign(
-            { name: [runName, propKey.toString()].join("."), run_type: "llm" },
-            options
-          )
-        );
-      } else if (
-        originalValue != null &&
-        !Array.isArray(originalValue) &&
-        // eslint-disable-next-line no-instanceof/no-instanceof
-        !(originalValue instanceof Date) &&
-        typeof originalValue === "object"
-      ) {
-        return _wrapClient(
-          originalValue,
-          [runName, propKey.toString()].join("."),
-          options
-        );
-      } else {
-        return Reflect.get(target, propKey, receiver);
-      }
-    },
-  });
-};
-
-/**
- * Wrap an arbitrary SDK, enabling automatic LangSmith tracing.
- * Method signatures are unchanged.
- *
- * Note that this will wrap and trace ALL SDK methods, not just
- * LLM completion methods. If the passed SDK contains other methods,
- * we recommend using the wrapped instance for LLM calls only.
- * @param sdk An arbitrary SDK instance.
- * @param options LangSmith options.
- * @returns
- */
-export const wrapSDK = <T extends object>(
-  sdk: T,
-  options?: { client?: Client; runName?: string }
-): T => {
-  return _wrapClient(sdk, options?.runName ?? sdk.constructor?.name, {
-    client: options?.client,
-  });
-};
diff --git a/js/src/wrappers/vercel.ts b/js/src/wrappers/vercel.ts
new file mode 100644
index 000000000..dc022d7c8
--- /dev/null
+++ b/js/src/wrappers/vercel.ts
@@ -0,0 +1,109 @@
+import type { RunTreeConfig } from "../index.js";
+import { traceable } from "../traceable.js";
+import { _wrapClient } from "./generic.js";
+
+/**
+ * Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing.
+ * After wrapping a model, you can use it with the Vercel AI SDK Core
+ * methods as normal.
+ *
+ * @example
+ * ```ts
+ * import { anthropic } from "@ai-sdk/anthropic";
+ * import { streamText } from "ai";
+ * import { wrapAISDKModel } from "langsmith/wrappers/vercel";
+ *
+ * const anthropicModel = anthropic("claude-3-haiku-20240307");
+ *
+ * const modelWithTracing = wrapAISDKModel(anthropicModel);
+ *
+ * const { textStream } = await streamText({
+ *   model: modelWithTracing,
+ *   prompt: "Write a vegetarian lasagna recipe for 4 people.",
+ * });
+ *
+ * for await (const chunk of textStream) {
+ *   console.log(chunk);
+ * }
+ * ```
+ * @param model An AI SDK model instance.
+ * @param options LangSmith options.
+ * @returns
+ */
+export const wrapAISDKModel = <T extends object>(
+  model: T,
+  options?: Partial<RunTreeConfig>
+): T => {
+  if (
+    !("doStream" in model) ||
+    typeof model.doStream !== "function" ||
+    !("doGenerate" in model) ||
+    typeof model.doGenerate !== "function"
+  ) {
+    throw new Error(
+      `Received invalid input. This version of wrapAISDKModel only supports Vercel LanguageModelV1 instances.`
+    );
+  }
+  const runName = options?.name ??
model.constructor?.name; + return new Proxy(model, { + get(target, propKey, receiver) { + const originalValue = target[propKey as keyof T]; + if (typeof originalValue === "function") { + let __finalTracedIteratorKey; + let aggregator; + if (propKey === "doStream") { + __finalTracedIteratorKey = "stream"; + aggregator = (chunks: any[]) => { + return chunks.reduce( + (aggregated, chunk) => { + if (chunk.type === "text-delta") { + return { + ...aggregated, + text: aggregated.text + chunk.textDelta, + }; + } else if (chunk.type === "tool-call") { + return { + ...aggregated, + ...chunk, + }; + } else if (chunk.type === "finish") { + return { + ...aggregated, + usage: chunk.usage, + finishReason: chunk.finishReason, + }; + } else { + return aggregated; + } + }, + { + text: "", + } + ); + }; + } + return traceable(originalValue.bind(target), { + run_type: "llm", + name: runName, + ...options, + __finalTracedIteratorKey, + aggregator, + }); + } else if ( + originalValue != null && + !Array.isArray(originalValue) && + // eslint-disable-next-line no-instanceof/no-instanceof + !(originalValue instanceof Date) && + typeof originalValue === "object" + ) { + return _wrapClient( + originalValue, + [runName, propKey.toString()].join("."), + options + ); + } else { + return Reflect.get(target, propKey, receiver); + } + }, + }); +}; diff --git a/js/tsconfig.json b/js/tsconfig.json index 5beb3ad27..ab24d6247 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -36,9 +36,14 @@ "src/run_trees.ts", "src/traceable.ts", "src/evaluation/index.ts", + "src/evaluation/langchain.ts", "src/schemas.ts", + "src/langchain.ts", "src/wrappers/index.ts", - "src/wrappers/openai.ts" + "src/anonymizer/index.ts", + "src/wrappers/openai.ts", + "src/wrappers/vercel.ts", + "src/singletons/traceable.ts" ] } } diff --git a/js/yarn.lock b/js/yarn.lock index 94e03d6a3..28195c859 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -2,6 +2,74 @@ # yarn lockfile v1 +"@ai-sdk/openai@^0.0.40": + version "0.0.40" + resolved "https://registry.yarnpkg.com/@ai-sdk/openai/-/openai-0.0.40.tgz#227df69c8edf8b26b17f78ae55daa03e58a58870" + integrity sha512-9Iq1UaBHA5ZzNv6j3govuKGXrbrjuWvZIgWNJv4xzXlDMHu9P9hnqlBr/Aiay54WwCuTVNhTzAUTfFgnTs2kbQ== + dependencies: + "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider-utils" "1.0.5" + +"@ai-sdk/provider-utils@1.0.5": + version "1.0.5" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider-utils/-/provider-utils-1.0.5.tgz#765c60871019ded104d79b4cea0805ba563bb5aa" + integrity sha512-XfOawxk95X3S43arn2iQIFyWGMi0DTxsf9ETc6t7bh91RPWOOPYN1tsmS5MTKD33OGJeaDQ/gnVRzXUCRBrckQ== + dependencies: + "@ai-sdk/provider" "0.0.14" + eventsource-parser "1.1.2" + nanoid "3.3.6" + secure-json-parse "2.7.0" + +"@ai-sdk/provider@0.0.14": + version "0.0.14" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider/-/provider-0.0.14.tgz#a07569c39a8828aa8312cf1ac6f35ce6ee1b2fce" + integrity sha512-gaQ5Y033nro9iX1YUjEDFDRhmMcEiCk56LJdIUbX5ozEiCNCfpiBpEqrjSp/Gp5RzBS2W0BVxfG7UGW6Ezcrzg== + dependencies: + json-schema "0.4.0" + +"@ai-sdk/react@0.0.30": + version "0.0.30" + resolved "https://registry.yarnpkg.com/@ai-sdk/react/-/react-0.0.30.tgz#51d586141a81d7f9b76798922b206e8c6faf04dc" + integrity sha512-VnHYRzwhiM4bZdL9DXwJltN8Qnz1MkFdRTa1y7KdmHSJ18ebCNWmPO5XJhnZiQdEXHYmrzZ3WiVt2X6pxK07FA== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + swr "2.2.5" + +"@ai-sdk/solid@0.0.23": + version "0.0.23" + resolved 
"https://registry.yarnpkg.com/@ai-sdk/solid/-/solid-0.0.23.tgz#712cf1a02bfc337806c5c1b486d16252bec57a15" + integrity sha512-GMojG2PsqwnOGfx7C1MyQPzPBIlC44qn3ykjp9OVnN2Fu47mcFp3QM6gwWoHwNqi7FQDjRy+s/p+8EqYIQcAwg== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + +"@ai-sdk/svelte@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/svelte/-/svelte-0.0.24.tgz#2519b84a0c104c82d5e48d3b8e9350e9dd4af6cf" + integrity sha512-ZjzzvfYLE01VTO0rOZf6z9sTGhJhe6IYZMxQiM3P+zemufRYe57NDcLYEb6h+2qhvU6Z+k/Q+Nh/spAt0JzGUg== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + sswr "2.1.0" + +"@ai-sdk/ui-utils@0.0.20": + version "0.0.20" + resolved "https://registry.yarnpkg.com/@ai-sdk/ui-utils/-/ui-utils-0.0.20.tgz#c68968185a7cc33f7d98d13999731e1c7b672cbb" + integrity sha512-6MRWigzXfuxUcAYEFMLP6cLbALJkg12Iz1Sl+wuPMpB6aw7di2ePiTuNakFUYjgP7TNsW4UxzpypBqqJ1KNB0A== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + secure-json-parse "2.7.0" + +"@ai-sdk/vue@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/vue/-/vue-0.0.24.tgz#2e72f7e755850ed51540f9a7b25dc6b228a8647a" + integrity sha512-0S+2dVSui6LFgaWoFx+3h5R7GIP9MxdJo63tFuLvgyKr2jmpo5S5kGcWl95vNdzKDqaesAXfOnky+tn5A2d49A== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + swrv "1.0.4" + "@ampproject/remapping@^2.2.0": version "2.2.1" resolved "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz" @@ -10,14 +78,7 @@ "@jridgewell/gen-mapping" "^0.3.0" "@jridgewell/trace-mapping" "^0.3.9" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.21.4": - version "7.21.4" - resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz" - integrity sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g== - dependencies: - "@babel/highlight" "^7.18.6" - -"@babel/code-frame@^7.22.13": +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.21.4", "@babel/code-frame@^7.22.13": version "7.22.13" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.13.tgz#e3c1c099402598483b7a8c46a721d1038803755e" integrity sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w== @@ -51,17 +112,7 @@ json5 "^2.2.2" semver "^6.3.0" -"@babel/generator@^7.22.0", "@babel/generator@^7.7.2": - version "7.22.3" - resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.22.3.tgz" - integrity sha512-C17MW4wlk//ES/CJDL51kPNwl+qiBQyN7b9SKyVp11BLGFeSPoVaHrv+MNt8jwQFhQWowW88z1eeBx3pFz9v8A== - dependencies: - "@babel/types" "^7.22.3" - "@jridgewell/gen-mapping" "^0.3.2" - "@jridgewell/trace-mapping" "^0.3.17" - jsesc "^2.5.1" - -"@babel/generator@^7.23.0": +"@babel/generator@^7.22.0", "@babel/generator@^7.23.0", "@babel/generator@^7.7.2": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.0.tgz#df5c386e2218be505b34837acbcb874d7a983420" integrity sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g== @@ -132,25 +183,12 @@ resolve "^1.14.2" semver "^6.1.2" -"@babel/helper-environment-visitor@^7.18.9", "@babel/helper-environment-visitor@^7.22.1": - version "7.22.1" - resolved "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz" - integrity sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA== - 
-"@babel/helper-environment-visitor@^7.22.20": +"@babel/helper-environment-visitor@^7.18.9", "@babel/helper-environment-visitor@^7.22.1", "@babel/helper-environment-visitor@^7.22.20": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167" integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA== -"@babel/helper-function-name@^7.18.9", "@babel/helper-function-name@^7.19.0", "@babel/helper-function-name@^7.21.0": - version "7.21.0" - resolved "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz" - integrity sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg== - dependencies: - "@babel/template" "^7.20.7" - "@babel/types" "^7.21.0" - -"@babel/helper-function-name@^7.23.0": +"@babel/helper-function-name@^7.18.9", "@babel/helper-function-name@^7.19.0", "@babel/helper-function-name@^7.21.0", "@babel/helper-function-name@^7.23.0": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759" integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw== @@ -158,14 +196,7 @@ "@babel/template" "^7.22.15" "@babel/types" "^7.23.0" -"@babel/helper-hoist-variables@^7.18.6": - version "7.18.6" - resolved "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz" - integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-hoist-variables@^7.22.5": +"@babel/helper-hoist-variables@^7.18.6", "@babel/helper-hoist-variables@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw== @@ -248,36 +279,19 @@ dependencies: "@babel/types" "^7.20.0" -"@babel/helper-split-export-declaration@^7.18.6": - version "7.18.6" - resolved "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz" - integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-split-export-declaration@^7.22.6": +"@babel/helper-split-export-declaration@^7.18.6", "@babel/helper-split-export-declaration@^7.22.6": version "7.22.6" resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c" integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g== dependencies: "@babel/types" "^7.22.5" -"@babel/helper-string-parser@^7.21.5": - version "7.21.5" - resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz" - integrity sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w== - "@babel/helper-string-parser@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz#533f36457a25814cf1df6488523ad547d784a99f" integrity 
sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw== -"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1": - version "7.19.1" - resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz" - integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w== - -"@babel/helper-validator-identifier@^7.22.20": +"@babel/helper-validator-identifier@^7.19.1", "@babel/helper-validator-identifier@^7.22.20": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== @@ -306,15 +320,6 @@ "@babel/traverse" "^7.22.1" "@babel/types" "^7.22.3" -"@babel/highlight@^7.18.6": - version "7.18.6" - resolved "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz" - integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== - dependencies: - "@babel/helper-validator-identifier" "^7.18.6" - chalk "^2.0.0" - js-tokens "^4.0.0" - "@babel/highlight@^7.22.13": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.20.tgz#4ca92b71d80554b01427815e06f2df965b9c1f54" @@ -324,12 +329,7 @@ chalk "^2.4.2" js-tokens "^4.0.0" -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.21.9", "@babel/parser@^7.22.0": - version "7.22.4" - resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.22.4.tgz" - integrity sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA== - -"@babel/parser@^7.22.15", "@babel/parser@^7.23.0": +"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.22.0", "@babel/parser@^7.22.15", "@babel/parser@^7.23.0": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.0.tgz#da950e622420bf96ca0d0f2909cdddac3acd8719" integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw== @@ -1013,16 +1013,7 @@ dependencies: regenerator-runtime "^0.13.11" -"@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.21.9", "@babel/template@^7.3.3": - version "7.21.9" - resolved "https://registry.npmjs.org/@babel/template/-/template-7.21.9.tgz" - integrity sha512-MK0X5k8NKOuWRamiEfc3KEJiHMTkGZNUjzMipqCGDDc6ijRl/B7RGSKVGncu4Ro/HdyzzY6cmoXuKI2Gffk7vQ== - dependencies: - "@babel/code-frame" "^7.21.4" - "@babel/parser" "^7.21.9" - "@babel/types" "^7.21.5" - -"@babel/template@^7.22.15": +"@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.21.9", "@babel/template@^7.22.15", "@babel/template@^7.3.3": version "7.22.15" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.15.tgz#09576efc3830f0430f4548ef971dde1350ef2f38" integrity sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w== @@ -1047,16 +1038,7 @@ debug "^4.1.0" globals "^11.1.0" -"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.20.0", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.21.0", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.3.3", "@babel/types@^7.4.4": - 
version "7.22.4" - resolved "https://registry.npmjs.org/@babel/types/-/types-7.22.4.tgz" - integrity sha512-Tx9x3UBHTTsMSW85WB2kphxYQVvrZ/t1FxD88IpSgIjiUJlCm9z+xWIDwyo1vffTwSqteqyznB8ZE9vYYk16zA== - dependencies: - "@babel/helper-string-parser" "^7.21.5" - "@babel/helper-validator-identifier" "^7.19.1" - to-fast-properties "^2.0.0" - -"@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0": +"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.20.0", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.15", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.0.tgz#8c1f020c9df0e737e4e247c0619f58c68458aaeb" integrity sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg== @@ -1109,6 +1091,11 @@ resolved "https://registry.npmjs.org/@eslint/js/-/js-8.41.0.tgz" integrity sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA== +"@faker-js/faker@^8.4.1": + version "8.4.1" + resolved "https://registry.yarnpkg.com/@faker-js/faker/-/faker-8.4.1.tgz#5d5e8aee8fce48f5e189bf730ebd1f758f491451" + integrity sha512-XQ3cU+Q8Uqmrbf2e0cIC/QN43sTBSC8KF12u29Mb47tWrt2hAgBXSgpZMj4Ao8Uk0iJcU99QsOCaIL8934obCg== + "@humanwhocodes/config-array@^0.11.8": version "0.11.8" resolved "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz" @@ -1381,29 +1368,62 @@ "@jridgewell/resolve-uri" "3.1.0" "@jridgewell/sourcemap-codec" "1.4.14" -"@langchain/core@^0.1.27", "@langchain/core@^0.1.32": - version "0.1.32" - resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.1.32.tgz#cd6748cc91b8b208ba7c736c16c6dbeb291dc86c" - integrity sha512-7b8wBQMej2QxaDDS0fCQa3/zrA2raTh1RBe2h1som7QxFpWJkHSxwVwdvGUotX9SopmsY99TK54sK0amfDvBBA== +"@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>=0.2.11 <0.3.0", "@langchain/core@>=0.2.16 <0.3.0", "@langchain/core@^0.2.17": + version "0.2.17" + resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.17.tgz#dfd44a2ccf79cef88ba765741a1c277bc22e483f" + integrity sha512-WnFiZ7R/ZUVeHO2IgcSL7Tu+CjApa26Iy99THJP5fax/NF8UQCc/ZRcw2Sb/RUuRPVm6ALDass0fSQE1L9YNJg== dependencies: ansi-styles "^5.0.0" camelcase "6" decamelize "1.2.0" - js-tiktoken "^1.0.8" - langsmith "~0.1.1" + js-tiktoken "^1.0.12" + langsmith "~0.1.30" ml-distance "^4.0.0" + mustache "^4.2.0" p-queue "^6.6.2" p-retry "4" - uuid "^9.0.0" + uuid "^10.0.0" zod "^3.22.4" zod-to-json-schema "^3.22.3" -"@langchain/langgraph@^0.0.8": - version "0.0.8" - resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.8.tgz#910d6190effee4433fc829c3e76940c4565e53e8" - integrity sha512-NVARwCBPfRqCDS2d/VBMfbGIoZij6kB6Q+HUtTFCsfZ36FnovQ6L3YwKT11SmTf6xIil9S0zvK4gPf3asLzRaw== +"@langchain/langgraph@^0.0.29": + version "0.0.29" + resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.29.tgz#eda31d101e7a75981e0929661c41ab2461ff8640" + integrity sha512-BSFFJarkXqrMdH9yH6AIiBCw4ww0VsXXpBwqaw+9/7iulW0pBFRSkWXHjEYnmsdCRgyIxoP8vYQAQ8Jtu3qzZA== dependencies: - "@langchain/core" "^0.1.27" + "@langchain/core" ">=0.2.16 <0.3.0" + uuid "^10.0.0" + zod "^3.23.8" + +"@langchain/openai@>=0.1.0 <0.3.0": + version "0.2.4" + resolved 
"https://registry.yarnpkg.com/@langchain/openai/-/openai-0.2.4.tgz#02d210d2aacdaf654bceb686b3ec49517fb3b1ea" + integrity sha512-PQGmnnKbsC8odwjGbYf2aHAQEZ/uVXYtXqKnwk7BTVMZlFnt+Rt9eigp940xMKAadxHzqtKJpSd7Xf6G+LI6KA== + dependencies: + "@langchain/core" ">=0.2.16 <0.3.0" + js-tiktoken "^1.0.12" + openai "^4.49.1" + zod "^3.22.4" + zod-to-json-schema "^3.22.3" + +"@langchain/openai@^0.2.5": + version "0.2.5" + resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.2.5.tgz#e85b983986a7415ea743d4c854bb0674134334d4" + integrity sha512-gQXS5VBFyAco0jgSnUVan6fYVSIxlffmDaeDGpXrAmz2nQPgiN/h24KYOt2NOZ1zRheRzRuO/CfRagMhyVUaFA== + dependencies: + "@langchain/core" ">=0.2.16 <0.3.0" + js-tiktoken "^1.0.12" + openai "^4.49.1" + zod "^3.22.4" + zod-to-json-schema "^3.22.3" + +"@langchain/textsplitters@~0.0.0": + version "0.0.2" + resolved "https://registry.yarnpkg.com/@langchain/textsplitters/-/textsplitters-0.0.2.tgz#500baa8341fb7fc86fca531a4192665a319504a3" + integrity sha512-6bQOuYHTGYlkgPY/8M5WPq4nnXZpEysGzRopQCYjg2WLcEoIPUMMrXsAaNNdvU3BOeMrhin8izvpDPD165hX6Q== + dependencies: + "@langchain/core" ">0.1.0 <0.3.0" + js-tiktoken "^1.0.12" "@nodelib/fs.scandir@2.1.5": version "2.1.5" @@ -1426,6 +1446,11 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" +"@opentelemetry/api@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.9.0.tgz#d03eba68273dc0f7509e2a3d5cba21eae10379fe" + integrity sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg== + "@sinclair/typebox@^0.25.16": version "0.25.24" resolved "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz" @@ -1503,6 +1528,11 @@ dependencies: "@babel/types" "^7.20.7" +"@types/diff-match-patch@^1.0.36": + version "1.0.36" + resolved "https://registry.yarnpkg.com/@types/diff-match-patch/-/diff-match-patch-1.0.36.tgz#dcef10a69d357fe9d43ac4ff2eca6b85dbf466af" + integrity sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg== + "@types/graceful-fs@^4.1.3": version "4.1.6" resolved "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz" @@ -1717,6 +1747,26 @@ agentkeepalive@^4.2.1: dependencies: humanize-ms "^1.2.1" +ai@^3.2.37: + version "3.2.37" + resolved "https://registry.yarnpkg.com/ai/-/ai-3.2.37.tgz#148ed3124e6b0a01c703597471718520ef1c498d" + integrity sha512-waqKYZOE1zJwKEHx69R4v/xNG0a1o0He8TDgX29hUu36Zk0yrBJoVSlXbC9KoFuxW4eRpt+gZv1kqd1nVc1CGg== + dependencies: + "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/react" "0.0.30" + "@ai-sdk/solid" "0.0.23" + "@ai-sdk/svelte" "0.0.24" + "@ai-sdk/ui-utils" "0.0.20" + "@ai-sdk/vue" "0.0.24" + "@opentelemetry/api" "1.9.0" + eventsource-parser "1.1.2" + json-schema "0.4.0" + jsondiffpatch "0.6.0" + nanoid "3.3.6" + secure-json-parse "2.7.0" + zod-to-json-schema "3.22.5" + ajv@^6.10.0, ajv@^6.12.4: version "6.12.6" resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz" @@ -1931,6 +1981,11 @@ base64-js@^1.5.1: resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== +binary-extensions@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" + integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== + binary-search@^1.3.5: 
version "1.3.6" resolved "https://registry.yarnpkg.com/binary-search/-/binary-search-1.3.6.tgz#e32426016a0c5092f0f3598836a1c7da3560565c" @@ -1945,11 +2000,11 @@ brace-expansion@^1.1.7: concat-map "0.0.1" braces@^3.0.2: - version "3.0.2" - resolved "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" browserslist@^4.21.3, browserslist@^4.21.5: version "4.21.7" @@ -2008,7 +2063,7 @@ caniuse-lite@^1.0.30001489: resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001491.tgz" integrity sha512-17EYIi4TLnPiTzVKMveIxU5ETlxbSO3B6iPvMbprqnKh4qJsQGk5Nh1Lp4jIMAE0XfrujsJuWZAM3oJdMHaKBA== -chalk@^2.0.0, chalk@^2.4.2: +chalk@^2.4.2: version "2.4.2" resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -2025,6 +2080,11 @@ chalk@^4.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" + integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== + char-regex@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz" @@ -2040,6 +2100,11 @@ cjs-module-lexer@^1.0.0: resolved "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz" integrity sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA== +client-only@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" + integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA== + cliui@^8.0.1: version "8.0.1" resolved "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz" @@ -2190,6 +2255,11 @@ detect-newline@^3.0.0: resolved "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz" integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== +diff-match-patch@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/diff-match-patch/-/diff-match-patch-1.0.5.tgz#abb584d5f10cd1196dfc55aa03701592ae3f7b37" + integrity sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw== + diff-sequences@^29.4.3: version "29.4.3" resolved "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz" @@ -2506,6 +2576,11 @@ eventemitter3@^4.0.4: resolved "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== +eventsource-parser@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/eventsource-parser/-/eventsource-parser-1.1.2.tgz#ed6154a4e3dbe7cda9278e5e35d2ffc58b309f89" + integrity sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA== + execa@^5.0.0: version "5.1.1" resolved "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz" @@ -2589,10 +2664,10 @@ 
file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" @@ -3469,10 +3544,10 @@ jest@^29.5.0: import-local "^3.0.2" jest-cli "^29.5.0" -js-tiktoken@^1.0.8: - version "1.0.10" - resolved "https://registry.yarnpkg.com/js-tiktoken/-/js-tiktoken-1.0.10.tgz#2b343ec169399dcee8f9ef9807dbd4fafd3b30dc" - integrity sha512-ZoSxbGjvGyMT13x6ACo9ebhDha/0FHdKA+OsQcMOWcm1Zs7r90Rhk5lhERLzji+3rA7EKpXCgwXcM5fF3DMpdA== +js-tiktoken@^1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/js-tiktoken/-/js-tiktoken-1.0.12.tgz#af0f5cf58e5e7318240d050c8413234019424211" + integrity sha512-L7wURW1fH9Qaext0VzaUDpFGVQgjkdE3Dgsy9/+yXyGEpBKnylTd0mU0bfbNkKDlXRb6TEsZkwuflu1B8uQbJQ== dependencies: base64-js "^1.5.1" @@ -3516,6 +3591,11 @@ json-schema-traverse@^0.4.1: resolved "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz" integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== +json-schema@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + json-stable-stringify-without-jsonify@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz" @@ -3533,15 +3613,56 @@ json5@^2.2.2, json5@^2.2.3: resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== +jsondiffpatch@0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/jsondiffpatch/-/jsondiffpatch-0.6.0.tgz#daa6a25bedf0830974c81545568d5f671c82551f" + integrity sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ== + dependencies: + "@types/diff-match-patch" "^1.0.36" + chalk "^5.3.0" + diff-match-patch "^1.0.5" + +jsonpointer@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-5.0.1.tgz#2110e0af0900fd37467b5907ecd13a7884a1b559" + integrity sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ== + kleur@^3.0.3: version "3.0.3" resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -langsmith@~0.1.1: - version "0.1.3" - resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.3.tgz#b086c5dd0709c41da417bc8b672e8f4a03e80809" - integrity sha512-kQMS3QySeU0Qt9A71d9trUXbeKn33HfxpRc7hRjSB967zcdTAngh66NcqYqBflD3nOL4FK6LKmvfb3vbNDEoPg== +langchain@^0.2.10: + version "0.2.10" + resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.10.tgz#35b74038e54650efbd9fe7d9d59765fe2790bb47" + integrity sha512-i0fC+RlX/6w6HKPWL3N5zrhrkijvpe2Xu4t/qbWzq4uFf8WBfPwmNFom3RtO2RatuPnHLm8mViU6nw8YBDiVwA== + dependencies: + "@langchain/core" 
">=0.2.11 <0.3.0" + "@langchain/openai" ">=0.1.0 <0.3.0" + "@langchain/textsplitters" "~0.0.0" + binary-extensions "^2.2.0" + js-tiktoken "^1.0.12" + js-yaml "^4.1.0" + jsonpointer "^5.0.1" + langchainhub "~0.0.8" + langsmith "~0.1.30" + ml-distance "^4.0.0" + openapi-types "^12.1.3" + p-retry "4" + uuid "^10.0.0" + yaml "^2.2.1" + zod "^3.22.4" + zod-to-json-schema "^3.22.3" + +langchainhub@~0.0.8: + version "0.0.10" + resolved "https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.10.tgz#7579440a3255d67571b7046f3910593c5664f064" + integrity sha512-mOVso7TGTMSlvTTUR1b4zUIMtu8zgie/pcwRm1SeooWwuHYMQovoNXjT6gEjvWEZ6cjt4gVH+1lu2tp1/phyIQ== + +langsmith@~0.1.30: + version "0.1.38" + resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.38.tgz#51c50db3110ffff15f522d0486dbeb069c82ca45" + integrity sha512-h8UHgvtGzIoo/52oN7gZlAPP+7FREFnZYFJ7HSPOYej9DE/yQMg6qjgIn9RwjhUgWWQlmvRN6fM3kqbCCDX5EQ== dependencies: "@types/uuid" "^9.0.1" commander "^10.0.1" @@ -3712,16 +3833,26 @@ ml-tree-similarity@^1.0.0: binary-search "^1.3.5" num-sort "^2.0.0" -ms@2.1.2, ms@^2.1.1: +ms@2.1.2: version "2.1.2" resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.0.0: +ms@^2.0.0, ms@^2.1.1: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +mustache@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/mustache/-/mustache-4.2.0.tgz#e5892324d60a12ec9c2a73359edca52972bf6f64" + integrity sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ== + +nanoid@3.3.6: + version "3.3.6" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c" + integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA== + natural-compare-lite@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz" @@ -3814,10 +3945,10 @@ onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" -openai@^4.38.5: - version "4.38.5" - resolved "https://registry.yarnpkg.com/openai/-/openai-4.38.5.tgz#87de78eed9f7e63331fb6b1307d8c9dd986b39d0" - integrity sha512-Ym5GJL98ZhLJJ7enBx53jjG3vwN/fsB+Ozh46nnRZZS9W1NiYqbwkJ+sXd3dkCIiWIgcyyOPL2Zr8SQAzbpj3g== +openai@^4.38.5, openai@^4.49.1: + version "4.52.7" + resolved "https://registry.yarnpkg.com/openai/-/openai-4.52.7.tgz#e32b000142287a9e8eda8512ba28df33d11ec1f1" + integrity sha512-dgxA6UZHary6NXUHEDj5TWt8ogv0+ibH+b4pT5RrWMjiRZVylNwLcw/2ubDrX5n0oUmHX/ZgudMJeemxzOvz7A== dependencies: "@types/node" "^18.11.18" "@types/node-fetch" "^2.6.4" @@ -3828,6 +3959,11 @@ openai@^4.38.5: node-fetch "^2.6.7" web-streams-polyfill "^3.2.1" +openapi-types@^12.1.3: + version "12.1.3" + resolved "https://registry.yarnpkg.com/openapi-types/-/openapi-types-12.1.3.tgz#471995eb26c4b97b7bd356aacf7b91b73e777dd3" + integrity sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw== + optionator@^0.9.1: version "0.9.1" resolved "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz" @@ -4140,6 +4276,11 @@ safe-regex-test@^1.0.0: get-intrinsic "^1.1.3" is-regex "^1.1.4" +secure-json-parse@2.7.0: + version "2.7.0" + resolved 
"https://registry.yarnpkg.com/secure-json-parse/-/secure-json-parse-2.7.0.tgz#5a5f9cd6ae47df23dba3151edd06855d47e09862" + integrity sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw== + semver@7.x, semver@^7.3.5, semver@^7.3.7: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" @@ -4152,6 +4293,11 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== +semver@^7.6.3: + version "7.6.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz" @@ -4206,6 +4352,13 @@ sprintf-js@~1.0.2: resolved "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== +sswr@2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/sswr/-/sswr-2.1.0.tgz#1eb64cd647cc9e11f871e7f43554abd8c64e1103" + integrity sha512-Cqc355SYlTAaUt8iDPaC/4DPPXK925PePLMxyBKuWd5kKc5mwsG3nT9+Mq2tyguL5s7b4Jg+IRMpTRsNTAfpSQ== + dependencies: + swrev "^4.0.0" + stack-utils@^2.0.3: version "2.0.6" resolved "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz" @@ -4310,6 +4463,24 @@ supports-preserve-symlinks-flag@^1.0.0: resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== +swr@2.2.5: + version "2.2.5" + resolved "https://registry.yarnpkg.com/swr/-/swr-2.2.5.tgz#063eea0e9939f947227d5ca760cc53696f46446b" + integrity sha512-QtxqyclFeAsxEUeZIYmsaQ0UjimSq1RZ9Un7I68/0ClKK/U3LoyQunwkQfJZr2fc22DfIXLNDc2wFyTEikCUpg== + dependencies: + client-only "^0.0.1" + use-sync-external-store "^1.2.0" + +swrev@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/swrev/-/swrev-4.0.0.tgz#83da6983c7ef9d71ac984a9b169fc197cbf18ff8" + integrity sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA== + +swrv@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/swrv/-/swrv-1.0.4.tgz#278b4811ed4acbb1ae46654972a482fd1847e480" + integrity sha512-zjEkcP8Ywmj+xOJW3lIT65ciY/4AL4e/Or7Gj0MzU3zBJNMdJiT8geVZhINavnlHRMMCcJLHhraLTAiDOTmQ9g== + test-exclude@^6.0.0: version "6.0.0" resolved "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz" @@ -4490,10 +4661,20 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" +use-sync-external-store@^1.2.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz#c3b6390f3a30eba13200d2302dcdf1e7b57b2ef9" + integrity sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw== + +uuid@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294" + integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ== + uuid@^9.0.0: - version "9.0.0" - resolved 
"https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5" - integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg== + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== v8-compile-cache-lib@^3.0.1: version "3.0.1" @@ -4611,6 +4792,11 @@ yallist@^4.0.0: resolved "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== +yaml@^2.2.1: + version "2.4.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.2.tgz#7a2b30f2243a5fc299e1f14ca58d475ed4bc5362" + integrity sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA== + yargs-parser@^21.0.1, yargs-parser@^21.1.1: version "21.1.1" resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz" @@ -4639,6 +4825,11 @@ yocto-queue@^0.1.0: resolved "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== +zod-to-json-schema@3.22.5: + version "3.22.5" + resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.5.tgz#3646e81cfc318dbad2a22519e5ce661615418673" + integrity sha512-+akaPo6a0zpVCCseDed504KBJUQpEW5QZw7RMneNmKw+fGaML1Z9tUNLnHHAC8x6dzVRO1eB2oEMyZRnuBZg7Q== + zod-to-json-schema@^3.22.3: version "3.22.4" resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.4.tgz#f8cc691f6043e9084375e85fb1f76ebafe253d70" @@ -4648,3 +4839,8 @@ zod@^3.22.4: version "3.22.4" resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff" integrity sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg== + +zod@^3.23.8: + version "3.23.8" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.23.8.tgz#e37b957b5d52079769fb8097099b592f0ef4067d" + integrity sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g== diff --git a/python/Makefile b/python/Makefile index 795e4c168..d06830bf9 100644 --- a/python/Makefile +++ b/python/Makefile @@ -1,7 +1,7 @@ .PHONY: tests lint format build publish doctest integration_tests integration_tests_fast evals tests: - poetry run python -m pytest -n auto --durations=10 tests/unit_tests + PYTHONDEVMODE=1 PYTHONASYNCIODEBUG=1 poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests tests_watch: poetry run ptw --now . 
-- -vv -x tests/unit_tests diff --git a/python/README.md b/python/README.md index 97fbfb296..85de1e11a 100644 --- a/python/README.md +++ b/python/README.md @@ -70,6 +70,7 @@ Tracing can be activated by setting the following environment variables or by ma import os os.environ["LANGSMITH_TRACING_V2"] = "true" os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com" +# os.environ["LANGSMITH_ENDPOINT"] = "https://eu.api.smith.langchain.com" # If signed up in the EU region os.environ["LANGSMITH_API_KEY"] = "" # os.environ["LANGSMITH_PROJECT"] = "My Project Name" # Optional: "default" is used if not set ``` diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index d53de8a51..b865c4754 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -4,12 +4,21 @@ if TYPE_CHECKING: from langsmith._expect import expect - from langsmith._testing import unit + from langsmith._testing import test, unit from langsmith.client import Client from langsmith.evaluation import aevaluate, evaluate from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator - from langsmith.run_helpers import trace, traceable + from langsmith.run_helpers import ( + get_current_run_tree, + get_tracing_context, + trace, + traceable, + tracing_context, + ) from langsmith.run_trees import RunTree + from langsmith.utils import ( + ContextThreadPoolExecutor, + ) def __getattr__(name: str) -> Any: @@ -44,10 +53,11 @@ def __getattr__(name: str) -> Any: from langsmith.run_helpers import traceable return traceable - elif name == "unit": - from langsmith._testing import unit - return unit + elif name == "test": + from langsmith._testing import test + + return test elif name == "expect": from langsmith._expect import expect @@ -61,6 +71,31 @@ def __getattr__(name: str) -> Any: from langsmith.evaluation import aevaluate return aevaluate + elif name == "tracing_context": + from langsmith.run_helpers import tracing_context + + return tracing_context + + elif name == "get_tracing_context": + from langsmith.run_helpers import get_tracing_context + + return get_tracing_context + + elif name == "get_current_run_tree": + from langsmith.run_helpers import get_current_run_tree + + return get_current_run_tree + + elif name == "unit": + from langsmith._testing import unit + + return unit + elif name == "ContextThreadPoolExecutor": + from langsmith.utils import ( + ContextThreadPoolExecutor, + ) + + return ContextThreadPoolExecutor raise AttributeError(f"module {__name__!r} has no attribute {name!r}") @@ -71,10 +106,16 @@ def __getattr__(name: str) -> Any: "__version__", "EvaluationResult", "RunEvaluator", + "anonymizer", "traceable", "trace", "unit", + "test", "expect", "evaluate", "aevaluate", + "tracing_context", + "get_tracing_context", + "get_current_run_tree", + "ContextThreadPoolExecutor", ] diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index db914c31e..967390597 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -1,14 +1,14 @@ """Make approximate assertions as "expectations" on test results. -This module is designed to be used within test cases decorated with the `@unit` decorator +This module is designed to be used within test cases decorated with the `@test` decorator It allows you to log scores about a test case and optionally make assertions that log as "expectation" feedback to LangSmith. 
Example usage: - from langsmith import expect, unit + from langsmith import expect, test - @unit + @test def test_output_semantically_close(): response = oai_client.chat.completions.create( model="gpt-3.5-turbo", @@ -37,7 +37,7 @@ def test_output_semantically_close(): # Or using a custom check expect.value(response_txt).against(lambda x: "Hello" in x) - # You can even use this for basic metric logging within unit tests + # You can even use this for basic metric logging within tests expect.score(0.8) expect.score(0.7, key="similarity").to_be_greater_than(0.7) @@ -46,9 +46,16 @@ def test_output_semantically_close(): from __future__ import annotations import atexit -import concurrent.futures import inspect -from typing import TYPE_CHECKING, Any, Callable, Optional, Union, overload +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Union, + overload, +) from langsmith import client as ls_client from langsmith import run_helpers as rh @@ -59,30 +66,46 @@ def test_output_semantically_close(): from langsmith._internal._embedding_distance import EmbeddingConfig +# Sentinel class used until PEP 0661 is accepted +class _NULL_SENTRY: + """A sentinel singleton class used to distinguish omitted keyword arguments + from those passed in with the value None (which may have different behavior). + """ # noqa: D205 + + def __bool__(self) -> Literal[False]: + return False + + def __repr__(self) -> str: + return "NOT_GIVEN" + + +NOT_GIVEN = _NULL_SENTRY() + + class _Matcher: """A class for making assertions on expectation values.""" def __init__( self, - client: ls_client.Client, + client: Optional[ls_client.Client], key: str, value: Any, - _executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, + _executor: Optional[ls_utils.ContextThreadPoolExecutor] = None, run_id: Optional[str] = None, ): - self.client = client + self._client = client self.key = key self.value = value - self._executor = _executor or concurrent.futures.ThreadPoolExecutor( - max_workers=3 - ) + self._executor = _executor or ls_utils.ContextThreadPoolExecutor(max_workers=3) rt = rh.get_current_run_tree() self._run_id = rt.trace_id if rt else run_id def _submit_feedback(self, score: int, message: Optional[str] = None) -> None: if not ls_utils.test_tracking_is_disabled(): + if not self._client: + self._client = ls_client.Client() self._executor.submit( - self.client.create_feedback, + self._client.create_feedback, run_id=self._run_id, key="expectation", score=score, @@ -179,6 +202,18 @@ def to_equal(self, value: float) -> None: "to_equal", ) + def to_be_none(self) -> None: + """Assert that the expectation value is None. + + Raises: + AssertionError: If the expectation value is not None. + """ + self._assert( + self.value is None, + f"Expected {self.key} to be None, but got {self.value}", + "to_be_none", + ) + def to_contain(self, value: Any) -> None: """Assert that the expectation value contains the given value. 
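For a quick illustration of how the renamed `test` decorator and the new `to_be_none()` matcher fit together, a minimal sketch (illustrative only, not part of this patch; the `result` dict is a hypothetical stand-in for real application output):

```python
from langsmith import expect, test


@test
def test_lookup_capital():
    # Hypothetical output from the code under test.
    result = {"answer": "Paris", "error": None}
    # expect(...) now distinguishes an omitted argument (NOT_GIVEN) from an explicit None.
    expect(result["answer"]).to_contain("Paris")
    # New matcher added in the hunk above: assert the value is None.
    expect.value(result["error"]).to_be_none()
```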
@@ -216,8 +251,8 @@ class _Expect: """A class for setting expectations on test results.""" def __init__(self, *, client: Optional[ls_client.Client] = None): - self.client = client or ls_client.Client() - self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) + self._client = client + self.executor = ls_utils.ContextThreadPoolExecutor(max_workers=3) atexit.register(self.executor.shutdown, wait=True) def embedding_distance( @@ -271,7 +306,7 @@ def embedding_distance( }, ) return _Matcher( - self.client, "embedding_distance", score, _executor=self.executor + self._client, "embedding_distance", score, _executor=self.executor ) def edit_distance( @@ -321,7 +356,7 @@ def edit_distance( }, ) return _Matcher( - self.client, + self._client, "edit_distance", score, _executor=self.executor, @@ -339,7 +374,7 @@ def value(self, value: Any) -> _Matcher: Examples: >>> expect.value(10).to_be_less_than(20) """ - return _Matcher(self.client, "value", value, _executor=self.executor) + return _Matcher(self._client, "value", value, _executor=self.executor) def score( self, @@ -370,7 +405,7 @@ def score( "comment": comment, }, ) - return _Matcher(self.client, key, score, _executor=self.executor) + return _Matcher(self._client, key, score, _executor=self.executor) ## Private Methods @@ -381,10 +416,13 @@ def __call__(self, value: Any, /) -> _Matcher: ... def __call__(self, /, *, client: ls_client.Client) -> _Expect: ... def __call__( - self, value: Optional[Any] = None, /, client: Optional[ls_client.Client] = None + self, + value: Optional[Any] = NOT_GIVEN, + /, + client: Optional[ls_client.Client] = None, ) -> Union[_Expect, _Matcher]: expected = _Expect(client=client) - if value is not None: + if value is not NOT_GIVEN: return expected.value(value) return expected @@ -392,8 +430,10 @@ def _submit_feedback(self, key: str, results: dict): current_run = rh.get_current_run_tree() run_id = current_run.trace_id if current_run else None if not ls_utils.test_tracking_is_disabled(): + if not self._client: + self._client = ls_client.Client() self.executor.submit( - self.client.create_feedback, run_id=run_id, key=key, **results + self._client.create_feedback, run_id=run_id, key=key, **results ) diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index aeb9d857a..7ae217f68 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -6,6 +6,8 @@ """ import asyncio +import contextvars +import functools import inspect from collections import deque from typing import ( @@ -277,8 +279,13 @@ async def process_item(item): async def process_generator(): tasks = [] + accepts_context = asyncio_accepts_context() async for item in generator: - task = asyncio.create_task(process_item(item)) + if accepts_context: + context = contextvars.copy_context() + task = asyncio.create_task(process_item(item), context=context) + else: + task = asyncio.create_task(process_item(item)) tasks.append(task) if n is not None and len(tasks) >= n: done, pending = await asyncio.wait( @@ -300,3 +307,28 @@ def accepts_context(callable: Callable[..., Any]) -> bool: return inspect.signature(callable).parameters.get("context") is not None except ValueError: return False + + +# Ported from Python 3.9+ to support Python 3.8 +async def aio_to_thread( + func, /, *args, __ctx: Optional[contextvars.Context] = None, **kwargs +): + """Asynchronously run function *func* in a separate thread. + + Any *args and **kwargs supplied for this function are directly passed + to *func*. 
Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. + + Return a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = asyncio.get_running_loop() + ctx = __ctx or contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) + + +@functools.lru_cache(maxsize=1) +def asyncio_accepts_context(): + """Check if the current asyncio event loop accepts a context argument.""" + return accepts_context(asyncio.create_task) diff --git a/python/langsmith/beta/_utils.py b/python/langsmith/_internal/_beta_decorator.py similarity index 80% rename from python/langsmith/beta/_utils.py rename to python/langsmith/_internal/_beta_decorator.py index d1ebcbe1b..433ade058 100644 --- a/python/langsmith/beta/_utils.py +++ b/python/langsmith/_internal/_beta_decorator.py @@ -11,7 +11,7 @@ def warn_beta(func: Callable) -> Callable: @functools.wraps(func) def wrapper(*args, **kwargs): warnings.warn( - f"Function {func.__name__} is in beta.", UserWarning, stacklevel=2 + f"Function {func.__name__} is in beta.", LangSmithBetaWarning, stacklevel=2 ) return func(*args, **kwargs) diff --git a/python/langsmith/_internal/_embedding_distance.py b/python/langsmith/_internal/_embedding_distance.py index 4eb9fc83b..dff2d1f00 100644 --- a/python/langsmith/_internal/_embedding_distance.py +++ b/python/langsmith/_internal/_embedding_distance.py @@ -15,7 +15,7 @@ from typing_extensions import TypedDict if TYPE_CHECKING: - import numpy as np + import numpy as np # type: ignore logger = logging.getLogger(__name__) diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 5e03af80f..3d5ac9c3b 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -1,7 +1,6 @@ from __future__ import annotations import atexit -import concurrent.futures import datetime import functools import inspect @@ -40,13 +39,13 @@ class SkipException(Exception): # type: ignore[no-redef] @overload -def unit( +def test( func: Callable, ) -> Callable: ... @overload -def unit( +def test( *, id: Optional[uuid.UUID] = None, output_keys: Optional[Sequence[str]] = None, @@ -55,8 +54,8 @@ def unit( ) -> Callable[[Callable], Callable]: ... -def unit(*args: Any, **kwargs: Any) -> Callable: - """Create a unit test case in LangSmith. +def test(*args: Any, **kwargs: Any) -> Callable: + """Create a test case in LangSmith. This decorator is used to mark a function as a test case for LangSmith. It ensures that the necessary example data is created and associated with the test function. @@ -90,9 +89,9 @@ def unit(*args: Any, **kwargs: Any) -> Callable: without re-executing the code. Requires the 'langsmith[vcr]' package. Example: - For basic usage, simply decorate a test function with `@unit`: + For basic usage, simply decorate a test function with `@test`: - >>> @unit + >>> @test ... def test_addition(): ... assert 3 + 4 == 7 @@ -106,7 +105,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: ... def generate_numbers(): ... return 3, 4 - >>> @unit + >>> @test ... def test_nested(): ... # Traced code will be included in the test case ... a, b = generate_numbers() @@ -128,7 +127,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: >>> import openai >>> from langsmith.wrappers import wrap_openai >>> oai_client = wrap_openai(openai.Client()) - >>> @unit + >>> @test ... def test_openai_says_hello(): ... 
# Traced code will be included in the test case ... response = oai_client.chat.completions.create( @@ -144,7 +143,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: `expect` to score and make approximate assertions on your results. >>> from langsmith import expect - >>> @unit + >>> @test ... def test_output_semantically_close(): ... response = oai_client.chat.completions.create( ... model="gpt-3.5-turbo", @@ -168,7 +167,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: ... # And then log a pass/fail score to LangSmith ... ).to_be_less_than(1.0) - The `@unit` decorator works natively with pytest fixtures. + The `@test` decorator works natively with pytest fixtures. The values will populate the "inputs" of the corresponding example in LangSmith. >>> import pytest @@ -176,7 +175,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: ... def some_input(): ... return "Some input" >>> - >>> @unit + >>> @test ... def test_with_fixture(some_input: str): ... assert "input" in some_input >>> @@ -184,7 +183,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: You can still use pytest.parametrize() as usual to run multiple test cases using the same test function. - >>> @unit(output_keys=["expected"]) + >>> @test(output_keys=["expected"]) ... @pytest.mark.parametrize( ... "a, b, expected", ... [ @@ -198,18 +197,18 @@ def unit(*args: Any, **kwargs: Any) -> Callable: By default, each test case will be assigned a consistent, unique identifier based on the function name and module. You can also provide a custom identifier using the `id` argument: - >>> @unit(id="1a77e4b5-1d38-4081-b829-b0442cf3f145") + >>> @test(id="1a77e4b5-1d38-4081-b829-b0442cf3f145") ... def test_multiplication(): ... assert 3 * 4 == 12 - By default, all unit test inputs are saved as "inputs" to a dataset. + By default, all test inputs are saved as "inputs" to a dataset. You can specify the `output_keys` argument to persist those keys within the dataset's "outputs" fields. >>> @pytest.fixture ... def expected_output(): ... return "input" - >>> @unit(output_keys=["expected_output"]) + >>> @test(output_keys=["expected_output"]) ... def test_with_expected_output(some_input: str, expected_output: str): ... 
assert expected_output in some_input @@ -299,7 +298,7 @@ def _get_test_suite( return client.read_dataset(dataset_name=test_suite_name) else: repo = ls_env.get_git_info().get("remote_url") or "" - description = "Unit test suite" + description = "Test suite" if repo: description += f" for {repo}" return client.create_dataset( @@ -392,7 +391,7 @@ def __init__( self._experiment = experiment self._dataset = dataset self._version: Optional[datetime.datetime] = None - self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + self._executor = ls_utils.ContextThreadPoolExecutor(max_workers=1) atexit.register(_end_tests, self) @property @@ -409,10 +408,13 @@ def experiment(self): @classmethod def from_test( - cls, client: Optional[ls_client.Client], func: Callable + cls, + client: Optional[ls_client.Client], + func: Callable, + test_suite_name: Optional[str] = None, ) -> _LangSmithTestSuite: client = client or ls_client.Client() - test_suite_name = _get_test_suite_name(func) + test_suite_name = test_suite_name or _get_test_suite_name(func) with cls._lock: if not cls._instances: cls._instances = {} @@ -496,6 +498,7 @@ def _sync_example( outputs=outputs_, dataset_id=self.id, metadata=metadata, + created_at=self._experiment.start_time, ) if example.modified_at: self.update_version(example.modified_at) @@ -531,7 +534,9 @@ def _ensure_example( if output_keys: for k in output_keys: outputs[k] = inputs.pop(k, None) - test_suite = _LangSmithTestSuite.from_test(client, func) + test_suite = _LangSmithTestSuite.from_test( + client, func, langtest_extra.get("test_suite_name") + ) example_id, example_name = _get_id(func, inputs, test_suite.id) example_id = langtest_extra["id"] or example_id test_suite.sync_example( @@ -669,3 +674,7 @@ async def _test(): cache_path, ignore_hosts=[test_suite.client.api_url] ): await _test() + + +# For backwards compatibility +unit = test diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py new file mode 100644 index 000000000..77e1136f6 --- /dev/null +++ b/python/langsmith/anonymizer.py @@ -0,0 +1,179 @@ +import re # noqa +import inspect +from abc import abstractmethod +from collections import defaultdict +from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union + + +class _ExtractOptions(TypedDict): + max_depth: Optional[int] + """ + Maximum depth to traverse to extract string nodes + """ + + +class StringNode(TypedDict): + """String node extracted from the data.""" + + value: str + """String value.""" + + path: List[Union[str, int]] + """Path to the string node in the data.""" + + +def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]: + max_depth = options.get("max_depth") or 10 + + queue: List[Tuple[Any, int, List[Union[str, int]]]] = [(data, 0, [])] + result: List[StringNode] = [] + + while queue: + task = queue.pop(0) + if task is None: + continue + value, depth, path = task + + if isinstance(value, (dict, defaultdict)): + if depth >= max_depth: + continue + for key, nested_value in value.items(): + queue.append((nested_value, depth + 1, path + [key])) + elif isinstance(value, list): + if depth >= max_depth: + continue + for i, item in enumerate(value): + queue.append((item, depth + 1, path + [i])) + elif isinstance(value, str): + result.append(StringNode(value=value, path=path)) + + return result + + +class StringNodeProcessor: + """Processes a list of string nodes for masking.""" + + @abstractmethod + def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: + """Accept and 
return a list of string nodes to be masked.""" + + +class ReplacerOptions(TypedDict): + """Configuration options for replacing sensitive data.""" + + max_depth: Optional[int] + """Maximum depth to traverse to extract string nodes.""" + + deep_clone: Optional[bool] + """Deep clone the data before replacing.""" + + +class StringNodeRule(TypedDict): + """Declarative rule used for replacing sensitive data.""" + + pattern: re.Pattern + """Regex pattern to match.""" + + replace: Optional[str] + """Replacement value. Defaults to `[redacted]` if not specified.""" + + +class RuleNodeProcessor(StringNodeProcessor): + """String node processor that uses a list of rules to replace sensitive data.""" + + rules: List[StringNodeRule] + + def __init__(self, rules: List[StringNodeRule]): + """Initialize the processor with a list of rules.""" + self.rules = rules + + def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: + """Mask nodes using the rules.""" + result = [] + for item in nodes: + new_value = item["value"] + for rule in self.rules: + new_value = rule["pattern"].sub( + ( + rule["replace"] + if isinstance(rule["replace"], str) + else "[redacted]" + ), + new_value, + ) + if new_value != item["value"]: + result.append(StringNode(value=new_value, path=item["path"])) + return result + + +class CallableNodeProcessor(StringNodeProcessor): + """String node processor that uses a callable function to replace sensitive data.""" + + func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]] + accepts_path: bool + + def __init__( + self, + func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]], + ): + """Initialize the processor with a callable function.""" + self.func = func + self.accepts_path = len(inspect.signature(func).parameters) == 2 + + def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: + """Mask nodes using the callable function.""" + retval: List[StringNode] = [] + for node in nodes: + candidate = ( + self.func(node["value"], node["path"]) # type: ignore[call-arg] + if self.accepts_path + else self.func(node["value"]) # type: ignore[call-arg] + ) + if candidate != node["value"]: + retval.append(StringNode(value=candidate, path=node["path"])) + return retval + + +ReplacerType = Union[ + Callable[[str, List[Union[str, int]]], str], + List[StringNodeRule], + StringNodeProcessor, +] + + +def _get_node_processor(replacer: ReplacerType) -> StringNodeProcessor: + if isinstance(replacer, list): + return RuleNodeProcessor(rules=replacer) + elif callable(replacer): + return CallableNodeProcessor(func=replacer) + else: + return replacer + + +def create_anonymizer( + replacer: ReplacerType, + *, + max_depth: Optional[int] = None, +) -> Callable[[Any], Any]: + """Create an anonymizer function.""" + processor = _get_node_processor(replacer) + + def anonymizer(data: Any) -> Any: + nodes = _extract_string_nodes(data, {"max_depth": max_depth or 10}) + mutate_value = data + + to_update = processor.mask_nodes(nodes) + for node in to_update: + if not node["path"]: + mutate_value = node["value"] + else: + temp = mutate_value + for part in node["path"][:-1]: + temp = temp[part] + + last_part = node["path"][-1] + temp[last_part] = node["value"] + + return mutate_value + + return anonymizer diff --git a/python/langsmith/beta/__init__.py b/python/langsmith/beta/__init__.py index 9240296a3..f712c1adb 100644 --- a/python/langsmith/beta/__init__.py +++ b/python/langsmith/beta/__init__.py @@ -1,6 +1,6 @@ """Beta functionality prone to change.""" +from 
langsmith._internal._beta_decorator import warn_beta from langsmith.beta._evals import compute_test_metrics, convert_runs_to_test -from langsmith.beta._utils import warn_beta __all__ = ["convert_runs_to_test", "compute_test_metrics", "warn_beta"] diff --git a/python/langsmith/beta/_evals.py b/python/langsmith/beta/_evals.py index f41bc8785..de6103d81 100644 --- a/python/langsmith/beta/_evals.py +++ b/python/langsmith/beta/_evals.py @@ -4,15 +4,14 @@ """ import collections -import concurrent.futures import datetime import itertools import uuid from typing import DefaultDict, List, Optional, Sequence, Tuple, TypeVar -import langsmith.beta._utils as beta_utils import langsmith.schemas as ls_schemas from langsmith import evaluation as ls_eval +from langsmith._internal._beta_decorator import warn_beta from langsmith.client import Client @@ -66,7 +65,7 @@ def _convert_root_run(root: ls_schemas.Run, run_to_example_map: dict) -> List[di return result -@beta_utils.warn_beta +@warn_beta def convert_runs_to_test( runs: Sequence[ls_schemas.Run], *, @@ -197,7 +196,7 @@ def _outer_product(list1: List[T], list2: List[U]) -> List[Tuple[T, U]]: return list(itertools.product(list1, list2)) -@beta_utils.warn_beta +@warn_beta def compute_test_metrics( project_name: str, *, @@ -218,6 +217,8 @@ def compute_test_metrics( Returns: None: This function does not return any value. """ + from langsmith import ContextThreadPoolExecutor + evaluators_: List[ls_eval.RunEvaluator] = [] for func in evaluators: if isinstance(func, ls_eval.RunEvaluator): @@ -230,7 +231,7 @@ def compute_test_metrics( ) client = client or Client() traces = _load_nested_traces(project_name, client) - with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ContextThreadPoolExecutor(max_workers=max_concurrency) as executor: results = executor.map( client.evaluate_run, *zip(*_outer_product(traces, evaluators_)) ) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 7e32c3802..2d6722926 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,7 +1,7 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.2.17 +_LANGSMITH_IMAGE_VERSION=0.6.9 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key -OPENAI_API_KEY=your-openai-api-key # Needed for Online Evals and Magic Query features +OPENAI_API_KEY=your-openai-api-key # Needed for Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 OAUTH_CLIENT_ID=your-client-id # Required if AUTH_TYPE=oauth OAUTH_ISSUER_URL=https://your-issuer-url # Required if AUTH_TYPE=oauth @@ -10,4 +10,12 @@ POSTGRES_DATABASE_URI=postgres:postgres@langchain-db:5432/postgres # Change to y REDIS_DATABASE_URI=redis://langchain-redis:6379 # Change to your Redis URI if using external Redis. Otherwise, leave it as is LOG_LEVEL=warning # Change to your desired log level MAX_ASYNC_JOBS_PER_WORKER=10 # Change to your desired maximum async jobs per worker. We recommend 10/suggest spinning up more replicas of the queue worker if you need more throughput -ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} # Change the PG pool size based off your pg instance/requirements. +ASYNCPG_POOL_MAX_SIZE=3 # Change the PG pool size based off your pg instance/requirements. 
+CLICKHOUSE_HOST=langchain-clickhouse # Change to your Clickhouse host if using external Clickhouse. Otherwise, leave it as is +CLICKHOUSE_USER=default # Change to your Clickhouse user if needed +CLICKHOUSE_DB=default # Change to your Clickhouse database if needed +CLICKHOUSE_PORT=8123 # Change to your Clickhouse port if needed +CLICKHOUSE_TLS=false # Change to true if you are using TLS to connect to Clickhouse. Otherwise, leave it as is +CLICKHOUSE_PASSWORD=password # Change to your Clickhouse password if needed +CLICKHOUSE_NATIVE_PORT=9000 # Change to your Clickhouse native port if needed +ORG_CREATION_DISABLED=false # Set to true if you want to disable org creation diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index e83631175..172cc6e5d 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,25 +1,26 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.9} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - VITE_OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} ports: - - 80:80 + - 1980:1980 depends_on: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker + - GO_ENDPOINT=http://langchain-platform-backend:1986 - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} - LOG_LEVEL=${LOG_LEVEL:-warning} @@ -29,9 +30,17 @@ services: - API_KEY_SALT=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} - - INGESTION_QUEUE=default - - ADHOC_QUEUE=default - - RUN_RULES_QUEUE=default + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} + - FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false} + - BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - INITIAL_ORG_ADMIN_EMAIL=${INITIAL_ORG_ADMIN_EMAIL} + - INITIAL_ORG_ADMIN_PASSWORD=${INITIAL_ORG_ADMIN_PASSWORD} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} ports: - 1984:1984 depends_on: @@ -44,10 +53,39 @@ services: postgres-setup: condition: service_completed_successfully restart: always + langchain-platform-backend: + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + environment: + - PORT=1986 + - LANGCHAIN_ENV=local_docker + - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - LOG_LEVEL=${LOG_LEVEL:-warning} + - AUTH_TYPE=${AUTH_TYPE:-none} + - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} + - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} + - API_KEY_SALT=${API_KEY_SALT} + - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} + - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - 
BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - INITIAL_ORG_ADMIN_EMAIL=${INITIAL_ORG_ADMIN_EMAIL} + - INITIAL_ORG_ADMIN_PASSWORD=${INITIAL_ORG_ADMIN_PASSWORD} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} + ports: + - 1986:1986 + depends_on: + langchain-db: + condition: service_healthy + langchain-redis: + condition: service_healthy + clickhouse-setup: + condition: service_completed_successfully + postgres-setup: + condition: service_completed_successfully + restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - - PORT=1984 - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} @@ -60,9 +98,12 @@ services: - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} - MAX_ASYNC_JOBS_PER_WORKER=${MAX_ASYNC_JOBS_PER_WORKER:-10} - ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} - - INGESTION_QUEUE=default - - ADHOC_QUEUE=default - - RUN_RULES_QUEUE=default + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} command: - "saq" - "app.workers.queues.single_queue_worker.settings" @@ -112,13 +153,13 @@ services: timeout: 2s retries: 30 langchain-clickhouse: - image: clickhouse/clickhouse-server:23.9 + image: clickhouse/clickhouse-server:24.2 user: "101:101" restart: always environment: - - CLICKHOUSE_DB=default - - CLICKHOUSE_USER=default - - CLICKHOUSE_PASSWORD=password + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} volumes: - langchain-clickhouse-data:/var/lib/clickhouse - ./users.xml:/etc/clickhouse-server/users.d/users.xml @@ -131,30 +172,26 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} depends_on: langchain-clickhouse: condition: service_healthy restart: "on-failure:10" environment: - - PORT=1984 - - LANGCHAIN_ENV=local_docker - - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - LOG_LEVEL=${LOG_LEVEL:-warning} - - AUTH_TYPE=${AUTH_TYPE:-none} - - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} entrypoint: [ "bash", "-c", - "migrate -source file://clickhouse/migrations -database 'clickhouse://langchain-clickhouse:9000?username=default&password=password&database=default&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", + "migrate -source file://clickhouse/migrations -database 
'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} depends_on: langchain-db: condition: service_healthy @@ -163,11 +200,20 @@ services: - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} - LOG_LEVEL=${LOG_LEVEL:-warning} + - AUTH_TYPE=${AUTH_TYPE:-none} - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - API_KEY_SALT=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - MAX_ASYNC_JOBS_PER_WORKER=${MAX_ASYNC_JOBS_PER_WORKER:-10} + - ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} restart: "on-failure:10" entrypoint: [ diff --git a/python/langsmith/cli/main.py b/python/langsmith/cli/main.py index 15bc3fc02..f6240ef06 100644 --- a/python/langsmith/cli/main.py +++ b/python/langsmith/cli/main.py @@ -101,12 +101,12 @@ def _start_local(self) -> None: def pull( self, *, - version: str = "0.2.17", + version: str = "0.5.7", ) -> None: """Pull the latest LangSmith images. Args: - version: The LangSmith version to use for LangSmith. Defaults to 0.2.17 + version: The LangSmith version to use for LangSmith. Defaults to 0.5.7 """ os.environ["_LANGSMITH_IMAGE_VERSION"] = version subprocess.run( @@ -123,7 +123,7 @@ def start( *, openai_api_key: Optional[str] = None, langsmith_license_key: str, - version: str = "0.2.17", + version: str = "0.5.7", ) -> None: """Run the LangSmith server locally. @@ -251,8 +251,8 @@ def main() -> None: ) server_start_parser.add_argument( "--version", - default="0.2.17", - help="The LangSmith version to use for LangSmith. Defaults to 0.2.17.", + default="0.5.7", + help="The LangSmith version to use for LangSmith. Defaults to 0.5.7.", ) server_start_parser.set_defaults( func=lambda args: server_command.start( @@ -279,8 +279,8 @@ def main() -> None: ) server_pull_parser.add_argument( "--version", - default="0.2.17", - help="The LangSmith version to use for LangSmith. Defaults to 0.2.17.", + default="0.5.7", + help="The LangSmith version to use for LangSmith. 
Defaults to 0.5.7.", ) server_pull_parser.set_defaults( func=lambda args: server_command.pull(version=args.version) diff --git a/python/langsmith/cli/users.xml b/python/langsmith/cli/users.xml index d08fcfd28..c29aa8b57 100644 --- a/python/langsmith/cli/users.xml +++ b/python/langsmith/cli/users.xml @@ -12,7 +12,6 @@ 1 2000000 - 1 0 1 diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 872f83530..b48a16be1 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4,6 +4,7 @@ import atexit import collections +import concurrent.futures as cf import datetime import functools import importlib @@ -18,6 +19,7 @@ import sys import threading import time +import typing import uuid import warnings import weakref @@ -46,12 +48,14 @@ import orjson import requests from requests import adapters as requests_adapters +from typing_extensions import TypeGuard from urllib3.util import Retry import langsmith from langsmith import env as ls_env from langsmith import schemas as ls_schemas from langsmith import utils as ls_utils +from langsmith._internal._beta_decorator import warn_beta if TYPE_CHECKING: import pandas as pd # type: ignore @@ -86,7 +90,10 @@ def _is_localhost(url: str) -> bool: def _parse_token_or_url( - url_or_token: Union[str, uuid.UUID], api_url: str, num_parts: int = 2 + url_or_token: Union[str, uuid.UUID], + api_url: str, + num_parts: int = 2, + kind: str = "dataset", ) -> Tuple[str, str]: """Parse a public dataset URL or share token.""" try: @@ -102,7 +109,7 @@ def _parse_token_or_url( if len(path_parts) >= num_parts: token_uuid = path_parts[-num_parts] else: - raise ls_utils.LangSmithUserError(f"Invalid public dataset URL: {url_or_token}") + raise ls_utils.LangSmithUserError(f"Invalid public {kind} URL: {url_or_token}") return api_url, token_uuid @@ -149,6 +156,7 @@ def _default_retry_config() -> Retry: # Sadly urllib3 1.x doesn't support backoff_jitter raise_on_redirect=False, raise_on_status=False, + respect_retry_after_header=True, ) # the `allowed_methods` keyword is not available in urllib3 < 1.26 @@ -264,7 +272,9 @@ def _dumps_json_single( ensure_ascii=True, ).encode("utf-8") try: - result = orjson.dumps(orjson.loads(result.decode("utf-8", errors="lossy"))) + result = orjson.dumps( + orjson.loads(result.decode("utf-8", errors="surrogateescape")) + ) except orjson.JSONDecodeError: result = _elide_surrogates(result) return result @@ -406,6 +416,24 @@ def _as_uuid(value: ID_TYPE, var: Optional[str] = None) -> uuid.UUID: ) from e +@typing.overload +def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: ... + + +@typing.overload +def _ensure_uuid( + value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = True +) -> Optional[uuid.UUID]: ... 
+ + +def _ensure_uuid(value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = False): + if value is None: + if accept_null: + return None + return uuid.uuid4() + return _as_uuid(value) + + @functools.lru_cache(maxsize=1) def _parse_url(url): parsed_url = urllib_parse.urlparse(url) @@ -444,10 +472,12 @@ class Client: "tracing_sample_rate", "_sampled_post_uuids", "tracing_queue", + "_anonymizer", "_hide_inputs", "_hide_outputs", "_info", "_write_api_urls", + "_settings", ] def __init__( @@ -460,6 +490,7 @@ def __init__( web_url: Optional[str] = None, session: Optional[requests.Session] = None, auto_batch_tracing: bool = True, + anonymizer: Optional[Callable[[dict], dict]] = None, hide_inputs: Optional[Union[Callable[[dict], dict], bool]] = None, hide_outputs: Optional[Union[Callable[[dict], dict], bool]] = None, info: Optional[Union[dict, ls_schemas.LangSmithInfo]] = None, @@ -485,6 +516,9 @@ def __init__( session: requests.Session or None, default=None The session to use for requests. If None, a new session will be created. + anonymizer : Optional[Callable[[dict], dict]] + A function applied for masking serialized run inputs and outputs, + before sending to the API. hide_inputs: Whether to hide run inputs when tracing with this client. If True, hides the entire inputs. If a function, applied to all run inputs when creating runs. @@ -574,6 +608,7 @@ def __init__( self._get_data_type_cached = functools.lru_cache(maxsize=10)( self._get_data_type ) + self._anonymizer = anonymizer self._hide_inputs = ( hide_inputs if hide_inputs is not None @@ -585,6 +620,8 @@ def __init__( else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) + self._settings: Union[ls_schemas.LangSmithSettings, None] = None + def _repr_html_(self) -> str: """Return an HTML representation of the instance with a link to the URL. @@ -622,6 +659,8 @@ def _host_url(self) -> str: elif parsed_url.path.endswith("/api"): new_path = parsed_url.path.rsplit("/api", 1)[0] link = urllib_parse.urlunparse(parsed_url._replace(path=new_path)) + elif parsed_url.netloc.startswith("eu."): + link = "https://eu.smith.langchain.com" elif parsed_url.netloc.startswith("dev."): link = "https://dev.smith.langchain.com" else: @@ -657,8 +696,9 @@ def info(self) -> ls_schemas.LangSmithInfo: """ if self._info is None: try: - response = self.session.get( - self.api_url + "/info", + response = self.request_with_retries( + "GET", + "/info", headers={"Accept": "application/json"}, timeout=(self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000), ) @@ -671,6 +711,19 @@ def info(self) -> ls_schemas.LangSmithInfo: self._info = ls_schemas.LangSmithInfo() return self._info + def _get_settings(self) -> ls_schemas.LangSmithSettings: + """Get the settings for the current tenant. + + Returns: + dict: The settings for the current tenant. 
+ """ + if self._settings is None: + response = self.request_with_retries("GET", "/settings") + ls_utils.raise_for_status_with_text(response) + self._settings = ls_schemas.LangSmithSettings(**response.json()) + + return self._settings + def request_with_retries( self, /, @@ -725,18 +778,19 @@ def request_with_retries( """ request_kwargs = request_kwargs or {} request_kwargs = { + "timeout": (self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000), + **request_kwargs, + **kwargs, "headers": { **self._headers, **request_kwargs.get("headers", {}), **kwargs.get("headers", {}), }, - "timeout": (self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000), - **request_kwargs, - **kwargs, } if ( method != "GET" and "data" in request_kwargs + and "files" not in request_kwargs and not request_kwargs["headers"].get("Content-Type") ): request_kwargs["headers"]["Content-Type"] = "application/json" @@ -746,7 +800,10 @@ def request_with_retries( ] retry_on_: Tuple[Type[BaseException], ...] = ( *(retry_on or []), - *(ls_utils.LangSmithConnectionError, ls_utils.LangSmithAPIError), + *( + ls_utils.LangSmithConnectionError, + ls_utils.LangSmithAPIError, + ), ) to_ignore_: Tuple[Type[BaseException], ...] = (*(to_ignore or ()),) response = None @@ -830,9 +887,14 @@ def request_with_retries( args = list(e.args) msg = args[1] if len(args) > 1 else "" msg = msg.replace("session", "session (project)") - emsg = "\n".join( - [str(args[0])] + [msg] + [str(arg) for arg in args[2:]] - ) + if args: + emsg = "\n".join( + [str(args[0])] + + [msg] + + [str(arg) for arg in (args[2:] if len(args) > 2 else [])] + ) + else: + emsg = msg raise ls_utils.LangSmithError( f"Failed to {method} {pathname} in LangSmith API. {emsg}" ) from e @@ -840,13 +902,29 @@ def request_with_retries( if response is not None: logger.debug("Passing on exception %s", e) return response - # Else we still raise an error + except ls_utils.LangSmithRateLimitError: + if idx + 1 == stop_after_attempt: + raise + if response is not None: + try: + retry_after = float(response.headers.get("retry-after", "30")) + except Exception as e: + logger.warning( + "Invalid retry-after header: %s", + repr(e), + ) + retry_after = 30 + # Add exponential backoff + retry_after = retry_after * 2**idx + random.random() + time.sleep(retry_after) except retry_on_: + # Handle other exceptions more immediately if idx + 1 == stop_after_attempt: raise sleep_time = 2**idx + (random.random() * 0.5) time.sleep(sleep_time) continue + # Else we still raise an error raise ls_utils.LangSmithError( f"Failed to {method} {pathname} in LangSmith API." @@ -1035,19 +1113,20 @@ def upload_csv( data["description"] = description if data_type: data["data_type"] = ls_utils.get_enum_value(data_type) + data["id"] = str(uuid.uuid4()) if isinstance(csv_file, str): with open(csv_file, "rb") as f: file_ = {"file": f} - response = self.session.post( - self.api_url + "/datasets/upload", - headers=self._headers, + response = self.request_with_retries( + "POST", + "/datasets/upload", data=data, files=file_, ) elif isinstance(csv_file, tuple): - response = self.session.post( - self.api_url + "/datasets/upload", - headers=self._headers, + response = self.request_with_retries( + "POST", + "/datasets/upload", data=data, files={"file": csv_file}, ) @@ -1070,11 +1149,14 @@ def _run_transform( self, run: Union[ls_schemas.Run, dict, ls_schemas.RunLikeDict], update: bool = False, + copy: bool = False, ) -> dict: """Transform the given run object into a dictionary representation. 
Args: run (Union[ls_schemas.Run, dict]): The run object to transform. + update (bool, optional): Whether to update the run. Defaults to False. + copy (bool, optional): Whether to copy the run. Defaults to False. Returns: dict: The transformed run object as a dictionary. @@ -1088,8 +1170,12 @@ def _run_transform( elif isinstance(run_create["id"], str): run_create["id"] = uuid.UUID(run_create["id"]) if "inputs" in run_create and run_create["inputs"] is not None: + if copy: + run_create["inputs"] = ls_utils.deepish_copy(run_create["inputs"]) run_create["inputs"] = self._hide_run_inputs(run_create["inputs"]) if "outputs" in run_create and run_create["outputs"] is not None: + if copy: + run_create["outputs"] = ls_utils.deepish_copy(run_create["outputs"]) run_create["outputs"] = self._hide_run_outputs(run_create["outputs"]) if not update and not run_create.get("start_time"): run_create["start_time"] = datetime.datetime.now(datetime.timezone.utc) @@ -1177,9 +1263,7 @@ def create_run( } if not self._filter_for_sampling([run_create]): return - run_create = self._run_transform(run_create) - self._insert_runtime_env([run_create]) - + run_create = self._run_transform(run_create, copy=True) if revision_id is not None: run_create["extra"]["metadata"]["revision_id"] = revision_id if ( @@ -1191,6 +1275,7 @@ def create_run( return self.tracing_queue.put( TracingQueueItem(run_create["dotted_order"], "create", run_create) ) + self._insert_runtime_env([run_create]) self._create_run(run_create) def _create_run(self, run_create: dict): @@ -1207,17 +1292,23 @@ def _create_run(self, run_create: dict): ) def _hide_run_inputs(self, inputs: dict): - if self._hide_inputs is False: - return inputs if self._hide_inputs is True: return {} + if self._anonymizer: + json_inputs = orjson.loads(_dumps_json(inputs)) + return self._anonymizer(json_inputs) + if self._hide_inputs is False: + return inputs return self._hide_inputs(inputs) def _hide_run_outputs(self, outputs: dict): - if self._hide_outputs is False: - return outputs if self._hide_outputs is True: return {} + if self._anonymizer: + json_outputs = orjson.loads(_dumps_json(outputs)) + return self._anonymizer(json_outputs) + if self._hide_outputs is False: + return outputs return self._hide_outputs(outputs) def batch_ingest_runs( @@ -1322,23 +1413,6 @@ def batch_ingest_runs( self._post_batch_ingest_runs(orjson.dumps(body_chunks)) def _post_batch_ingest_runs(self, body: bytes): - def handle_429(response: requests.Response, attempt: int) -> bool: - # Min of 30 seconds, max of 1 minute - if response.status_code == 429: - try: - retry_after = float(response.headers.get("retry-after", "30")) - except ValueError: - logger.warning( - "Invalid retry-after header value: %s", - response.headers.get("retry-after"), - ) - retry_after = 30 - # Add exponential backoff - retry_after = retry_after * 2 ** (attempt - 1) + random.random() - time.sleep(retry_after) - return True - return False - try: for api_url, api_key in self._write_api_urls.items(): self.request_with_retries( @@ -1353,7 +1427,6 @@ def handle_429(response: requests.Response, attempt: int) -> bool: }, to_ignore=(ls_utils.LangSmithConflictError,), stop_after_attempt=3, - handle_response=handle_429, ) except Exception as e: logger.warning(f"Failed to batch ingest runs: {repr(e)}") @@ -1413,6 +1486,7 @@ def update_run( if inputs is not None: data["inputs"] = self._hide_run_inputs(inputs) if outputs is not None: + outputs = ls_utils.deepish_copy(outputs) data["outputs"] = self._hide_run_outputs(outputs) if events is not 
None: data["events"] = events @@ -1699,6 +1773,93 @@ def list_runs( if limit is not None and i + 1 >= limit: break + def get_run_stats( + self, + *, + id: Optional[List[ID_TYPE]] = None, + trace: Optional[ID_TYPE] = None, + parent_run: Optional[ID_TYPE] = None, + run_type: Optional[str] = None, + project_names: Optional[List[str]] = None, + project_ids: Optional[List[ID_TYPE]] = None, + reference_example_ids: Optional[List[ID_TYPE]] = None, + start_time: Optional[str] = None, + end_time: Optional[str] = None, + error: Optional[bool] = None, + query: Optional[str] = None, + filter: Optional[str] = None, + trace_filter: Optional[str] = None, + tree_filter: Optional[str] = None, + is_root: Optional[bool] = None, + data_source_type: Optional[str] = None, + ) -> Dict[str, Any]: + """Get aggregate statistics over queried runs. + + Takes in similar query parameters to `list_runs` and returns statistics + based on the runs that match the query. + + Args: + id (Optional[List[ID_TYPE]]): List of run IDs to filter by. + trace (Optional[ID_TYPE]): Trace ID to filter by. + parent_run (Optional[ID_TYPE]): Parent run ID to filter by. + run_type (Optional[str]): Run type to filter by. + projects (Optional[List[ID_TYPE]]): List of session IDs to filter by. + reference_example (Optional[List[ID_TYPE]]): List of reference example IDs to filter by. + start_time (Optional[str]): Start time to filter by. + end_time (Optional[str]): End time to filter by. + error (Optional[bool]): Filter by error status. + query (Optional[str]): Query string to filter by. + filter (Optional[str]): Filter string to apply. + trace_filter (Optional[str]): Trace filter string to apply. + tree_filter (Optional[str]): Tree filter string to apply. + is_root (Optional[bool]): Filter by root run status. + data_source_type (Optional[str]): Data source type to filter by. + + Returns: + Dict[str, Any]: A dictionary containing the run statistics. + """ # noqa: E501 + from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore + + project_ids = project_ids or [] + if project_names: + with ThreadPoolExecutor() as executor: + futures = [ + executor.submit(self.read_project, project_name=name) + for name in project_names + ] + for future in as_completed(futures): + project_ids.append(future.result().id) + payload = { + "id": id, + "trace": trace, + "parent_run": parent_run, + "run_type": run_type, + "session": project_ids, + "reference_example": reference_example_ids, + "start_time": start_time, + "end_time": end_time, + "error": error, + "query": query, + "filter": filter, + "trace_filter": trace_filter, + "tree_filter": tree_filter, + "is_root": is_root, + "data_source_type": data_source_type, + } + + # Remove None values from the payload + payload = {k: v for k, v in payload.items() if v is not None} + + response = self.request_with_retries( + "POST", + "/runs/stats", + request_kwargs={ + "data": _dumps_json(payload), + }, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + def get_run_url( self, *, @@ -1708,6 +1869,10 @@ def get_run_url( ) -> str: """Get the URL for a run. + Not recommended for use within your agent runtime. + More for use interacting with runs after the fact + for data analysis or ETL workloads. 
+ Parameters ---------- run : Run @@ -1744,8 +1909,9 @@ def share_run(self, run_id: ID_TYPE, *, share_id: Optional[ID_TYPE] = None) -> s "run_id": str(run_id_), "share_token": share_id or str(uuid.uuid4()), } - response = self.session.put( - f"{self.api_url}/runs/{run_id_}/share", + response = self.request_with_retries( + "PUT", + f"/runs/{run_id_}/share", headers=self._headers, json=data, ) @@ -1755,8 +1921,9 @@ def share_run(self, run_id: ID_TYPE, *, share_id: Optional[ID_TYPE] = None) -> s def unshare_run(self, run_id: ID_TYPE) -> None: """Delete share link for a run.""" - response = self.session.delete( - f"{self.api_url}/runs/{_as_uuid(run_id, 'run_id')}/share", + response = self.request_with_retries( + "DELETE", + f"/runs/{_as_uuid(run_id, 'run_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1771,8 +1938,9 @@ def read_run_shared_link(self, run_id: ID_TYPE) -> Optional[str]: Optional[str]: The shared link for the run, or None if the link is not available. """ - response = self.session.get( - f"{self.api_url}/runs/{_as_uuid(run_id, 'run_id')}/share", + response = self.request_with_retries( + "GET", + f"/runs/{_as_uuid(run_id, 'run_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1786,20 +1954,32 @@ def run_is_shared(self, run_id: ID_TYPE) -> bool: link = self.read_run_shared_link(_as_uuid(run_id, "run_id")) return link is not None - def list_shared_runs( - self, share_token: ID_TYPE, run_ids: Optional[List[str]] = None - ) -> List[ls_schemas.Run]: + def read_shared_run( + self, share_token: Union[ID_TYPE, str], run_id: Optional[ID_TYPE] = None + ) -> ls_schemas.Run: """Get shared runs.""" - params = {"id": run_ids, "share_token": str(share_token)} - response = self.session.get( - f"{self.api_url}/public/{_as_uuid(share_token, 'share_token')}/runs", + _, token_uuid = _parse_token_or_url(share_token, "", kind="run") + path = f"/public/{token_uuid}/run" + if run_id is not None: + path += f"/{_as_uuid(run_id, 'run_id')}" + response = self.request_with_retries( + "GET", + path, headers=self._headers, - params=params, ) ls_utils.raise_for_status_with_text(response) - return [ - ls_schemas.Run(**run, _host_url=self._host_url) for run in response.json() - ] + return ls_schemas.Run(**response.json(), _host_url=self._host_url) + + def list_shared_runs( + self, share_token: Union[ID_TYPE, str], run_ids: Optional[List[str]] = None + ) -> Iterator[ls_schemas.Run]: + """Get shared runs.""" + body = {"id": run_ids} if run_ids else {} + _, token_uuid = _parse_token_or_url(share_token, "", kind="run") + for run in self._get_cursor_paginated_list( + f"/public/{token_uuid}/runs/query", body=body + ): + yield ls_schemas.Run(**run, _host_url=self._host_url) def read_dataset_shared_schema( self, @@ -1825,8 +2005,9 @@ def read_dataset_shared_schema( raise ValueError("Either dataset_id or dataset_name must be given") if dataset_id is None: dataset_id = self.read_dataset(dataset_name=dataset_name).id - response = self.session.get( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", + response = self.request_with_retries( + "GET", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1854,8 +2035,9 @@ def share_dataset( data = { "dataset_id": str(dataset_id), } - response = self.session.put( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", + response = self.request_with_retries( + "PUT", + 
f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", headers=self._headers, json=data, ) @@ -1868,8 +2050,9 @@ def share_dataset( def unshare_dataset(self, dataset_id: ID_TYPE) -> None: """Delete share link for a dataset.""" - response = self.session.delete( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", + response = self.request_with_retries( + "DELETE", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1879,8 +2062,9 @@ def read_shared_dataset( share_token: str, ) -> ls_schemas.Dataset: """Get shared datasets.""" - response = self.session.get( - f"{self.api_url}/public/{_as_uuid(share_token, 'share_token')}/datasets", + response = self.request_with_retries( + "GET", + f"/public/{_as_uuid(share_token, 'share_token')}/datasets", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1897,8 +2081,9 @@ def list_shared_examples( params = {} if example_ids is not None: params["id"] = [str(id) for id in example_ids] - response = self.session.get( - f"{self.api_url}/public/{_as_uuid(share_token, 'share_token')}/examples", + response = self.request_with_retries( + "GET", + f"/public/{_as_uuid(share_token, 'share_token')}/examples", headers=self._headers, params=params, ) @@ -1985,13 +2170,15 @@ def create_project( "name": project_name, "extra": extra, "description": description, + "id": str(uuid.uuid4()), } params = {} if upsert: params["upsert"] = True if reference_dataset_id is not None: body["reference_dataset_id"] = reference_dataset_id - response = self.session.post( + response = self.request_with_retries( + "POST", endpoint, headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(body), @@ -2040,7 +2227,8 @@ def update_project( "description": description, "end_time": end_time.isoformat() if end_time else None, } - response = self.session.patch( + response = self.request_with_retries( + "PATCH", endpoint, headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(body), @@ -2149,7 +2337,7 @@ def get_test_results( project_id: Optional[ID_TYPE] = None, project_name: Optional[str] = None, ) -> "pd.DataFrame": - """Read the record-level information from a test project into a Pandas DF. + """Read the record-level information from an experiment into a Pandas DF. Note: this will fetch whatever data exists in the DB. Results are not immediately available in the DB upon evaluation run completion. @@ -2159,24 +2347,50 @@ def get_test_results( pd.DataFrame A dataframe containing the test results. 
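Example (a sketch, assuming an experiment/project named "my-experiment" already has completed runs with feedback):

        .. code-block:: python

            from langsmith import Client

            client = Client()
            df = client.get_test_results(project_name="my-experiment")
            # Columns use dot syntax: input.*, outputs.*, reference.*, feedback.*
            print(df.head())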
""" + warnings.warn( + "Function get_test_results is in beta.", UserWarning, stacklevel=2 + ) + from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore + import pandas as pd # type: ignore runs = self.list_runs( - project_id=project_id, project_name=project_name, is_root=True + project_id=project_id, + project_name=project_name, + is_root=True, + select=[ + "id", + "reference_example_id", + "inputs", + "outputs", + "error", + "feedback_stats", + "start_time", + "end_time", + ], ) - results = [] + results: list[dict] = [] example_ids = [] - for r in runs: - row = { - "example_id": r.reference_example_id, - **{f"input.{k}": v for k, v in r.inputs.items()}, - **{f"outputs.{k}": v for k, v in (r.outputs or {}).items()}, - } - if r.feedback_stats: - for k, v in r.feedback_stats.items(): - row[f"feedback.{k}"] = v.get("avg") - row.update( + + def fetch_examples(batch): + examples = self.list_examples(example_ids=batch) + return [ { + "example_id": example.id, + **{f"reference.{k}": v for k, v in (example.outputs or {}).items()}, + } + for example in examples + ] + + batch_size = 50 + cursor = 0 + with ThreadPoolExecutor() as executor: + futures = [] + for r in runs: + row = { + "example_id": r.reference_example_id, + **{f"input.{k}": v for k, v in r.inputs.items()}, + **{f"outputs.{k}": v for k, v in (r.outputs or {}).items()}, "execution_time": ( (r.end_time - r.start_time).total_seconds() if r.end_time @@ -2185,32 +2399,37 @@ def get_test_results( "error": r.error, "id": r.id, } - ) - if r.reference_example_id: - example_ids.append(r.reference_example_id) - results.append(row) - result = pd.DataFrame(results).set_index("example_id") - batch_size = 100 - example_outputs = [] - for batch in [ - example_ids[i : i + batch_size] - for i in range(0, len(example_ids), batch_size) - ]: - for example in self.list_examples(example_ids=batch): - example_outputs.append( - { - "example_id": example.id, - **{ - f"reference.{k}": v - for k, v in (example.outputs or {}).items() - }, - } - ) + if r.feedback_stats: + row.update( + { + f"feedback.{k}": v.get("avg") + for k, v in r.feedback_stats.items() + } + ) + if r.reference_example_id: + example_ids.append(r.reference_example_id) + else: + logger.warning(f"Run {r.id} has no reference example ID.") + if len(example_ids) % batch_size == 0: + # Ensure not empty + if batch := example_ids[cursor : cursor + batch_size]: + futures.append(executor.submit(fetch_examples, batch)) + cursor += batch_size + results.append(row) + + # Handle any remaining examples + if example_ids[cursor:]: + futures.append(executor.submit(fetch_examples, example_ids[cursor:])) + result_df = pd.DataFrame(results).set_index("example_id") + example_outputs = [ + output for future in as_completed(futures) for output in future.result() + ] if example_outputs: - df = pd.DataFrame(example_outputs).set_index("example_id") - result = df.merge(result, left_index=True, right_index=True) + example_df = pd.DataFrame(example_outputs).set_index("example_id") + result_df = example_df.merge(result_df, left_index=True, right_index=True) + # Flatten dict columns into dot syntax for easier access - return pd.json_normalize(result.to_dict(orient="records")) + return pd.json_normalize(result_df.to_dict(orient="records")) def list_projects( self, @@ -2221,6 +2440,7 @@ def list_projects( reference_dataset_name: Optional[str] = None, reference_free: Optional[bool] = None, limit: Optional[int] = None, + metadata: Optional[Dict[str, Any]] = None, ) -> Iterator[ls_schemas.TracerSession]: 
"""List projects from the LangSmith API. @@ -2240,6 +2460,8 @@ def list_projects( Whether to filter for only projects not associated with a dataset. limit : Optional[int], optional The maximum number of projects to return, by default None + metadata: Optional[Dict[str, Any]], optional + Metadata to filter by. Yields: ------ @@ -2269,6 +2491,8 @@ def list_projects( params["reference_dataset"] = reference_dataset_id if reference_free is not None: params["reference_free"] = reference_free + if metadata is not None: + params["metadata"] = json.dumps(metadata) for i, project in enumerate( self._get_paginated_list("/sessions", params=params) ): @@ -2293,8 +2517,9 @@ def delete_project( project_id = str(self.read_project(project_name=project_name).id) elif project_id is None: raise ValueError("Must provide project_name or project_id") - response = self.session.delete( - self.api_url + f"/sessions/{_as_uuid(project_id, 'project_id')}", + response = self.request_with_retries( + "DELETE", + f"/sessions/{_as_uuid(project_id, 'project_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -2305,6 +2530,8 @@ def create_dataset( *, description: Optional[str] = None, data_type: ls_schemas.DataType = ls_schemas.DataType.kv, + inputs_schema: Optional[Dict[str, Any]] = None, + outputs_schema: Optional[Dict[str, Any]] = None, ) -> ls_schemas.Dataset: """Create a dataset in the LangSmith API. @@ -2322,17 +2549,28 @@ def create_dataset( Dataset The created dataset. """ - dataset = ls_schemas.DatasetCreate( - name=dataset_name, - description=description, - data_type=data_type, - ) - response = self.session.post( - self.api_url + "/datasets", + dataset: Dict[str, Any] = { + "name": dataset_name, + "data_type": data_type.value, + "created_at": datetime.datetime.now().isoformat(), + } + if description is not None: + dataset["description"] = description + + if inputs_schema is not None: + dataset["inputs_schema_definition"] = inputs_schema + + if outputs_schema is not None: + dataset["outputs_schema_definition"] = outputs_schema + + response = self.request_with_retries( + "POST", + "/datasets", headers={**self._headers, "Content-Type": "application/json"}, - data=dataset.json(), + data=orjson.dumps(dataset), ) ls_utils.raise_for_status_with_text(response) + return ls_schemas.Dataset( **response.json(), _host_url=self._host_url, @@ -2469,8 +2707,9 @@ def diff_dataset_versions( raise ValueError("Must provide either dataset name or ID") dataset_id = self.read_dataset(dataset_name=dataset_name).id dsid = _as_uuid(dataset_id, "dataset_id") - response = self.session.get( - f"{self.api_url}/datasets/{dsid}/versions/diff", + response = self.request_with_retries( + "GET", + f"/datasets/{dsid}/versions/diff", headers=self._headers, params={ "from_version": ( @@ -2577,8 +2816,9 @@ def delete_dataset( dataset_id = self.read_dataset(dataset_name=dataset_name).id if dataset_id is None: raise ValueError("Must provide either dataset name or ID") - response = self.session.delete( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}", + response = self.request_with_retries( + "DELETE", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -2629,8 +2869,9 @@ def update_dataset_tag( dataset_id = self.read_dataset(dataset_name=dataset_name).id if dataset_id is None: raise ValueError("Must provide either dataset name or ID") - response = self.session.put( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/tags", 
+ response = self.request_with_retries( + "PUT", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/tags", headers=self._headers, json={ "as_of": as_of.isoformat(), @@ -2936,6 +3177,7 @@ def create_examples( inputs: Sequence[Mapping[str, Any]], outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, metadata: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, + splits: Optional[Sequence[Optional[str | List[str]]]] = None, source_run_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, ids: Optional[Sequence[Optional[ID_TYPE]]] = None, dataset_id: Optional[ID_TYPE] = None, @@ -2952,6 +3194,9 @@ def create_examples( The output values for the examples. metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None The metadata for the examples. + split : Optional[Sequence[Optional[str | List[str]]]], default=None + The splits for the examples, which are divisions + of your dataset such as 'train', 'test', or 'validation'. source_run_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None The IDs of the source runs associated with the examples. ids : Optional[Sequence[ID_TYPE]], default=None @@ -2981,20 +3226,23 @@ def create_examples( "outputs": out_, "dataset_id": dataset_id, "metadata": metadata_, - "id": id_, + "split": split_, + "id": id_ or str(uuid.uuid4()), "source_run_id": source_run_id_, } - for in_, out_, metadata_, id_, source_run_id_ in zip( + for in_, out_, metadata_, split_, id_, source_run_id_ in zip( inputs, outputs or [None] * len(inputs), metadata or [None] * len(inputs), + splits or [None] * len(inputs), ids or [None] * len(inputs), source_run_ids or [None] * len(inputs), ) ] - response = self.session.post( - f"{self.api_url}/examples/bulk", + response = self.request_with_retries( + "POST", + "/examples/bulk", headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(examples), ) @@ -3009,6 +3257,7 @@ def create_example( created_at: Optional[datetime.datetime] = None, outputs: Optional[Mapping[str, Any]] = None, metadata: Optional[Mapping[str, Any]] = None, + split: Optional[str | List[str]] = None, example_id: Optional[ID_TYPE] = None, ) -> ls_schemas.Example: """Create a dataset example in the LangSmith API. @@ -3030,6 +3279,9 @@ def create_example( The output values for the example. metadata : Mapping[str, Any] or None, default=None The metadata for the example. + split : str or List[str] or None, default=None + The splits for the example, which are divisions + of your dataset such as 'train', 'test', or 'validation'. exemple_id : UUID or None, default=None The ID of the example to create. If not provided, a new example will be created. 
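As a usage sketch of the new split support when creating examples (the dataset name and example values below are purely illustrative):

    from langsmith import Client

    client = Client()
    dataset = client.create_dataset("qa-dataset")  # hypothetical dataset name
    client.create_examples(
        inputs=[{"question": "What is LangSmith?"}, {"question": "What is a trace?"}],
        outputs=[{"answer": "An observability platform."}, {"answer": "A tree of runs."}],
        splits=["train", "test"],  # one split (or list of splits) per example
        dataset_id=dataset.id,
    )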
@@ -3045,16 +3297,16 @@ def create_example( "outputs": outputs, "dataset_id": dataset_id, "metadata": metadata, + "split": split, } if created_at: data["created_at"] = created_at.isoformat() - if example_id: - data["id"] = example_id - example = ls_schemas.ExampleCreate(**data) - response = self.session.post( - f"{self.api_url}/examples", + data["id"] = example_id or str(uuid.uuid4()) + response = self.request_with_retries( + "POST", + "/examples", headers={**self._headers, "Content-Type": "application/json"}, - data=example.json(), + data=_dumps_json({k: v for k, v in data.items() if v is not None}), ) ls_utils.raise_for_status_with_text(response) result = response.json() @@ -3094,9 +3346,13 @@ def list_examples( dataset_name: Optional[str] = None, example_ids: Optional[Sequence[ID_TYPE]] = None, as_of: Optional[Union[datetime.datetime, str]] = None, + splits: Optional[Sequence[str]] = None, inline_s3_urls: bool = True, + *, + offset: int = 0, limit: Optional[int] = None, metadata: Optional[dict] = None, + filter: Optional[str] = None, **kwargs: Any, ) -> Iterator[ls_schemas.Example]: """Retrieve the example rows of the specified dataset. @@ -3112,21 +3368,30 @@ def list_examples( timestamp to retrieve the examples as of. Response examples will only be those that were present at the time of the tagged (or timestamped) version. + splits (List[str], optional): A list of dataset splits, which are + divisions of your dataset such as 'train', 'test', or 'validation'. + Returns examples only from the specified splits. inline_s3_urls (bool, optional): Whether to inline S3 URLs. Defaults to True. + offset (int): The offset to start from. Defaults to 0. limit (int, optional): The maximum number of examples to return. + filter (str, optional): A structured fileter string to apply to + the examples. Yields: Example: The examples. """ params: Dict[str, Any] = { **kwargs, + "offset": offset, "id": example_ids, "as_of": ( as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of ), + "splits": splits, "inline_s3_urls": inline_s3_urls, "limit": min(limit, 100) if limit is not None else 100, + "filter": filter, } if metadata is not None: params["metadata"] = _dumps_json(metadata) @@ -3148,6 +3413,118 @@ def list_examples( if limit is not None and i + 1 >= limit: break + @warn_beta + def index_dataset( + self, + *, + dataset_id: ID_TYPE, + tag: str = "latest", + **kwargs: Any, + ) -> None: + """Enable dataset indexing. Examples are indexed by their inputs. + + This enables searching for similar examples by inputs with + ``client.similar_examples()``. + + Args: + dataset_id (UUID): The ID of the dataset to index. + tag (str, optional): The version of the dataset to index. If 'latest' + then any updates to the dataset (additions, updates, deletions of + examples) will be reflected in the index. + + Returns: + None + + Raises: + requests.HTTPError + """ # noqa: E501 + dataset_id = _as_uuid(dataset_id, "dataset_id") + resp = self.request_with_retries( + "POST", + f"/datasets/{dataset_id}/index", + headers=self._headers, + data=json.dumps({"tag": tag, **kwargs}), + ) + ls_utils.raise_for_status_with_text(resp) + + # NOTE: dataset_name arg explicitly not supported to avoid extra API calls. + @warn_beta + def similar_examples( + self, + inputs: dict, + /, + *, + limit: int, + dataset_id: ID_TYPE, + **kwargs: Any, + ) -> List[ls_schemas.ExampleSearch]: + r"""Retrieve the dataset examples whose inputs best match the current inputs. + + **Note**: Must have few-shot indexing enabled for the dataset. 
See + ``client.index_dataset()``. + + Args: + inputs (dict): The inputs to use as a search query. Must match the dataset + input schema. Must be JSON serializable. + limit (int): The maximum number of examples to return. + dataset_id (str or UUID): The ID of the dataset to search over. + kwargs (Any): Additional keyword args to pass as part of request body. + + Returns: + List of ExampleSearch objects. + + Example: + .. code-block:: python + + from langsmith import Client + + client = Client() + client.similar_examples( + {"question": "When would i use the runnable generator"}, + limit=3, + dataset_id="...", + ) + + .. code-block:: pycon + + [ + ExampleSearch( + inputs={'question': 'How do I cache a Chat model? What caches can I use?'}, + outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n```\n'}, + metadata=None, + id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': "What's a runnable lambda?"}, + outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."}, + metadata=None, + id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': 'Show me how to use RecursiveURLLoader'}, + outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. 
You can iterate over this list to access each child link.'}, + metadata=None, + id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ] + + """ # noqa: E501 + dataset_id = _as_uuid(dataset_id, "dataset_id") + resp = self.request_with_retries( + "POST", + f"/datasets/{dataset_id}/search", + headers=self._headers, + data=json.dumps({"inputs": inputs, "limit": limit, **kwargs}), + ) + ls_utils.raise_for_status_with_text(resp) + examples = [] + for ex in resp.json()["examples"]: + examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id)) + return examples + def update_example( self, example_id: ID_TYPE, @@ -3155,6 +3532,7 @@ def update_example( inputs: Optional[Dict[str, Any]] = None, outputs: Optional[Mapping[str, Any]] = None, metadata: Optional[Dict] = None, + split: Optional[str | List[str]] = None, dataset_id: Optional[ID_TYPE] = None, ) -> Dict[str, Any]: """Update a specific example. @@ -3169,6 +3547,9 @@ def update_example( The output values to update. metadata : Dict or None, default=None The metadata to update. + split : str or List[str] or None, default=None + The dataset split to update, such as + 'train', 'test', or 'validation'. dataset_id : UUID or None, default=None The ID of the dataset to update. @@ -3177,16 +3558,85 @@ def update_example( Dict[str, Any] The updated example. """ - example = ls_schemas.ExampleUpdate( + example = dict( inputs=inputs, outputs=outputs, dataset_id=dataset_id, metadata=metadata, + split=split, + ) + response = self.request_with_retries( + "PATCH", + f"/examples/{_as_uuid(example_id, 'example_id')}", + headers={**self._headers, "Content-Type": "application/json"}, + data=_dumps_json({k: v for k, v in example.items() if v is not None}), ) - response = self.session.patch( - f"{self.api_url}/examples/{_as_uuid(example_id, 'example_id')}", + ls_utils.raise_for_status_with_text(response) + return response.json() + + def update_examples( + self, + *, + example_ids: Sequence[ID_TYPE], + inputs: Optional[Sequence[Optional[Dict[str, Any]]]] = None, + outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, + metadata: Optional[Sequence[Optional[Dict]]] = None, + splits: Optional[Sequence[Optional[str | List[str]]]] = None, + dataset_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, + ) -> Dict[str, Any]: + """Update multiple examples. + + Parameters + ---------- + example_ids : Sequence[ID_TYPE] + The IDs of the examples to update. + inputs : Optional[Sequence[Optional[Dict[str, Any]]], default=None + The input values for the examples. + outputs : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None + The output values for the examples. + metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None + The metadata for the examples. + split : Optional[Sequence[Optional[str | List[str]]]], default=None + The splits for the examples, which are divisions + of your dataset such as 'train', 'test', or 'validation'. + dataset_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None + The IDs of the datasets to move the examples to. + + Returns: + ------- + Dict[str, Any] + The response from the server (specifies the number of examples updated). 
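Example (illustrative only; the example IDs and split names are placeholders):

        .. code-block:: python

            client.update_examples(
                example_ids=["<example-uuid-1>", "<example-uuid-2>"],
                splits=[["train"], ["test"]],
                metadata=[{"source": "synthetic"}, {"source": "manual"}],
            )
            # Only the fields provided here are sent; omitted fields are left unchanged.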
+ """ + examples = [ + { + "id": id_, + "inputs": in_, + "outputs": out_, + "dataset_id": dataset_id_, + "metadata": metadata_, + "split": split_, + } + for id_, in_, out_, metadata_, split_, dataset_id_ in zip( + example_ids, + inputs or [None] * len(example_ids), + outputs or [None] * len(example_ids), + metadata or [None] * len(example_ids), + splits or [None] * len(example_ids), + dataset_ids or [None] * len(example_ids), + ) + ] + response = self.request_with_retries( + "PATCH", + "/examples/bulk", headers={**self._headers, "Content-Type": "application/json"}, - data=example.json(exclude_none=True), + data=( + _dumps_json( + [ + {k: v for k, v in example.items() if v is not None} + for example in examples + ] + ) + ), ) ls_utils.raise_for_status_with_text(response) return response.json() @@ -3199,24 +3649,101 @@ def delete_example(self, example_id: ID_TYPE) -> None: example_id : str or UUID The ID of the example to delete. """ - response = self.session.delete( - f"{self.api_url}/examples/{_as_uuid(example_id, 'example_id')}", + response = self.request_with_retries( + "DELETE", + f"/examples/{_as_uuid(example_id, 'example_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) - def _resolve_run_id( + def list_dataset_splits( self, - run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID], - load_child_runs: bool, - ) -> ls_schemas.Run: - """Resolve the run ID. + *, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + as_of: Optional[Union[str, datetime.datetime]] = None, + ) -> List[str]: + """Get the splits for a dataset. - Parameters - ---------- - run : Run or RunBase or str or UUID - The run to resolve. - load_child_runs : bool + Args: + dataset_id (ID_TYPE): The ID of the dataset. + as_of (Optional[Union[str, datetime.datetime]], optional): The version + of the dataset to retrieve splits for. Can be a timestamp or a + string tag. Defaults to "latest". + + Returns: + List[str]: The names of this dataset's. + """ + if dataset_id is None: + if dataset_name is None: + raise ValueError("Must provide dataset name or ID") + dataset_id = self.read_dataset(dataset_name=dataset_name).id + params = {} + if as_of is not None: + params["as_of"] = ( + as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of + ) + + response = self.request_with_retries( + "GET", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", + params=params, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + + def update_dataset_splits( + self, + *, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + split_name: str, + example_ids: List[ID_TYPE], + remove: bool = False, + ) -> None: + """Update the splits for a dataset. + + Args: + dataset_id (ID_TYPE): The ID of the dataset to update. + split_name (str): The name of the split to update. + example_ids (List[ID_TYPE]): The IDs of the examples to add to or + remove from the split. + remove (bool, optional): If True, remove the examples from the split. + If False, add the examples to the split. Defaults to False. 
+ + Returns: + None + """ + if dataset_id is None: + if dataset_name is None: + raise ValueError("Must provide dataset name or ID") + dataset_id = self.read_dataset(dataset_name=dataset_name).id + data = { + "split_name": split_name, + "examples": [ + str(_as_uuid(id_, f"example_ids[{i}]")) + for i, id_ in enumerate(example_ids) + ], + "remove": remove, + } + + response = self.request_with_retries( + "PUT", f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", json=data + ) + ls_utils.raise_for_status_with_text(response) + + def _resolve_run_id( + self, + run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID], + load_child_runs: bool, + ) -> ls_schemas.Run: + """Resolve the run ID. + + Parameters + ---------- + run : Run or RunBase or str or UUID + The run to resolve. + load_child_runs : bool Whether to load child runs. Returns: @@ -3272,25 +3799,40 @@ def _resolve_example_id( def _select_eval_results( self, - results: Union[ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults], + results: Union[ + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict + ], *, fn_name: Optional[str] = None, ) -> List[ls_evaluator.EvaluationResult]: from langsmith.evaluation import evaluator as ls_evaluator # noqa: F811 + def _cast_result( + single_result: Union[ls_evaluator.EvaluationResult, dict], + ) -> ls_evaluator.EvaluationResult: + if isinstance(single_result, dict): + return ls_evaluator.EvaluationResult( + **{ + "key": fn_name, + "comment": single_result.get("reasoning"), + **single_result, + } + ) + return single_result + + def _is_eval_results(results: Any) -> TypeGuard[ls_evaluator.EvaluationResults]: + return isinstance(results, dict) and "results" in results + if isinstance(results, ls_evaluator.EvaluationResult): results_ = [results] + elif _is_eval_results(results): + results_ = [_cast_result(r) for r in results["results"]] elif isinstance(results, dict): - if "results" in results: - results_ = cast(List[ls_evaluator.EvaluationResult], results["results"]) - else: - results_ = [ - ls_evaluator.EvaluationResult(**{"key": fn_name, **results}) - ] + results_ = [_cast_result(cast(dict, results))] else: - raise TypeError( - f"Invalid evaluation result type {type(results)}." - " Expected EvaluationResult or EvaluationResults." + raise ValueError( + f"Invalid evaluation results type: {type(results)}." + " Must be EvaluationResult, EvaluationResults." ) return results_ @@ -3344,7 +3886,7 @@ def evaluate_run( def _log_evaluation_feedback( self, evaluator_response: Union[ - ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict ], run: Optional[ls_schemas.Run] = None, source_info: Optional[Dict[str, Any]] = None, @@ -3530,8 +4072,10 @@ def create_feedback( ) feedback_source.metadata["__run"] = _run_meta feedback = ls_schemas.FeedbackCreate( - id=feedback_id or uuid.uuid4(), - run_id=run_id, + id=_ensure_uuid(feedback_id), + # If run_id is None, this is interpreted as session-level + # feedback. 
+ run_id=_ensure_uuid(run_id, accept_null=True), key=key, score=score, value=value, @@ -3541,16 +4085,18 @@ def create_feedback( created_at=datetime.datetime.now(datetime.timezone.utc), modified_at=datetime.datetime.now(datetime.timezone.utc), feedback_config=feedback_config, - session_id=project_id, - comparative_experiment_id=comparative_experiment_id, - feedback_group_id=feedback_group_id, + session_id=_ensure_uuid(project_id, accept_null=True), + comparative_experiment_id=_ensure_uuid( + comparative_experiment_id, accept_null=True + ), + feedback_group_id=_ensure_uuid(feedback_group_id, accept_null=True), ) - feedack_block = _dumps_json(feedback.dict(exclude_none=True)) + feedback_block = _dumps_json(feedback.dict(exclude_none=True)) self.request_with_retries( "POST", "/feedback", request_kwargs={ - "data": feedack_block, + "data": feedback_block, }, stop_after_attempt=stop_after_attempt, retry_on=(ls_utils.LangSmithNotFoundError,), @@ -3590,8 +4136,9 @@ def update_feedback( feedback_update["correction"] = correction if comment is not None: feedback_update["comment"] = comment - response = self.session.patch( - self.api_url + f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}", + response = self.request_with_retries( + "PATCH", + f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}", headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(feedback_update), ) @@ -3670,8 +4217,9 @@ def delete_feedback(self, feedback_id: ID_TYPE) -> None: feedback_id : str or UUID The ID of the feedback to delete. """ - response = self.session.delete( - f"{self.api_url}/feedback/{_as_uuid(feedback_id, 'feedback_id')}", + response = self.request_with_retries( + "DELETE", + f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -3713,8 +4261,9 @@ def create_feedback_from_token( ) if source_api_url != self.api_url: raise ValueError(f"Invalid source API URL. {source_api_url}") - response = self.session.post( - f"{source_api_url}/feedback/tokens/{_as_uuid(token_uuid)}", + response = self.request_with_retries( + "POST", + f"/feedback/tokens/{_as_uuid(token_uuid)}", data=_dumps_json( { "score": score, @@ -3722,6 +4271,7 @@ def create_feedback_from_token( "correction": correction, "comment": comment, "metadata": metadata, + # TODO: Add ID once the API supports it. } ), headers=self._headers, @@ -3735,6 +4285,7 @@ def create_presigned_feedback_token( *, expiration: Optional[datetime.datetime | datetime.timedelta] = None, feedback_config: Optional[ls_schemas.FeedbackConfig] = None, + feedback_id: Optional[ID_TYPE] = None, ) -> ls_schemas.FeedbackIngestToken: """Create a pre-signed URL to send feedback data to. @@ -3753,6 +4304,8 @@ def create_presigned_feedback_token( this defines how the metric should be interpreted, such as a continuous score (w/ optional bounds), or distribution over categorical values. + feedback_id: The ID of the feedback to create. If not provided, a new + feedback will be created. Returns: The pre-signed URL for uploading feedback data. 
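A short sketch of pinning the feedback ID up front so a browser client and the backend refer to the same feedback record (the run ID and key are placeholders; the returned token is assumed to expose the pre-signed ``url``):

    import uuid

    from langsmith import Client

    client = Client()
    feedback_id = uuid.uuid4()
    token = client.create_presigned_feedback_token(
        run_id="<run-uuid>",
        feedback_key="user_rating",
        feedback_id=feedback_id,  # new: pre-generate the feedback ID
    )
    # Hand token.url to the browser; it can POST the score without an API key.
    # The URL expires after 3 hours unless `expiration` is set.
    print(token.url, feedback_id)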
@@ -3761,6 +4314,7 @@ def create_presigned_feedback_token( "run_id": run_id, "feedback_key": feedback_key, "feedback_config": feedback_config, + "id": feedback_id or str(uuid.uuid4()), } if expiration is None: body["expires_in"] = ls_schemas.TimeDeltaInput( @@ -3787,6 +4341,106 @@ def create_presigned_feedback_token( ls_utils.raise_for_status_with_text(response) return ls_schemas.FeedbackIngestToken(**response.json()) + def create_presigned_feedback_tokens( + self, + run_id: ID_TYPE, + feedback_keys: Sequence[str], + *, + expiration: Optional[datetime.datetime | datetime.timedelta] = None, + feedback_configs: Optional[ + Sequence[Optional[ls_schemas.FeedbackConfig]] + ] = None, + ) -> Sequence[ls_schemas.FeedbackIngestToken]: + """Create a pre-signed URL to send feedback data to. + + This is useful for giving browser-based clients a way to upload + feedback data directly to LangSmith without accessing the + API key. + + Args: + run_id: + feedback_key: + expiration: The expiration time of the pre-signed URL. + Either a datetime or a timedelta offset from now. + Default to 3 hours. + feedback_config: FeedbackConfig or None. + If creating a feedback_key for the first time, + this defines how the metric should be interpreted, + such as a continuous score (w/ optional bounds), + or distribution over categorical values. + + Returns: + The pre-signed URL for uploading feedback data. + """ + # validate + if feedback_configs is not None and len(feedback_keys) != len(feedback_configs): + raise ValueError( + "The length of feedback_keys and feedback_configs must be the same." + ) + if not feedback_configs: + feedback_configs = [None] * len(feedback_keys) + # build expiry option + expires_in, expires_at = None, None + if expiration is None: + expires_in = ls_schemas.TimeDeltaInput( + days=0, + hours=3, + minutes=0, + ) + elif isinstance(expiration, datetime.datetime): + expires_at = expiration.isoformat() + elif isinstance(expiration, datetime.timedelta): + expires_in = ls_schemas.TimeDeltaInput( + days=expiration.days, + hours=expiration.seconds // 3600, + minutes=(expiration.seconds // 60) % 60, + ) + else: + raise ValueError(f"Unknown expiration type: {type(expiration)}") + # assemble body, one entry per key + body = _dumps_json( + [ + { + "run_id": run_id, + "feedback_key": feedback_key, + "feedback_config": feedback_config, + "expires_in": expires_in, + "expires_at": expires_at, + } + for feedback_key, feedback_config in zip( + feedback_keys, feedback_configs + ) + ] + ) + + def req(api_url: str, api_key: Optional[str]) -> list: + response = self.request_with_retries( + "POST", + f"{api_url}/feedback/tokens", + request_kwargs={ + "data": body, + "headers": { + **self._headers, + X_API_KEY: api_key or self.api_key, + }, + }, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + + tokens = [] + with cf.ThreadPoolExecutor(max_workers=len(self._write_api_urls)) as executor: + futs = [ + executor.submit(req, api_url, api_key) + for api_url, api_key in self._write_api_urls.items() + ] + for fut in cf.as_completed(futs): + response = fut.result() + tokens.extend( + [ls_schemas.FeedbackIngestToken(**part) for part in response] + ) + return tokens + def list_presigned_feedback_tokens( self, run_id: ID_TYPE, @@ -3854,8 +4508,6 @@ def list_annotation_queues( ): yield ls_schemas.AnnotationQueue( **queue, - _host_url=self._host_url, - _tenant_id=self._get_optional_tenant_id(), ) if limit is not None and i + 1 >= limit: break @@ -3884,7 +4536,7 @@ def create_annotation_queue( body = { 
"name": name, "description": description, - "id": queue_id, + "id": queue_id or str(uuid.uuid4()), } response = self.request_with_retries( "POST", @@ -3894,8 +4546,6 @@ def create_annotation_queue( ls_utils.raise_for_status_with_text(response) return ls_schemas.AnnotationQueue( **response.json(), - _host_url=self._host_url, - _tenant_id=self._get_optional_tenant_id(), ) def read_annotation_queue(self, queue_id: ID_TYPE) -> ls_schemas.AnnotationQueue: @@ -3937,8 +4587,9 @@ def delete_annotation_queue(self, queue_id: ID_TYPE) -> None: Args: queue_id (ID_TYPE): The ID of the annotation queue to delete. """ - response = self.session.delete( - f"{self.api_url}/annotation-queues/{_as_uuid(queue_id, 'queue_id')}", + response = self.request_with_retries( + "DELETE", + f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}", headers={"Accept": "application/json", **self._headers}, ) ls_utils.raise_for_status_with_text(response) @@ -4019,7 +4670,7 @@ def create_comparative_experiment( if not reference_dataset: raise ValueError("A reference dataset is required.") body: Dict[str, Any] = { - "id": id, + "id": id or str(uuid.uuid4()), "name": name, "experiment_ids": experiments, "reference_dataset_id": reference_dataset, @@ -4166,6 +4817,13 @@ def _evaluate_strings( evaluation=evaluation_config, ) """ # noqa: E501 + # warn as deprecated and to use `aevaluate` instead + warnings.warn( + "The `arun_on_dataset` method is deprecated and" + " will be removed in a future version." + "Please use the `aevaluate` method instead.", + DeprecationWarning, + ) try: from langchain.smith import arun_on_dataset as _arun_on_dataset except ImportError: @@ -4314,6 +4972,12 @@ def _evaluate_strings( evaluation=evaluation_config, ) """ # noqa: E501 + warnings.warn( + "The `run_on_dataset` method is deprecated and" + " will be removed in a future version." + "Please use the `evaluate` method instead.", + DeprecationWarning, + ) try: from langchain.smith import run_on_dataset as _run_on_dataset except ImportError: @@ -4336,6 +5000,547 @@ def _evaluate_strings( **kwargs, ) + def _current_tenant_is_owner(self, owner: str) -> bool: + """Check if the current workspace has the same handle as owner. + + Args: + owner (str): The owner to check against. + + Returns: + bool: True if the current tenant is the owner, False otherwise. + """ + settings = self._get_settings() + return owner == "-" or settings.tenant_handle == owner + + def _owner_conflict_error( + self, action: str, owner: str + ) -> ls_utils.LangSmithUserError: + return ls_utils.LangSmithUserError( + f"Cannot {action} for another tenant.\n" + f"Current tenant: {self._get_settings().tenant_handle},\n" + f"Requested tenant: {owner}" + ) + + def _get_latest_commit_hash( + self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0 + ) -> Optional[str]: + """Get the latest commit hash for a prompt. + + Args: + prompt_owner_and_name (str): The owner and name of the prompt. + limit (int): The maximum number of commits to fetch. Defaults to 1. + offset (int): The number of commits to skip. Defaults to 0. + + Returns: + Optional[str]: The latest commit hash, or None if no commits are found. + """ + response = self.request_with_retries( + "GET", + f"/commits/{prompt_owner_and_name}/", + params={"limit": limit, "offset": offset}, + ) + commits = response.json()["commits"] + return commits[0]["commit_hash"] if commits else None + + def _like_or_unlike_prompt( + self, prompt_identifier: str, like: bool + ) -> Dict[str, int]: + """Like or unlike a prompt. 
+ + Args: + prompt_identifier (str): The identifier of the prompt. + like (bool): True to like the prompt, False to unlike it. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. + + Raises: + requests.exceptions.HTTPError: If the prompt is not found or + another error occurs. + """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries( + "POST", f"/likes/{owner}/{prompt_name}", json={"like": like} + ) + response.raise_for_status() + return response.json() + + def _get_prompt_url(self, prompt_identifier: str) -> str: + """Get a URL for a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + str: The URL for the prompt. + + """ + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( + prompt_identifier + ) + + if not self._current_tenant_is_owner(owner): + return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" + + settings = self._get_settings() + return ( + f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" + f"?organizationId={settings.id}" + ) + + def _prompt_exists(self, prompt_identifier: str) -> bool: + """Check if a prompt exists. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + bool: True if the prompt exists, False otherwise. + """ + prompt = self.get_prompt(prompt_identifier) + return True if prompt else False + + def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Check if a prompt exists. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. + + """ + return self._like_or_unlike_prompt(prompt_identifier, like=True) + + def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Unlike a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. + + """ + return self._like_or_unlike_prompt(prompt_identifier, like=False) + + def list_prompts( + self, + *, + limit: int = 100, + offset: int = 0, + is_public: Optional[bool] = None, + is_archived: Optional[bool] = False, + sort_field: ls_schemas.PromptSortField = ls_schemas.PromptSortField.updated_at, + sort_direction: Literal["desc", "asc"] = "desc", + query: Optional[str] = None, + ) -> ls_schemas.ListPromptsResponse: + """List prompts with pagination. + + Args: + limit (int): The maximum number of prompts to return. Defaults to 100. + offset (int): The number of prompts to skip. Defaults to 0. + is_public (Optional[bool]): Filter prompts by if they are public. + is_archived (Optional[bool]): Filter prompts by if they are archived. + sort_field (ls_schemas.PromptsSortField): The field to sort by. + Defaults to "updated_at". + sort_direction (Literal["desc", "asc"]): The order to sort by. + Defaults to "desc". + query (Optional[str]): Filter prompts by a search query. + + Returns: + ls_schemas.ListPromptsResponse: A response object containing + the list of prompts. 
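Example (a sketch; assumes the response exposes a ``repos`` list of prompts):

        .. code-block:: python

            resp = client.list_prompts(limit=10, query="rag", is_public=True)
            for prompt in resp.repos:
                print(prompt.repo_handle)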
+ """ + params = { + "limit": limit, + "offset": offset, + "is_public": ( + "true" if is_public else "false" if is_public is not None else None + ), + "is_archived": "true" if is_archived else "false", + "sort_field": sort_field, + "sort_direction": sort_direction, + "query": query, + "match_prefix": "true" if query else None, + } + + response = self.request_with_retries("GET", "/repos/", params=params) + return ls_schemas.ListPromptsResponse(**response.json()) + + def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: + """Get a specific prompt by its identifier. + + Args: + prompt_identifier (str): The identifier of the prompt. + The identifier should be in the format "prompt_name" or "owner/prompt_name". + + Returns: + Optional[ls_schemas.Prompt]: The prompt object. + + Raises: + requests.exceptions.HTTPError: If the prompt is not found or + another error occurs. + """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + try: + response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}") + return ls_schemas.Prompt(**response.json()["repo"]) + except ls_utils.LangSmithNotFoundError: + return None + + def create_prompt( + self, + prompt_identifier: str, + *, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, + is_public: bool = False, + ) -> ls_schemas.Prompt: + """Create a new prompt. + + Does not attach prompt object, just creates an empty prompt. + + Args: + prompt_name (str): The name of the prompt. + description (Optional[str]): A description of the prompt. + readme (Optional[str]): A readme for the prompt. + tags (Optional[Sequence[str]]): A list of tags for the prompt. + is_public (bool): Whether the prompt should be public. Defaults to False. + + Returns: + ls_schemas.Prompt: The created prompt object. + + Raises: + ValueError: If the current tenant is not the owner. + HTTPError: If the server request fails. + """ + settings = self._get_settings() + if is_public and not settings.tenant_handle: + raise ls_utils.LangSmithUserError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" + ) + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + if not self._current_tenant_is_owner(owner=owner): + raise self._owner_conflict_error("create a prompt", owner) + + json: Dict[str, Union[str, bool, Sequence[str]]] = { + "repo_handle": prompt_name, + "description": description or "", + "readme": readme or "", + "tags": tags or [], + "is_public": is_public, + } + + response = self.request_with_retries("POST", "/repos/", json=json) + response.raise_for_status() + return ls_schemas.Prompt(**response.json()["repo"]) + + def create_commit( + self, + prompt_identifier: str, + object: Any, + *, + parent_commit_hash: Optional[str] = None, + ) -> str: + """Create a commit for an existing prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + object (Any): The LangChain object to commit. + parent_commit_hash (Optional[str]): The hash of the parent commit. + Defaults to latest commit. + + Returns: + str: The url of the prompt commit. + + Raises: + HTTPError: If the server request fails. + ValueError: If the prompt does not exist. + """ + if not self._prompt_exists(prompt_identifier): + raise ls_utils.LangSmithNotFoundError( + "Prompt does not exist, you must create it first." 
+ ) + + try: + from langchain_core.load.dump import dumps + except ImportError: + raise ImportError( + "The client.create_commit function requires the langchain_core" + "package to run.\nInstall with `pip install langchain_core`" + ) + + json_object = dumps(object) + manifest_dict = json.loads(json_object) + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + prompt_owner_and_name = f"{owner}/{prompt_name}" + + if parent_commit_hash == "latest" or parent_commit_hash is None: + parent_commit_hash = self._get_latest_commit_hash(prompt_owner_and_name) + + request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} + response = self.request_with_retries( + "POST", f"/commits/{prompt_owner_and_name}", json=request_dict + ) + + commit_hash = response.json()["commit"]["commit_hash"] + + return self._get_prompt_url(f"{prompt_owner_and_name}:{commit_hash}") + + def update_prompt( + self, + prompt_identifier: str, + *, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, + is_public: Optional[bool] = None, + is_archived: Optional[bool] = None, + ) -> Dict[str, Any]: + """Update a prompt's metadata. + + To update the content of a prompt, use push_prompt or create_commit instead. + + Args: + prompt_identifier (str): The identifier of the prompt to update. + description (Optional[str]): New description for the prompt. + readme (Optional[str]): New readme for the prompt. + tags (Optional[Sequence[str]]): New list of tags for the prompt. + is_public (Optional[bool]): New public status for the prompt. + is_archived (Optional[bool]): New archived status for the prompt. + + Returns: + Dict[str, Any]: The updated prompt data as returned by the server. + + Raises: + ValueError: If the prompt_identifier is empty. + HTTPError: If the server request fails. + """ + settings = self._get_settings() + if is_public and not settings.tenant_handle: + raise ValueError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" + ) + + json: Dict[str, Union[str, bool, Sequence[str]]] = {} + + if description is not None: + json["description"] = description + if readme is not None: + json["readme"] = readme + if is_public is not None: + json["is_public"] = is_public + if is_archived is not None: + json["is_archived"] = is_archived + if tags is not None: + json["tags"] = tags + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries( + "PATCH", f"/repos/{owner}/{prompt_name}", json=json + ) + response.raise_for_status() + return response.json() + + def delete_prompt(self, prompt_identifier: str) -> None: + """Delete a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt to delete. + + Returns: + bool: True if the prompt was successfully deleted, False otherwise. + + Raises: + ValueError: If the current tenant is not the owner of the prompt. 
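Example (sketch; "joke-generator" is a placeholder prompt owned by the current workspace):

        .. code-block:: python

            if client.get_prompt("joke-generator") is not None:
                client.delete_prompt("joke-generator")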
+ """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + if not self._current_tenant_is_owner(owner): + raise self._owner_conflict_error("delete a prompt", owner) + + response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") + response.raise_for_status() + + def pull_prompt_commit( + self, + prompt_identifier: str, + *, + include_model: Optional[bool] = False, + ) -> ls_schemas.PromptCommit: + """Pull a prompt object from the LangSmith API. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + ls_schemas.PromptObject: The prompt object. + + Raises: + ValueError: If no commits are found for the prompt. + """ + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( + prompt_identifier + ) + use_optimization = ls_utils.is_version_greater_or_equal( + self.info.version, "0.5.23" + ) + + if not use_optimization and commit_hash == "latest": + latest_commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") + if latest_commit_hash is None: + raise ValueError("No commits found") + else: + commit_hash = latest_commit_hash + + response = self.request_with_retries( + "GET", + ( + f"/commits/{owner}/{prompt_name}/{commit_hash}" + f"{'?include_model=true' if include_model else ''}" + ), + ) + return ls_schemas.PromptCommit( + **{"owner": owner, "repo": prompt_name, **response.json()} + ) + + def pull_prompt( + self, prompt_identifier: str, *, include_model: Optional[bool] = False + ) -> Any: + """Pull a prompt and return it as a LangChain PromptTemplate. + + This method requires `langchain_core`. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + Any: The prompt object in the specified format. + """ + try: + from langchain_core.load.load import loads + from langchain_core.prompts import BasePromptTemplate + from langchain_core.runnables.base import RunnableSequence + except ImportError: + raise ImportError( + "The client.pull_prompt function requires the langchain_core" + "package to run.\nInstall with `pip install langchain_core`" + ) + + prompt_object = self.pull_prompt_commit( + prompt_identifier, include_model=include_model + ) + prompt = loads(json.dumps(prompt_object.manifest)) + + if ( + isinstance(prompt, BasePromptTemplate) + or isinstance(prompt, RunnableSequence) + and isinstance(prompt.first, BasePromptTemplate) + ): + prompt_template = ( + prompt + if isinstance(prompt, BasePromptTemplate) + else ( + prompt.first + if isinstance(prompt, RunnableSequence) + and isinstance(prompt.first, BasePromptTemplate) + else None + ) + ) + if prompt_template is None: + raise ls_utils.LangSmithError( + "Prompt object is not a valid prompt template." + ) + + if prompt_template.metadata is None: + prompt_template.metadata = {} + prompt_template.metadata.update( + { + "lc_hub_owner": prompt_object.owner, + "lc_hub_repo": prompt_object.repo, + "lc_hub_commit_hash": prompt_object.commit_hash, + } + ) + + return prompt + + def push_prompt( + self, + prompt_identifier: str, + *, + object: Optional[Any] = None, + parent_commit_hash: str = "latest", + is_public: bool = False, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, + ) -> str: + """Push a prompt to the LangSmith API. + + Can be used to update prompt metadata or prompt content. + + If the prompt does not exist, it will be created. + If the prompt exists, it will be updated. + + Args: + prompt_identifier (str): The identifier of the prompt. 
+ object (Optional[Any]): The LangChain object to push. + parent_commit_hash (str): The parent commit hash. + Defaults to "latest". + is_public (bool): Whether the prompt should be public. Defaults to False. + description (Optional[str]): A description of the prompt. + Defaults to an empty string. + readme (Optional[str]): A readme for the prompt. + Defaults to an empty string. + tags (Optional[Sequence[str]]): A list of tags for the prompt. + Defaults to an empty list. + + Returns: + str: The URL of the prompt. + + """ + # Create or update prompt metadata + if self._prompt_exists(prompt_identifier): + if any( + param is not None + for param in [parent_commit_hash, is_public, description, readme, tags] + ): + self.update_prompt( + prompt_identifier, + description=description, + readme=readme, + tags=tags, + is_public=is_public, + ) + else: + self.create_prompt( + prompt_identifier, + is_public=is_public, + description=description, + readme=readme, + tags=tags, + ) + + if object is None: + return self._get_prompt_url(prompt_identifier=prompt_identifier) + + # Create a commit with the new manifest + url = self.create_commit( + prompt_identifier, + object, + parent_commit_hash=parent_commit_hash, + ) + return url + def _tracing_thread_drain_queue( tracing_queue: Queue, limit: int = 100, block: bool = True @@ -4483,3 +5688,83 @@ def _tracing_sub_thread_func( tracing_queue, limit=size_limit, block=False ): _tracing_thread_handle_batch(client, tracing_queue, next_batch) + + +def convert_prompt_to_openai_format( + messages: Any, + model_kwargs: Optional[Dict[str, Any]] = None, +) -> dict: + """Convert a prompt to OpenAI format. + + Requires the `langchain_openai` package to be installed. + + Args: + messages (Any): The messages to convert. + model_kwargs (Optional[Dict[str, Any]]): Model configuration arguments including + `stop` and any other required arguments. Defaults to None. + + Returns: + dict: The prompt in OpenAI format. + + Raises: + ImportError: If the `langchain_openai` package is not installed. + ls_utils.LangSmithError: If there is an error during the conversion process. + """ + try: + from langchain_openai import ChatOpenAI + except ImportError: + raise ImportError( + "The convert_prompt_to_openai_format function requires the langchain_openai" + "package to run.\nInstall with `pip install langchain_openai`" + ) + + openai = ChatOpenAI() + + model_kwargs = model_kwargs or {} + stop = model_kwargs.pop("stop", None) + + try: + return openai._get_request_payload(messages, stop=stop, **model_kwargs) + except Exception as e: + raise ls_utils.LangSmithError(f"Error converting to OpenAI format: {e}") + + +def convert_prompt_to_anthropic_format( + messages: Any, + model_kwargs: Optional[Dict[str, Any]] = None, +) -> dict: + """Convert a prompt to Anthropic format. + + Requires the `langchain_anthropic` package to be installed. + + Args: + messages (Any): The messages to convert. + model_kwargs (Optional[Dict[str, Any]]): + Model configuration arguments including `model_name` and `stop`. + Defaults to None. + + Returns: + dict: The prompt in Anthropic format. 
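Example (a minimal sketch; requires ``langchain_core`` and ``langchain_anthropic``, plus an ``ANTHROPIC_API_KEY`` in the environment):

        .. code-block:: python

            from langchain_core.messages import HumanMessage, SystemMessage

            messages = [
                SystemMessage(content="You are a terse assistant."),
                HumanMessage(content="Summarize LangSmith in one sentence."),
            ]
            payload = convert_prompt_to_anthropic_format(
                messages,
                model_kwargs={"model_name": "claude-3-haiku-20240307"},
            )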
+ """ + try: + from langchain_anthropic import ChatAnthropic + except ImportError: + raise ImportError( + "The convert_prompt_to_anthropic_format function requires the " + "langchain_anthropic package to run.\n" + "Install with `pip install langchain_anthropic`" + ) + + model_kwargs = model_kwargs or {} + model_name = model_kwargs.pop("model_name", "claude-3-haiku-20240307") + stop = model_kwargs.pop("stop", None) + timeout = model_kwargs.pop("timeout", None) + + anthropic = ChatAnthropic( + model_name=model_name, timeout=timeout, stop=stop, **model_kwargs + ) + + try: + return anthropic._get_request_payload(messages, stop=stop) + except Exception as e: + raise ls_utils.LangSmithError(f"Error converting to Anthropic format: {e}") diff --git a/python/langsmith/env/_git.py b/python/langsmith/env/_git.py index 705f53c1e..ce598285f 100644 --- a/python/langsmith/env/_git.py +++ b/python/langsmith/env/_git.py @@ -47,7 +47,7 @@ def get_git_info(remote: str = "origin") -> GitInfo: dirty=None, tags=None, repo_name=None, - ) + ) return { "remote_url": exec_git(["remote", "get-url", remote]), diff --git a/python/langsmith/env/_runtime_env.py b/python/langsmith/env/_runtime_env.py index 5646263c2..354f0eca1 100644 --- a/python/langsmith/env/_runtime_env.py +++ b/python/langsmith/env/_runtime_env.py @@ -1,4 +1,5 @@ """Environment information.""" + import functools import logging import os @@ -77,6 +78,7 @@ def get_runtime_environment() -> dict: "py_implementation": platform.python_implementation(), "runtime_version": platform.python_version(), "langchain_version": get_langchain_environment(), + "langchain_core_version": get_langchain_core_version(), **shas, } @@ -91,6 +93,16 @@ def get_langchain_environment() -> Optional[str]: return None +@functools.lru_cache(maxsize=1) +def get_langchain_core_version() -> Optional[str]: + try: + import langchain_core # type: ignore + + return langchain_core.__version__ + except ImportError: + return None + + @functools.lru_cache(maxsize=1) def get_docker_version() -> Optional[str]: import subprocess @@ -138,9 +150,9 @@ def get_docker_environment() -> dict: compose_command = _get_compose_command() return { "docker_version": get_docker_version(), - "docker_compose_command": " ".join(compose_command) - if compose_command is not None - else None, + "docker_compose_command": ( + " ".join(compose_command) if compose_command is not None else None + ), "docker_compose_version": get_docker_compose_version(), } @@ -164,6 +176,7 @@ def get_langchain_env_var_metadata() -> dict: "LANGCHAIN_TRACING_V2", "LANGCHAIN_PROJECT", "LANGCHAIN_SESSION", + "LANGSMITH_RUNS_ENDPOINTS", } langchain_metadata = { k: v @@ -187,7 +200,7 @@ def get_langchain_env_var_metadata() -> dict: def _get_default_revision_id() -> Optional[str]: """Get the default revision ID based on `git describe`.""" try: - return exec_git(["describe", "--tags", "--dirty"]) + return exec_git(["describe", "--tags", "--always", "--dirty"]) except BaseException: return None diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index eb344fdbe..7cc50bffa 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -17,6 +17,7 @@ List, Optional, Sequence, + TypeVar, Union, cast, ) @@ -26,7 +27,6 @@ from langsmith import run_trees, schemas from langsmith import utils as ls_utils from langsmith._internal import _aiter as aitertools -from langsmith.beta import warn_beta from langsmith.evaluation._runner import ( AEVALUATOR_T, DATA_T, @@ -35,6 +35,7 @@ 
ExperimentResultRow, _ExperimentManagerMixin, _ForwardResults, + _load_examples_map, _load_experiment, _load_tqdm, _load_traces, @@ -50,7 +51,6 @@ ATARGET_T = Callable[[dict], Awaitable[dict]] -@warn_beta async def aevaluate( target: Union[ATARGET_T, AsyncIterable[dict]], /, @@ -61,6 +61,7 @@ async def aevaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, ) -> AsyncExperimentResults: @@ -81,6 +82,9 @@ async def aevaluate( description (Optional[str]): A description of the experiment. max_concurrency (Optional[int]): The maximum number of concurrent evaluations to run. Defaults to None. + num_repetitions (int): The number of times to run the evaluation. + Each item in the dataset will be run and evaluated this many times. + Defaults to 1. client (Optional[langsmith.Client]): The LangSmith client to use. Defaults to None. blocking (bool): Whether to block until the evaluation is complete. @@ -225,12 +229,12 @@ async def aevaluate( experiment_prefix=experiment_prefix, description=description, max_concurrency=max_concurrency, + num_repetitions=num_repetitions, client=client, blocking=blocking, ) -@warn_beta async def aevaluate_existing( experiment: Union[str, uuid.UUID], /, @@ -310,17 +314,12 @@ async def aevaluate_existing( """ # noqa: E501 client = client or langsmith.Client() - project = _load_experiment(experiment, client) - runs = _load_traces(experiment, client, load_nested=load_nested) - data = [ - example - for example in client.list_examples( - dataset_id=project.reference_dataset_id, - as_of=project.metadata.get("dataset_version"), - ) - ] - runs = sorted(runs, key=lambda r: str(r.reference_example_id)) - data = sorted(data, key=lambda d: str(d.id)) + project = await aitertools.aio_to_thread(_load_experiment, experiment, client) + runs = await aitertools.aio_to_thread( + _load_traces, experiment, client, load_nested=load_nested + ) + data_map = await aitertools.aio_to_thread(_load_examples_map, client, project) + data = [data_map[run.reference_example_id] for run in runs] return await _aevaluate( runs, data=data, @@ -343,6 +342,7 @@ async def _aevaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, experiment: Optional[schemas.TracerSession] = None, @@ -352,7 +352,8 @@ async def _aevaluate( ) client = client or langsmith.Client() runs = None if is_async_target else cast(Iterable[schemas.Run], target) - experiment_, runs = _resolve_experiment( + experiment_, runs = await aitertools.aio_to_thread( + _resolve_experiment, experiment, runs, client, @@ -363,6 +364,7 @@ async def _aevaluate( metadata=metadata, experiment=experiment_ or experiment_prefix, description=description, + num_repetitions=num_repetitions, runs=runs, ).astart() cache_dir = ls_utils.get_cache_dir(None) @@ -423,6 +425,7 @@ def __init__( evaluation_results: Optional[AsyncIterable[EvaluationResults]] = None, summary_results: Optional[AsyncIterable[EvaluationResults]] = None, description: Optional[str] = None, + num_repetitions: int = 1, ): super().__init__( experiment=experiment, @@ -437,10 +440,16 @@ def __init__( ) self._evaluation_results = evaluation_results self._summary_results = summary_results + self._num_repetitions = num_repetitions - def aget_examples(self) -> 
AsyncIterator[schemas.Example]: + async def aget_examples(self) -> AsyncIterator[schemas.Example]: if self._examples is None: self._examples = _aresolve_data(self._data, client=self.client) + if self._num_repetitions > 1: + self._examples = async_chain_from_iterable( + aitertools.atee(self._examples, self._num_repetitions) + ) + self._examples, examples_iter = aitertools.atee( aitertools.ensure_async_iterator(self._examples), 2, lock=asyncio.Lock() ) @@ -450,7 +459,7 @@ async def get_dataset_id(self) -> str: if self._experiment is None or not getattr( self._experiment, "reference_dataset_id", None ): - example = await aitertools.py_anext(self.aget_examples()) + example = await aitertools.py_anext(await self.aget_examples()) if example is None: raise ValueError("No examples found in the dataset.") return str(example.dataset_id) @@ -467,7 +476,7 @@ async def aget_runs(self) -> AsyncIterator[schemas.Run]: async def aget_evaluation_results(self) -> AsyncIterator[EvaluationResults]: if self._evaluation_results is None: - async for _ in self.aget_examples(): + async for _ in await self.aget_examples(): yield {"results": []} else: self._evaluation_results, evaluation_results = aitertools.atee( @@ -479,13 +488,14 @@ async def aget_evaluation_results(self) -> AsyncIterator[EvaluationResults]: yield result async def astart(self) -> _AsyncExperimentManager: - first_example = await aitertools.py_anext(self.aget_examples()) + first_example = await aitertools.py_anext(await self.aget_examples()) if not first_example: raise ValueError("No examples found in the dataset.") project = self._get_project(first_example) self._print_experiment_start(project, first_example) + self._metadata["num_repetitions"] = self._num_repetitions return self.__class__( - self.aget_examples(), + await self.aget_examples(), experiment=project, metadata=self._metadata, client=self.client, @@ -535,7 +545,7 @@ async def awith_summary_evaluators( wrapped_evaluators = _wrap_summary_evaluators(summary_evaluators) aggregate_feedback_gen = self._aapply_summary_evaluators(wrapped_evaluators) return _AsyncExperimentManager( - self.aget_examples(), + await self.aget_examples(), experiment=self._experiment, metadata=self._metadata, client=self.client, @@ -546,7 +556,7 @@ async def awith_summary_evaluators( async def aget_results(self) -> AsyncIterator[ExperimentResultRow]: async for run, example, evaluation_results in aitertools.async_zip( - self.aget_runs(), self.aget_examples(), self.aget_evaluation_results() + self.aget_runs(), await self.aget_examples(), self.aget_evaluation_results() ): yield ExperimentResultRow( run=run, @@ -573,7 +583,7 @@ async def _apredict( fn = _ensure_async_traceable(target) async def predict_all(): - async for example in self.aget_examples(): + async for example in await self.aget_examples(): # Yield the coroutine to be awaited later yield _aforward( fn, example, self.experiment_name, self._metadata, self.client @@ -612,7 +622,12 @@ async def _arun_evaluators( **{"experiment": self.experiment_name}, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + } ): run = current_results["run"] example = current_results["example"] @@ -645,7 +660,7 @@ async def _aapply_summary_evaluators( self, summary_evaluators: Sequence[SUMMARY_EVALUATOR_T] ) -> AsyncIterator[EvaluationResults]: runs, examples = [], [] - async_examples = 
aitertools.ensure_async_iterator(self.aget_examples()) + async_examples = aitertools.ensure_async_iterator(await self.aget_examples()) async for run, example in aitertools.async_zip( self.aget_runs(), async_examples ): @@ -666,11 +681,11 @@ async def _aapply_summary_evaluators( **current_context, "project_name": "evaluators", "metadata": metadata, + "enabled": True, } ): for evaluator in summary_evaluators: try: - # TODO: Support async evaluators summary_eval_result = evaluator(runs, examples) flattened_results = self.client._select_eval_results( summary_eval_result, @@ -680,7 +695,8 @@ async def _aapply_summary_evaluators( for result in flattened_results: feedback = result.dict(exclude={"target_run_id"}) evaluator_info = feedback.pop("evaluator_info", None) - self.client.create_feedback( + await aitertools.aio_to_thread( + self.client.create_feedback, **feedback, run_id=None, project_id=project_id, @@ -694,7 +710,7 @@ async def _aapply_summary_evaluators( async def _get_dataset_version(self) -> Optional[str]: modified_at = [] - async for example in self.aget_examples(): + async for example in await self.aget_examples(): if example.modified_at: # Should always be defined in practice when fetched, # but the typing permits None @@ -703,6 +719,22 @@ async def _get_dataset_version(self) -> Optional[str]: max_modified_at = max(modified_at) if modified_at else None return max_modified_at.isoformat() if max_modified_at else None + async def _get_dataset_splits(self) -> Optional[list[str]]: + splits = set() + async for example in await self.aget_examples(): + if ( + example.metadata + and example.metadata.get("dataset_split") + and isinstance(example.metadata["dataset_split"], list) + ): + for split in example.metadata["dataset_split"]: + if isinstance(split, str): + splits.add(split) + else: + splits.add("base") + + return list(splits) + async def _aend(self) -> None: experiment = self._experiment if experiment is None: @@ -710,6 +742,7 @@ async def _aend(self) -> None: project_metadata = self._get_experiment_metadata() project_metadata["dataset_version"] = await self._get_dataset_version() + project_metadata["dataset_splits"] = await self._get_dataset_splits() self.client.update_project( experiment.id, end_time=datetime.datetime.now(datetime.timezone.utc), @@ -768,7 +801,7 @@ async def wait(self) -> None: async def _aforward( - fn: rh.SupportsLangsmithExtra[Awaitable], + fn: rh.SupportsLangsmithExtra[[dict], Awaitable], example: schemas.Example, experiment_name: str, metadata: dict, @@ -780,33 +813,36 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - await fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + await fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target 
function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) -def _ensure_async_traceable(target: ATARGET_T) -> rh.SupportsLangsmithExtra[Awaitable]: +def _ensure_async_traceable( + target: ATARGET_T, +) -> rh.SupportsLangsmithExtra[[dict], Awaitable]: if not asyncio.iscoroutinefunction(target): raise ValueError( "Target must be an async function. For sync functions, use evaluate." @@ -823,3 +859,15 @@ def _aresolve_data( if isinstance(data, AsyncIterable): return aitertools.ensure_async_iterator(data) return aitertools.ensure_async_iterator(_resolve_data(data, client=client)) + + +T = TypeVar("T") + + +async def async_chain_from_iterable( + iterable: Iterable[AsyncIterable[T]], +) -> AsyncIterator[T]: + """Chain multiple async iterables.""" + for sub_iterable in iterable: + async for item in sub_iterable: + yield item diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index 3c07ed165..f85dcc482 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -55,14 +55,23 @@ DATA_T = Union[str, uuid.UUID, Iterable[schemas.Example]] # Summary evaluator runs over the whole dataset # and reports aggregate metric(s) -SUMMARY_EVALUATOR_T = Callable[ - [Sequence[schemas.Run], Sequence[schemas.Example]], - Union[EvaluationResult, EvaluationResults], +SUMMARY_EVALUATOR_T = Union[ + Callable[ + [Sequence[schemas.Run], Sequence[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], + Callable[ + [List[schemas.Run], List[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], ] # Row-level evaluator EVALUATOR_T = Union[ RunEvaluator, - Callable[[schemas.Run, Optional[schemas.Example]], EvaluationResult], + Callable[ + [schemas.Run, Optional[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], ] AEVALUATOR_T = Union[ Callable[ @@ -82,6 +91,7 @@ def evaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, ) -> ExperimentResults: @@ -106,6 +116,9 @@ def evaluate( Defaults to None. blocking (bool): Whether to block until the evaluation is complete. Defaults to True. + num_repetitions (int): The number of times to run the evaluation. + Each item in the dataset will be run and evaluated this many times. + Defaults to 1. Returns: ExperimentResults: The results of the evaluation. @@ -186,6 +199,7 @@ def evaluate( Using the `evaluate` API with an off-the-shelf LangChain evaluator: >>> from langsmith.evaluation import LangChainStringEvaluator + >>> from langchain_openai import ChatOpenAI >>> def prepare_criteria_data(run: Run, example: Example): ... return { ... "prediction": run.outputs["output"], @@ -205,6 +219,7 @@ def evaluate( ... "usefulness": "The prediction is useful if it is correct" ... " and/or asks a useful followup question." ... }, + ... "llm": ChatOpenAI(model="gpt-4o"), ... }, ... prepare_data=prepare_criteria_data, ... 
), @@ -241,6 +256,7 @@ def evaluate( experiment_prefix=experiment_prefix, description=description, max_concurrency=max_concurrency, + num_repetitions=num_repetitions, client=client, blocking=blocking, ) @@ -321,14 +337,8 @@ def evaluate_existing( client = client or langsmith.Client() project = _load_experiment(experiment, client) runs = _load_traces(experiment, client, load_nested=load_nested) - data = list( - client.list_examples( - dataset_id=project.reference_dataset_id, - as_of=project.metadata.get("dataset_version"), - ) - ) - runs = sorted(runs, key=lambda r: str(r.reference_example_id)) - data = sorted(data, key=lambda d: str(d.id)) + data_map = _load_examples_map(client, project) + data = [data_map[cast(uuid.UUID, run.reference_example_id)] for run in runs] return _evaluate( runs, data=data, @@ -338,6 +348,7 @@ def evaluate_existing( max_concurrency=max_concurrency, client=client, blocking=blocking, + experiment=project, ) @@ -513,6 +524,8 @@ def evaluate_comparative( View the evaluation results for experiment:... >>> results_1.wait() >>> results_2.wait() + >>> import time + >>> time.sleep(10) # Wait for the traces to be fully processed Finally, you would compare the two prompts directly: >>> import json @@ -678,7 +691,9 @@ def evaluate_and_submit_feedback( return result tqdm = _load_tqdm() - with cf.ThreadPoolExecutor(max_workers=max_concurrency or 1) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency or 1 + ) as executor: futures = [] for example_id, runs_list in tqdm(runs_dict.items()): results[example_id] = { @@ -766,6 +781,7 @@ def _evaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, experiment: Optional[schemas.TracerSession] = None, @@ -785,6 +801,7 @@ def _evaluate( metadata=metadata, experiment=experiment_ or experiment_prefix, description=description, + num_repetitions=num_repetitions, # If provided, we don't need to create a new experiment. runs=runs, # Create or resolve the experiment. @@ -857,6 +874,18 @@ def _load_traces( return results +def _load_examples_map( + client: langsmith.Client, project: schemas.TracerSession +) -> Dict[uuid.UUID, schemas.Example]: + return { + e.id: e + for e in client.list_examples( + dataset_id=project.reference_dataset_id, + as_of=project.metadata.get("dataset_version"), + ) + } + + IT = TypeVar("IT") @@ -981,6 +1010,7 @@ class _ExperimentManager(_ExperimentManagerMixin): Args: data (DATA_T): The data used for the experiment. Can be a dataset name or ID OR a generator of examples. + num_repetitions (int): The number of times to run over the data. runs (Optional[Iterable[schemas.Run]]): The runs associated with the experiment predictions. 
experiment (Optional[schemas.TracerSession]): The tracer session @@ -1006,6 +1036,7 @@ def __init__( evaluation_results: Optional[Iterable[EvaluationResults]] = None, summary_results: Optional[Iterable[EvaluationResults]] = None, description: Optional[str] = None, + num_repetitions: int = 1, ): super().__init__( experiment=experiment, @@ -1018,11 +1049,16 @@ def __init__( self._runs = runs self._evaluation_results = evaluation_results self._summary_results = summary_results + self._num_repetitions = num_repetitions @property def examples(self) -> Iterable[schemas.Example]: if self._examples is None: self._examples = _resolve_data(self._data, client=self.client) + if self._num_repetitions > 1: + self._examples = itertools.chain.from_iterable( + itertools.tee(self._examples, self._num_repetitions) + ) self._examples, examples_iter = itertools.tee(self._examples) return examples_iter @@ -1056,6 +1092,7 @@ def start(self) -> _ExperimentManager: first_example = next(itertools.islice(self.examples, 1)) project = self._get_project(first_example) self._print_experiment_start(project, first_example) + self._metadata["num_repetitions"] = self._num_repetitions return self.__class__( self.examples, experiment=project, @@ -1174,7 +1211,7 @@ def _predict( ) else: - with cf.ThreadPoolExecutor(max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor(max_concurrency) as executor: futures = [ executor.submit( _forward, @@ -1206,7 +1243,12 @@ def _run_evaluators( }, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + } ): run = current_results["run"] example = current_results["example"] @@ -1247,10 +1289,13 @@ def _score( (e.g. 
from a previous prediction step) """ if max_concurrency == 0: + context = copy_context() for current_results in self.get_results(): - yield self._run_evaluators(evaluators, current_results) + yield context.run(self._run_evaluators, evaluators, current_results) else: - with cf.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency + ) as executor: futures = [] for current_results in self.get_results(): futures.append( @@ -1272,7 +1317,7 @@ def _apply_summary_evaluators( runs.append(run) examples.append(example) aggregate_feedback = [] - with cf.ThreadPoolExecutor() as executor: + with ls_utils.ContextThreadPoolExecutor() as executor: project_id = self._get_experiment().id current_context = rh.get_tracing_context() metadata = { @@ -1322,6 +1367,23 @@ def _get_dataset_version(self) -> Optional[str]: max_modified_at = max(modified_at) if modified_at else None return max_modified_at.isoformat() if max_modified_at else None + def _get_dataset_splits(self) -> Optional[list[str]]: + examples = list(self.examples) + splits = set() + for example in examples: + if ( + example.metadata + and example.metadata.get("dataset_split") + and isinstance(example.metadata["dataset_split"], list) + ): + for split in example.metadata["dataset_split"]: + if isinstance(split, str): + splits.add(split) + else: + splits.add("base") + + return list(splits) + def _end(self) -> None: experiment = self._experiment if experiment is None: @@ -1329,6 +1391,7 @@ def _end(self) -> None: project_metadata = self._get_experiment_metadata() project_metadata["dataset_version"] = self._get_dataset_version() + project_metadata["dataset_splits"] = self._get_dataset_splits() self.client.update_project( experiment.id, end_time=datetime.datetime.now(datetime.timezone.utc), @@ -1364,7 +1427,7 @@ def _wrapper_inner( def _wrapper_super_inner( runs_: str, examples_: str ) -> Union[EvaluationResult, EvaluationResults]: - return evaluator(runs, examples) + return evaluator(list(runs), list(examples)) return _wrapper_super_inner( f"Runs[] (Length={len(runs)})", f"Examples[] (Length={len(examples)})" @@ -1396,30 +1459,31 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _resolve_data( @@ -1433,12 +1497,14 @@ def _resolve_data( return data -def _ensure_traceable(target: TARGET_T) -> rh.SupportsLangsmithExtra: +def _ensure_traceable( + target: TARGET_T | rh.SupportsLangsmithExtra[[dict], dict], +) -> rh.SupportsLangsmithExtra[[dict], dict]: 
"""Ensure the target function is traceable.""" if not callable(target): raise ValueError("Target must be a callable function.") if rh.is_traceable_function(target): - fn = cast(rh.SupportsLangsmithExtra, target) + fn = target else: fn = rh.traceable(name="Target")(target) return fn @@ -1455,7 +1521,7 @@ def _resolve_experiment( if experiment is not None: if not experiment.name: raise ValueError("Experiment name must be defined if provided.") - return experiment, None + return experiment, runs # If we have runs, that means the experiment was already started. if runs is not None: if runs is not None: diff --git a/python/langsmith/evaluation/evaluator.py b/python/langsmith/evaluation/evaluator.py index 79c700feb..47797e646 100644 --- a/python/langsmith/evaluation/evaluator.py +++ b/python/langsmith/evaluation/evaluator.py @@ -22,7 +22,7 @@ try: from pydantic.v1 import BaseModel, Field, ValidationError # type: ignore[import] except ImportError: - from pydantic import BaseModel, Field, ValidationError + from pydantic import BaseModel, Field, ValidationError # type: ignore[assignment] from functools import wraps @@ -181,9 +181,8 @@ def __init__( self.afunc = run_helpers.ensure_traceable(func) self._name = getattr(func, "__name__", "DynamicRunEvaluator") else: - self.func = cast( - run_helpers.SupportsLangsmithExtra[_RUNNABLE_OUTPUT], - run_helpers.ensure_traceable(func), + self.func = run_helpers.ensure_traceable( + cast(Callable[[Run, Optional[Example]], _RUNNABLE_OUTPUT], func) ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") @@ -383,9 +382,14 @@ def __init__( self.afunc = run_helpers.ensure_traceable(func) self._name = getattr(func, "__name__", "DynamicRunEvaluator") else: - self.func = cast( - run_helpers.SupportsLangsmithExtra[_COMPARISON_OUTPUT], - run_helpers.ensure_traceable(func), + self.func = run_helpers.ensure_traceable( + cast( + Callable[ + [Sequence[Run], Optional[Example]], + _COMPARISON_OUTPUT, + ], + func, + ) ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") diff --git a/python/langsmith/evaluation/integrations/_langchain.py b/python/langsmith/evaluation/integrations/_langchain.py index 510e79c12..9478ef653 100644 --- a/python/langsmith/evaluation/integrations/_langchain.py +++ b/python/langsmith/evaluation/integrations/_langchain.py @@ -44,6 +44,7 @@ class LangChainStringEvaluator: Converting a LangChainStringEvaluator to a RunEvaluator: >>> from langsmith.evaluation import LangChainStringEvaluator + >>> from langchain_openai import ChatOpenAI >>> evaluator = LangChainStringEvaluator( ... "criteria", ... config={ @@ -51,6 +52,7 @@ class LangChainStringEvaluator: ... "usefulness": "The prediction is useful if" ... " it is correct and/or asks a useful followup question." ... }, + ... "llm": ChatOpenAI(model="gpt-4o"), ... }, ... ) >>> run_evaluator = evaluator.as_run_evaluator() @@ -111,6 +113,7 @@ class LangChainStringEvaluator: ... "accuracy": "Score 1: Completely inaccurate\nScore 5: Somewhat accurate\nScore 10: Completely accurate" ... }, ... "normalize_by": 10, + ... "llm": ChatAnthropic(model="claude-3-opus-20240229"), ... }, ... prepare_data=prepare_data, ... 
) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py new file mode 100644 index 000000000..3ae7b333c --- /dev/null +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -0,0 +1,292 @@ +"""Contains the LLMEvaluator class for building LLM-as-a-judge evaluators.""" + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast + +from pydantic import BaseModel + +from langsmith._internal._beta_decorator import warn_beta +from langsmith.evaluation import EvaluationResult, EvaluationResults, RunEvaluator +from langsmith.schemas import Example, Run + + +class CategoricalScoreConfig(BaseModel): + """Configuration for a categorical score.""" + + key: str + choices: List[str] + description: str + include_explanation: bool = False + explanation_description: Optional[str] = None + + +class ContinuousScoreConfig(BaseModel): + """Configuration for a continuous score.""" + + key: str + min: float = 0 + max: float = 1 + description: str + include_explanation: bool = False + explanation_description: Optional[str] = None + + +def _create_score_json_schema( + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], +) -> dict: + properties: Dict[str, Any] = {} + if isinstance(score_config, CategoricalScoreConfig): + properties["score"] = { + "type": "string", + "enum": score_config.choices, + "description": f"The score for the evaluation, one of " + f"{', '.join(score_config.choices)}.", + } + elif isinstance(score_config, ContinuousScoreConfig): + properties["score"] = { + "type": "number", + "minimum": score_config.min, + "maximum": score_config.max, + "description": f"The score for the evaluation, between " + f"{score_config.min} and {score_config.max}, inclusive.", + } + else: + raise ValueError("Invalid score type. Must be 'categorical' or 'continuous'") + + if score_config.include_explanation: + properties["explanation"] = { + "type": "string", + "description": ( + "The explanation for the score." + if score_config.explanation_description is None + else score_config.explanation_description + ), + } + + return { + "title": score_config.key, + "description": score_config.description, + "type": "object", + "properties": properties, + "required": ( + ["score", "explanation"] if score_config.include_explanation else ["score"] + ), + } + + +class LLMEvaluator(RunEvaluator): + """A class for building LLM-as-a-judge evaluators.""" + + def __init__( + self, + *, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, + model_name: str = "gpt-4o", + model_provider: str = "openai", + **kwargs, + ): + """Initialize the LLMEvaluator. + + Args: + prompt_template (Union[str, List[Tuple[str, str]]): The prompt + template to use for the evaluation. If a string is provided, it is + assumed to be a human / user message. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The configuration for the score, either categorical or continuous. + map_variables (Optional[Callable[[Run, Example], dict]], optional): + A function that maps the run and example to the variables in the + prompt. Defaults to None. If None, it is assumed that the prompt + only requires 'input', 'output', and 'expected'. + model_name (Optional[str], optional): The model to use for the evaluation. + Defaults to "gpt-4o". + model_provider (Optional[str], optional): The model provider to use + for the evaluation. 
Defaults to "openai". + """ + try: + from langchain.chat_models import init_chat_model + except ImportError as e: + raise ImportError( + "LLMEvaluator requires langchain to be installed. " + "Please install langchain by running `pip install langchain`." + ) from e + + chat_model = init_chat_model( + model=model_name, model_provider=model_provider, **kwargs + ) + + self._initialize(prompt_template, score_config, map_variables, chat_model) + + @classmethod + def from_model( + cls, + model: Any, + *, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, + ): + """Create an LLMEvaluator instance from a BaseChatModel instance. + + Args: + model (BaseChatModel): The chat model instance to use for the evaluation. + prompt_template (Union[str, List[Tuple[str, str]]): The prompt + template to use for the evaluation. If a string is provided, it is + assumed to be a system message. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The configuration for the score, either categorical or continuous. + map_variables (Optional[Callable[[Run, Example]], dict]], optional): + A function that maps the run and example to the variables in the + prompt. Defaults to None. If None, it is assumed that the prompt + only requires 'input', 'output', and 'expected'. + + Returns: + LLMEvaluator: An instance of LLMEvaluator. + """ + instance = cls.__new__(cls) + instance._initialize(prompt_template, score_config, map_variables, model) + return instance + + def _initialize( + self, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]], + chat_model: Any, + ): + """Shared initialization code for __init__ and from_model. + + Args: + prompt_template (Union[str, List[Tuple[str, str]]): The prompt template. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The score configuration. + map_variables (Optional[Callable[[Run, Example]], dict]]): + Function to map variables. + chat_model (BaseChatModel): The chat model instance. + """ + try: + from langchain_core.language_models.chat_models import BaseChatModel + from langchain_core.prompts import ChatPromptTemplate + except ImportError as e: + raise ImportError( + "LLMEvaluator requires langchain-core to be installed. " + "Please install langchain-core by running `pip install langchain-core`." + ) from e + + if not ( + isinstance(chat_model, BaseChatModel) + and hasattr(chat_model, "with_structured_output") + ): + raise ValueError( + "chat_model must be an instance of " + "BaseLanguageModel and support structured output." 
+            )
+
+        if isinstance(prompt_template, str):
+            self.prompt = ChatPromptTemplate.from_messages([("human", prompt_template)])
+        else:
+            self.prompt = ChatPromptTemplate.from_messages(prompt_template)
+
+        if set(self.prompt.input_variables) - {"input", "output", "expected"}:
+            if not map_variables:
+                raise ValueError(
+                    "map_variables must be provided if the prompt template contains "
+                    "variables other than 'input', 'output', and 'expected'"
+                )
+        self.map_variables = map_variables
+
+        self.score_config = score_config
+        self.score_schema = _create_score_json_schema(self.score_config)
+
+        chat_model = chat_model.with_structured_output(self.score_schema)
+        self.runnable = self.prompt | chat_model
+
+    @warn_beta
+    def evaluate_run(
+        self, run: Run, example: Optional[Example] = None
+    ) -> Union[EvaluationResult, EvaluationResults]:
+        """Evaluate a run."""
+        variables = self._prepare_variables(run, example)
+        output: dict = cast(dict, self.runnable.invoke(variables))
+        return self._parse_output(output)
+
+    @warn_beta
+    async def aevaluate_run(
+        self, run: Run, example: Optional[Example] = None
+    ) -> Union[EvaluationResult, EvaluationResults]:
+        """Asynchronously evaluate a run."""
+        variables = self._prepare_variables(run, example)
+        output: dict = cast(dict, await self.runnable.ainvoke(variables))
+        return self._parse_output(output)
+
+    def _prepare_variables(self, run: Run, example: Optional[Example]) -> dict:
+        """Prepare variables for model invocation."""
+        if self.map_variables:
+            return self.map_variables(run, example)
+
+        variables = {}
+        if "input" in self.prompt.input_variables:
+            if len(run.inputs) == 0:
+                raise ValueError(
+                    "No input keys are present in run.inputs but the prompt "
+                    "requires 'input'."
+                )
+            if len(run.inputs) != 1:
+                raise ValueError(
+                    "Multiple input keys are present in run.inputs. Please provide "
+                    "a map_variables function."
+                )
+            variables["input"] = list(run.inputs.values())[0]
+
+        if "output" in self.prompt.input_variables:
+            if not run.outputs:
+                raise ValueError(
+                    "No output keys are present in run.outputs but the prompt "
+                    "requires 'output'."
+                )
+            if len(run.outputs) == 0:
+                raise ValueError(
+                    "No output keys are present in run.outputs but the prompt "
+                    "requires 'output'."
+                )
+            if len(run.outputs) != 1:
+                raise ValueError(
+                    "Multiple output keys are present in run.outputs. Please "
+                    "provide a map_variables function."
+                )
+            variables["output"] = list(run.outputs.values())[0]
+
+        if "expected" in self.prompt.input_variables:
+            if not example or not example.outputs:
+                raise ValueError(
+                    "No example or example outputs is provided but the prompt "
+                    "requires 'expected'."
+                )
+            if len(example.outputs) == 0:
+                raise ValueError(
+                    "No output keys are present in example.outputs but the prompt "
+                    "requires 'expected'."
+                )
+            if len(example.outputs) != 1:
+                raise ValueError(
+                    "Multiple output keys are present in example.outputs. Please "
+                    "provide a map_variables function."
+ ) + variables["expected"] = list(example.outputs.values())[0] + + return variables + + def _parse_output(self, output: dict) -> Union[EvaluationResult, EvaluationResults]: + """Parse the model output into an evaluation result.""" + if isinstance(self.score_config, CategoricalScoreConfig): + value = output["score"] + explanation = output.get("explanation", None) + return EvaluationResult( + key=self.score_config.key, value=value, comment=explanation + ) + elif isinstance(self.score_config, ContinuousScoreConfig): + score = output["score"] + explanation = output.get("explanation", None) + return EvaluationResult( + key=self.score_config.key, score=score, comment=explanation + ) diff --git a/python/langsmith/evaluation/string_evaluator.py b/python/langsmith/evaluation/string_evaluator.py index 749795604..423ddedba 100644 --- a/python/langsmith/evaluation/string_evaluator.py +++ b/python/langsmith/evaluation/string_evaluator.py @@ -35,4 +35,4 @@ def evaluate_run( run_input = run.inputs[self.input_key] run_output = run.outputs[self.prediction_key] grading_results = self.grading_function(run_input, run_output, answer) - return EvaluationResult(key=self.evaluation_name, **grading_results) + return EvaluationResult(**{"key": self.evaluation_name, **grading_results}) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 478d119d8..05f2534fe 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -25,6 +25,7 @@ Mapping, Optional, Protocol, + Sequence, Tuple, Type, TypedDict, @@ -35,12 +36,17 @@ runtime_checkable, ) +from typing_extensions import ParamSpec, TypeGuard + from langsmith import client as ls_client from langsmith import run_trees, utils from langsmith._internal import _aiter as aitertools +from langsmith.env import _runtime_env if TYPE_CHECKING: - from langchain.schema.runnable import Runnable + from types import TracebackType + + from langchain_core.runnables import Runnable LOGGER = logging.getLogger(__name__) _PARENT_RUN_TREE = contextvars.ContextVar[Optional[run_trees.RunTree]]( @@ -143,7 +149,9 @@ def tracing_context( get_run_tree_context = get_current_run_tree -def is_traceable_function(func: Callable) -> bool: +def is_traceable_function( + func: Callable[P, R], +) -> TypeGuard[SupportsLangsmithExtra[P, R]]: """Check if a function is @traceable decorated.""" return ( _is_traceable_function(func) @@ -152,12 +160,11 @@ def is_traceable_function(func: Callable) -> bool: ) -def ensure_traceable(func: Callable[..., R]) -> Callable[..., R]: +def ensure_traceable(func: Callable[P, R]) -> SupportsLangsmithExtra[P, R]: """Ensure that a function is traceable.""" - return cast( - SupportsLangsmithExtra, - (func if is_traceable_function(func) else traceable()(func)), - ) + if is_traceable_function(func): + return func + return traceable()(func) def is_async(func: Callable) -> bool: @@ -170,6 +177,7 @@ def is_async(func: Callable) -> bool: class LangSmithExtra(TypedDict, total=False): """Any additional info to be injected into the run dynamically.""" + name: Optional[str] reference_example_id: Optional[ls_client.ID_TYPE] run_extra: Optional[Dict] parent: Optional[Union[run_trees.RunTree, str, Mapping]] @@ -183,10 +191,11 @@ class LangSmithExtra(TypedDict, total=False): R = TypeVar("R", covariant=True) +P = ParamSpec("P") @runtime_checkable -class SupportsLangsmithExtra(Protocol, Generic[R]): +class SupportsLangsmithExtra(Protocol, Generic[P, R]): """Implementations of this Protoc accept an optional langsmith_extra parameter. 
Args: @@ -201,9 +210,9 @@ class SupportsLangsmithExtra(Protocol, Generic[R]): def __call__( self, - *args: Any, + *args: P.args, langsmith_extra: Optional[LangSmithExtra] = None, - **kwargs: Any, + **kwargs: P.kwargs, ) -> R: """Call the instance when it is called as a function. @@ -222,8 +231,8 @@ def __call__( @overload def traceable( - func: Callable[..., R], -) -> Callable[..., R]: ... + func: Callable[P, R], +) -> SupportsLangsmithExtra[P, R]: ... @overload @@ -234,10 +243,12 @@ def traceable( metadata: Optional[Mapping[str, Any]] = None, tags: Optional[List[str]] = None, client: Optional[ls_client.Client] = None, - reduce_fn: Optional[Callable] = None, + reduce_fn: Optional[Callable[[Sequence], dict]] = None, project_name: Optional[str] = None, process_inputs: Optional[Callable[[dict], dict]] = None, -) -> Callable[[Callable[..., R]], SupportsLangsmithExtra[R]]: ... + process_outputs: Optional[Callable[..., dict]] = None, + _invocation_params_fn: Optional[Callable[[dict], dict]] = None, +) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ... def traceable( @@ -261,7 +272,11 @@ def traceable( called, and the run itself will be stuck in a pending state. project_name: The name of the project to log the run to. Defaults to None, which will use the default project. - process_inputs: A function to filter the inputs to the run. Defaults to None. + process_inputs: Custom serialization / processing function for inputs. + Defaults to None. + process_outputs: Custom serialization / processing function for outputs. + Defaults to None. + Returns: @@ -370,7 +385,7 @@ def manual_extra_function(x): manual_extra_function(5, langsmith_extra={"metadata": {"version": "1.0"}}) - """ + """ # noqa: E501 run_type: ls_client.RUN_TYPE_T = ( args[0] if args and isinstance(args[0], str) @@ -404,7 +419,23 @@ def manual_extra_function(x): project_name=kwargs.pop("project_name", None), run_type=run_type, process_inputs=kwargs.pop("process_inputs", None), + invocation_params_fn=kwargs.pop("_invocation_params_fn", None), ) + outputs_processor = kwargs.pop("process_outputs", None) + + def _on_run_end( + container: _TraceableContainer, + outputs: Optional[Any] = None, + error: Optional[BaseException] = None, + ) -> None: + """Handle the end of run.""" + try: + if outputs_processor is not None: + outputs = outputs_processor(outputs) + _container_end(container, outputs=outputs, error=error) + except BaseException as e: + LOGGER.warning(f"Unable to process trace outputs: {repr(e)}") + if kwargs: warnings.warn( f"The following keyword arguments are not recognized and will be ignored: " @@ -413,6 +444,10 @@ def manual_extra_function(x): ) def decorator(func: Callable): + func_sig = inspect.signature(func) + func_accepts_parent_run = func_sig.parameters.get("run_tree", None) is not None + func_accepts_config = func_sig.parameters.get("config", None) is not None + @functools.wraps(func) async def async_wrapper( *args: Any, @@ -420,22 +455,22 @@ async def async_wrapper( **kwargs: Any, ) -> Any: """Async version of wrapper function.""" - run_container = _setup_run( + run_container = await aitertools.aio_to_thread( + _setup_run, func, container_input=container_input, langsmith_extra=langsmith_extra, args=args, kwargs=kwargs, ) - func_accepts_parent_run = ( - inspect.signature(func).parameters.get("run_tree", None) is not None - ) + try: - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if func_accepts_parent_run: - fr_coro = func(*args, 
run_tree=run_container["new_run"], **kwargs) - else: - fr_coro = func(*args, **kwargs) + kwargs["run_tree"] = run_container["new_run"] + if not func_accepts_config: + kwargs.pop("config", None) + fr_coro = func(*args, **kwargs) if accepts_context: function_result = await asyncio.create_task( # type: ignore[call-arg] fr_coro, context=run_container["context"] @@ -447,38 +482,40 @@ async def async_wrapper( ): function_result = await fr_coro except BaseException as e: - _container_end(run_container, error=e) + # shield from cancellation, given we're catching all exceptions + await asyncio.shield( + aitertools.aio_to_thread(_on_run_end, run_container, error=e) + ) raise e - _container_end(run_container, outputs=function_result) + await aitertools.aio_to_thread( + _on_run_end, run_container, outputs=function_result + ) return function_result @functools.wraps(func) async def async_generator_wrapper( *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any ) -> AsyncGenerator: - run_container = _setup_run( + run_container = await aitertools.aio_to_thread( + _setup_run, func, container_input=container_input, langsmith_extra=langsmith_extra, args=args, kwargs=kwargs, ) - func_accepts_parent_run = ( - inspect.signature(func).parameters.get("run_tree", None) is not None - ) results: List[Any] = [] try: if func_accepts_parent_run: - async_gen_result = func( - *args, run_tree=run_container["new_run"], **kwargs - ) - else: + kwargs["run_tree"] = run_container["new_run"] # TODO: Nesting is ambiguous if a nested traceable function is only # called mid-generation. Need to explicitly accept run_tree to get # around this. - async_gen_result = func(*args, **kwargs) + if not func_accepts_config: + kwargs.pop("config", None) + async_gen_result = func(*args, **kwargs) # Can't iterate through if it's a coroutine - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if inspect.iscoroutine(async_gen_result): if accepts_context: async_gen_result = await asyncio.create_task( @@ -519,7 +556,9 @@ async def async_generator_wrapper( except StopAsyncIteration: pass except BaseException as e: - _container_end(run_container, error=e) + await asyncio.shield( + aitertools.aio_to_thread(_on_run_end, run_container, error=e) + ) raise e if results: if reduce_fn: @@ -532,7 +571,9 @@ async def async_generator_wrapper( function_result = results else: function_result = None - _container_end(run_container, outputs=function_result) + await aitertools.aio_to_thread( + _on_run_end, run_container, outputs=function_result + ) @functools.wraps(func) def wrapper( @@ -553,17 +594,14 @@ def wrapper( ) try: if func_accepts_parent_run: - function_result = run_container["context"].run( - func, *args, run_tree=run_container["new_run"], **kwargs - ) - else: - function_result = run_container["context"].run( - func, *args, **kwargs - ) + kwargs["run_tree"] = run_container["new_run"] + if not func_accepts_config: + kwargs.pop("config", None) + function_result = run_container["context"].run(func, *args, **kwargs) except BaseException as e: - _container_end(run_container, error=e) + _on_run_end(run_container, error=e) raise e - _container_end(run_container, outputs=function_result) + _on_run_end(run_container, outputs=function_result) return function_result @functools.wraps(func) @@ -583,16 +621,13 @@ def generator_wrapper( results: List[Any] = [] try: if func_accepts_parent_run: - generator_result = run_container["context"].run( - func, *args, 
run_tree=run_container["new_run"], **kwargs - ) - else: + kwargs["run_tree"] = run_container["new_run"] # TODO: Nesting is ambiguous if a nested traceable function is only # called mid-generation. Need to explicitly accept run_tree to get # around this. - generator_result = run_container["context"].run( - func, *args, **kwargs - ) + if not func_accepts_config: + kwargs.pop("config", None) + generator_result = run_container["context"].run(func, *args, **kwargs) try: while True: item = run_container["context"].run(next, generator_result) @@ -616,7 +651,7 @@ def generator_wrapper( pass except BaseException as e: - _container_end(run_container, error=e) + _on_run_end(run_container, error=e) raise e if results: if reduce_fn: @@ -629,7 +664,7 @@ def generator_wrapper( function_result = results else: function_result = None - _container_end(run_container, outputs=function_result) + _on_run_end(run_container, outputs=function_result) if inspect.isasyncgenfunction(func): selected_wrapper: Callable = async_generator_wrapper @@ -643,6 +678,26 @@ def generator_wrapper( else: selected_wrapper = wrapper setattr(selected_wrapper, "__langsmith_traceable__", True) + sig = inspect.signature(selected_wrapper) + if not sig.parameters.get("config"): + sig = sig.replace( + parameters=[ + *( + param + for param in sig.parameters.values() + if param.kind != inspect.Parameter.VAR_KEYWORD + ), + inspect.Parameter( + "config", inspect.Parameter.KEYWORD_ONLY, default=None + ), + *( + param + for param in sig.parameters.values() + if param.kind == inspect.Parameter.VAR_KEYWORD + ), + ] + ) + selected_wrapper.__signature__ = sig # type: ignore[attr-defined] return selected_wrapper # If the decorator is called with no arguments, then it's being used as a @@ -653,89 +708,289 @@ def generator_wrapper( return decorator -@contextlib.contextmanager -def trace( - name: str, - run_type: ls_client.RUN_TYPE_T = "chain", - *, - inputs: Optional[Dict] = None, - extra: Optional[Dict] = None, - project_name: Optional[str] = None, - parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Mapping[str, Any]] = None, - client: Optional[ls_client.Client] = None, - run_id: Optional[ls_client.ID_TYPE] = None, - reference_example_id: Optional[ls_client.ID_TYPE] = None, - exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None, - **kwargs: Any, -) -> Generator[run_trees.RunTree, None, None]: - """Context manager for creating a run tree.""" - if kwargs: - # In case someone was passing an executor before. - warnings.warn( - "The `trace` context manager no longer supports the following kwargs: " - f"{sorted(kwargs.keys())}.", - DeprecationWarning, - ) - outer_tags = _TAGS.get() - outer_metadata = _METADATA.get() - outer_project = _PROJECT_NAME.get() or utils.get_tracer_project() - parent_run_ = _get_parent_run( - {"parent": parent, "run_tree": kwargs.get("run_tree")} - ) +class trace: + """Manage a langsmith run in context. + + This class can be used as both a synchronous and asynchronous context manager. + + Parameters: + ----------- + name : str + Name of the run + run_type : ls_client.RUN_TYPE_T, optional + Type of run (e.g., "chain", "llm", "tool"). Defaults to "chain". 
+ inputs : Optional[Dict], optional + Initial input data for the run + project_name : Optional[str], optional + Associates the run with a specific project, overriding defaults + parent : Optional[Union[run_trees.RunTree, str, Mapping]], optional + Parent run, accepts RunTree, dotted order string, or tracing headers + tags : Optional[List[str]], optional + Categorization labels for the run + metadata : Optional[Mapping[str, Any]], optional + Arbitrary key-value pairs for run annotation + client : Optional[ls_client.Client], optional + LangSmith client for specifying a different tenant, + setting custom headers, or modifying API endpoint + run_id : Optional[ls_client.ID_TYPE], optional + Preset identifier for the run + reference_example_id : Optional[ls_client.ID_TYPE], optional + You typically won't set this. It associates this run with a dataset example. + This is only valid for root runs (not children) in an evaluation context. + exceptions_to_handle : Optional[Tuple[Type[BaseException], ...]], optional + Typically not set. Exception types to ignore in what is sent up to LangSmith + extra : Optional[Dict], optional + Typically not set. Use 'metadata' instead. Extra data to be sent to LangSmith. - # Merge and set context variables - tags_ = sorted(set((tags or []) + (outer_tags or []))) - _TAGS.set(tags_) - metadata = {**(metadata or {}), **(outer_metadata or {}), "ls_method": "trace"} - _METADATA.set(metadata) + Examples: + --------- + Synchronous usage: + >>> with trace("My Operation", run_type="tool", tags=["important"]) as run: + ... result = "foo" # Do some_operation() + ... run.metadata["some-key"] = "some-value" + ... run.end(outputs={"result": result}) + + Asynchronous usage: + >>> async def main(): + ... async with trace("Async Operation", run_type="tool", tags=["async"]) as run: + ... result = "foo" # Can await some_async_operation() + ... run.metadata["some-key"] = "some-value" + ... # "end" just adds the outputs and sets error to None + ... # The actual patching of the run happens when the context exits + ... run.end(outputs={"result": result}) + >>> asyncio.run(main()) + + Allowing pytest.skip in a test: + >>> import sys + >>> import pytest + >>> with trace("OS-Specific Test", exceptions_to_handle=(pytest.skip.Exception,)): + ... if sys.platform == "win32": + ... pytest.skip("Not supported on Windows") + ... result = "foo" # e.g., do some unix_specific_operation() + """ - extra_outer = extra or {} - extra_outer["metadata"] = metadata + def __init__( + self, + name: str, + run_type: ls_client.RUN_TYPE_T = "chain", + *, + inputs: Optional[Dict] = None, + extra: Optional[Dict] = None, + project_name: Optional[str] = None, + parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None, + tags: Optional[List[str]] = None, + metadata: Optional[Mapping[str, Any]] = None, + client: Optional[ls_client.Client] = None, + run_id: Optional[ls_client.ID_TYPE] = None, + reference_example_id: Optional[ls_client.ID_TYPE] = None, + exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None, + **kwargs: Any, + ): + """Initialize the trace context manager. 
- project_name_ = project_name or outer_project - if parent_run_ is not None: - new_run = parent_run_.create_child( - name=name, - run_id=run_id, - run_type=run_type, - extra=extra_outer, - inputs=inputs, - tags=tags_, - ) - else: - new_run = run_trees.RunTree( - name=name, - id=run_id or uuid.uuid4(), - reference_example_id=reference_example_id, - run_type=run_type, - extra=extra_outer, - project_name=project_name_, - inputs=inputs or {}, - tags=tags_, - client=client, + Warns if unsupported kwargs are passed. + """ + if kwargs: + warnings.warn( + "The `trace` context manager no longer supports the following kwargs: " + f"{sorted(kwargs.keys())}.", + DeprecationWarning, + ) + self.name = name + self.run_type = run_type + self.inputs = inputs + self.extra = extra + self.project_name = project_name + self.parent = parent + # The run tree is deprecated. Keeping for backwards compat. + # Will fully merge within parent later. + self.run_tree = kwargs.get("run_tree") + self.tags = tags + self.metadata = metadata + self.client = client + self.run_id = run_id + self.reference_example_id = reference_example_id + self.exceptions_to_handle = exceptions_to_handle + self.new_run: Optional[run_trees.RunTree] = None + self.old_ctx: Optional[dict] = None + + def _setup(self) -> run_trees.RunTree: + """Set up the tracing context and create a new run. + + This method initializes the tracing context, merges tags and metadata, + creates a new run (either as a child of an existing run or as a new root run), + and sets up the necessary context variables. + + Returns: + run_trees.RunTree: The newly created run. + """ + self.old_ctx = get_tracing_context() + enabled = utils.tracing_is_enabled(self.old_ctx) + + outer_tags = _TAGS.get() + outer_metadata = _METADATA.get() + parent_run_ = _get_parent_run( + { + "parent": self.parent, + "run_tree": self.run_tree, + "client": self.client, + } ) - new_run.post() - _PARENT_RUN_TREE.set(new_run) - _PROJECT_NAME.set(project_name_) - try: - yield new_run - except (Exception, KeyboardInterrupt, BaseException) as e: - if exceptions_to_handle and isinstance(e, exceptions_to_handle): - tb = None + + tags_ = sorted(set((self.tags or []) + (outer_tags or []))) + metadata = { + **(self.metadata or {}), + **(outer_metadata or {}), + "ls_method": "trace", + } + + extra_outer = self.extra or {} + extra_outer["metadata"] = metadata + + project_name_ = _get_project_name(self.project_name) + + if parent_run_ is not None and enabled: + self.new_run = parent_run_.create_child( + name=self.name, + run_id=self.run_id, + run_type=self.run_type, + extra=extra_outer, + inputs=self.inputs, + tags=tags_, + ) else: - tb = utils._format_exc() - tb = f"{e.__class__.__name__}: {e}\n\n{tb}" - new_run.end(error=tb) - new_run.patch() - raise e - finally: - _PARENT_RUN_TREE.set(parent_run_) - _PROJECT_NAME.set(outer_project) - _TAGS.set(outer_tags) - _METADATA.set(outer_metadata) - new_run.patch() + self.new_run = run_trees.RunTree( + name=self.name, + id=ls_client._ensure_uuid(self.run_id), + reference_example_id=ls_client._ensure_uuid( + self.reference_example_id, accept_null=True + ), + run_type=self.run_type, + extra=extra_outer, + project_name=project_name_ or "default", + inputs=self.inputs or {}, + tags=tags_, + client=self.client, # type: ignore[arg-type] + ) + + if enabled: + self.new_run.post() + _TAGS.set(tags_) + _METADATA.set(metadata) + _PARENT_RUN_TREE.set(self.new_run) + _PROJECT_NAME.set(project_name_) + + return self.new_run + + def _teardown( + self, + exc_type: 
Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + """Clean up the tracing context and finalize the run. + + This method handles exceptions, ends the run if necessary, + patches the run if it's not disabled, and resets the tracing context. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + if self.new_run is None: + return + if exc_type is not None: + if self.exceptions_to_handle and issubclass( + exc_type, self.exceptions_to_handle + ): + tb = None + else: + tb = utils._format_exc() + tb = f"{exc_type.__name__}: {exc_value}\n\n{tb}" + self.new_run.end(error=tb) + if self.old_ctx is not None: + enabled = utils.tracing_is_enabled(self.old_ctx) + if enabled: + self.new_run.patch() + + _set_tracing_context(self.old_ctx) + else: + warnings.warn("Tracing context was not set up properly.", RuntimeWarning) + + def __enter__(self) -> run_trees.RunTree: + """Enter the context manager synchronously. + + Returns: + run_trees.RunTree: The newly created run. + """ + return self._setup() + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + """Exit the context manager synchronously. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + self._teardown(exc_type, exc_value, traceback) + + async def __aenter__(self) -> run_trees.RunTree: + """Enter the context manager asynchronously. + + Returns: + run_trees.RunTree: The newly created run. + """ + ctx = copy_context() + result = await aitertools.aio_to_thread(self._setup, __ctx=ctx) + # Set the context for the current thread + _set_tracing_context(get_tracing_context(ctx)) + return result + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + """Exit the context manager asynchronously. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. 
+ """ + ctx = copy_context() + if exc_type is not None: + await asyncio.shield( + aitertools.aio_to_thread( + self._teardown, exc_type, exc_value, traceback, __ctx=ctx + ) + ) + else: + await aitertools.aio_to_thread( + self._teardown, exc_type, exc_value, traceback, __ctx=ctx + ) + _set_tracing_context(get_tracing_context(ctx)) + + +def _get_project_name(project_name: Optional[str]) -> Optional[str]: + prt = _PARENT_RUN_TREE.get() + return ( + # Maintain tree consistency first + _PROJECT_NAME.get() + or (prt.session_name if prt else None) + # Then check the passed in value + or project_name + # fallback to the default for the environment + or utils.get_tracer_project() + ) def as_runnable(traceable_fn: Callable) -> Runnable: @@ -760,17 +1015,12 @@ def as_runnable(traceable_fn: Callable) -> Runnable: >>> runnable = as_runnable(my_function) """ try: - from langchain.callbacks.manager import ( - AsyncCallbackManager, - CallbackManager, - ) - from langchain.callbacks.tracers.langchain import LangChainTracer - from langchain.schema.runnable import RunnableConfig, RunnableLambda - from langchain.schema.runnable.utils import Input, Output + from langchain_core.runnables import RunnableConfig, RunnableLambda + from langchain_core.runnables.utils import Input, Output except ImportError as e: raise ImportError( - "as_runnable requires langchain to be installed. " - "You can install it with `pip install langchain`." + "as_runnable requires langchain-core to be installed. " + "You can install it with `pip install langchain-core`." ) from e if not is_traceable_function(traceable_fn): try: @@ -820,28 +1070,6 @@ def __init__( ), ) - @staticmethod - def _configure_run_tree(callback_manager: Any) -> Optional[run_trees.RunTree]: - run_tree: Optional[run_trees.RunTree] = None - if isinstance(callback_manager, (CallbackManager, AsyncCallbackManager)): - lc_tracers = [ - handler - for handler in callback_manager.handlers - if isinstance(handler, LangChainTracer) - ] - if lc_tracers: - lc_tracer = lc_tracers[0] - run_tree = run_trees.RunTree( - id=callback_manager.parent_run_id, - session_name=lc_tracer.project_name, - name="Wrapping", - run_type="chain", - inputs={}, - tags=callback_manager.tags, - extra={"metadata": callback_manager.metadata}, - ) - return run_tree - @staticmethod def _wrap_sync( func: Callable[..., Output], @@ -849,9 +1077,7 @@ def _wrap_sync( """Wrap a synchronous function to make it asynchronous.""" def wrap_traceable(inputs: dict, config: RunnableConfig) -> Any: - run_tree = RunnableTraceable._configure_run_tree( - config.get("callbacks") - ) + run_tree = run_trees.RunTree.from_runnable_config(cast(dict, config)) return func(**inputs, langsmith_extra={"run_tree": run_tree}) return cast(Callable[[Input, RunnableConfig], Output], wrap_traceable) @@ -872,9 +1098,7 @@ def _wrap_async( afunc_ = cast(Callable[..., Awaitable[Output]], afunc) async def awrap_traceable(inputs: dict, config: RunnableConfig) -> Any: - run_tree = RunnableTraceable._configure_run_tree( - config.get("callbacks") - ) + run_tree = run_trees.RunTree.from_runnable_config(cast(dict, config)) return await afunc_(**inputs, langsmith_extra={"run_tree": run_tree}) return cast( @@ -921,13 +1145,14 @@ class _ContainerInput(TypedDict, total=False): project_name: Optional[str] run_type: ls_client.RUN_TYPE_T process_inputs: Optional[Callable[[dict], dict]] + invocation_params_fn: Optional[Callable[[dict], dict]] def _container_end( container: _TraceableContainer, outputs: Optional[Any] = None, error: Optional[BaseException] = None, 
-):
+) -> None:
     """End the run."""
     run_tree = container.get("new_run")
     if run_tree is None:
@@ -940,11 +1165,6 @@ def _container_end(
         error_ = f"{repr(error)}\n\n{stacktrace}"
     run_tree.end(outputs=outputs_, error=error_)
     run_tree.patch()
-    if error:
-        try:
-            LOGGER.info(f"See trace: {run_tree.get_url()}")
-        except Exception:
-            pass
     on_end = container.get("on_end")
     if on_end is not None and callable(on_end):
         try:
@@ -962,18 +1182,54 @@ def _collect_extra(extra_outer: dict, langsmith_extra: LangSmithExtra) -> dict:
     return extra_inner


-def _get_parent_run(langsmith_extra: LangSmithExtra) -> Optional[run_trees.RunTree]:
+def _get_parent_run(
+    langsmith_extra: LangSmithExtra,
+    config: Optional[dict] = None,
+) -> Optional[run_trees.RunTree]:
     parent = langsmith_extra.get("parent")
     if isinstance(parent, run_trees.RunTree):
         return parent
     if isinstance(parent, dict):
-        return run_trees.RunTree.from_headers(parent)
+        return run_trees.RunTree.from_headers(
+            parent,
+            client=langsmith_extra.get("client"),
+            # Precedence: headers -> cvar -> explicit -> env var
+            project_name=_get_project_name(langsmith_extra.get("project_name")),
+        )
     if isinstance(parent, str):
-        return run_trees.RunTree.from_dotted_order(parent)
+        dort = run_trees.RunTree.from_dotted_order(
+            parent,
+            client=langsmith_extra.get("client"),
+            # Precedence: cvar -> explicit -> env var
+            project_name=_get_project_name(langsmith_extra.get("project_name")),
+        )
+        return dort
     run_tree = langsmith_extra.get("run_tree")
     if run_tree:
         return run_tree
-    return get_current_run_tree()
+    crt = get_current_run_tree()
+    if _runtime_env.get_langchain_core_version() is not None:
+        if rt := run_trees.RunTree.from_runnable_config(
+            config, client=langsmith_extra.get("client")
+        ):
+            # Still need to break ties when alternating between traceable and
+            # LangChain code.
+            # Nesting: LC -> LS -> LS, we still want to use LS as the parent.
+            # Otherwise it would look like LC -> {LS, LS} (siblings)
+            if (
+                not crt  # Simple LC -> LS
+                # Let the user override if manually passed in or invoked in a
+                # RunnableSequence. This is a naive check.
+                or (config is not None and config.get("callbacks"))
+                # If the LangChain dotted order is more nested than the LangSmith
+                # dotted order, use the LangChain run as the parent.
+                # Note that this condition shouldn't be triggered in later
+                # versions of core, since we also update the run_tree context
+                # vars when updating the RunnableConfig context var.
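+                # (Dotted orders are prefix-extended from parent to child, so a
+                # strictly greater LangChain dotted order here typically means
+                # that run was created below the current LangSmith run.)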
+ or rt.dotted_order > crt.dotted_order + ): + return rt + return crt def _setup_run( @@ -985,29 +1241,30 @@ def _setup_run( ) -> _TraceableContainer: """Create a new run or create_child() if run is passed in kwargs.""" extra_outer = container_input.get("extra_outer") or {} - name = container_input.get("name") metadata = container_input.get("metadata") tags = container_input.get("tags") client = container_input.get("client") run_type = container_input.get("run_type") or "chain" outer_project = _PROJECT_NAME.get() langsmith_extra = langsmith_extra or LangSmithExtra() - parent_run_ = _get_parent_run(langsmith_extra) + name = langsmith_extra.get("name") or container_input.get("name") + client_ = langsmith_extra.get("client", client) + parent_run_ = _get_parent_run( + {**langsmith_extra, "client": client_}, kwargs.get("config") + ) project_cv = _PROJECT_NAME.get() selected_project = ( project_cv # From parent trace + or ( + parent_run_.session_name if parent_run_ else None + ) # from parent run attempt 2 (not managed by traceable) or langsmith_extra.get("project_name") # at invocation time or container_input["project_name"] # at decorator time or utils.get_tracer_project() # default ) reference_example_id = langsmith_extra.get("reference_example_id") id_ = langsmith_extra.get("run_id") - if ( - not project_cv - and not reference_example_id - and not parent_run_ - and not utils.tracing_is_enabled() - ): + if not parent_run_ and not utils.tracing_is_enabled(): utils.log_once( logging.DEBUG, "LangSmith tracing is enabled, returning original function." ) @@ -1037,6 +1294,16 @@ def _setup_run( metadata_["ls_method"] = "traceable" extra_inner["metadata"] = metadata_ inputs = _get_inputs_safe(signature, *args, **kwargs) + invocation_params_fn = container_input.get("invocation_params_fn") + if invocation_params_fn: + try: + invocation_params = { + k: v for k, v in invocation_params_fn(inputs).items() if v is not None + } + if invocation_params and isinstance(invocation_params, dict): + metadata_.update(invocation_params) + except BaseException as e: + LOGGER.error(f"Failed to infer invocation params for {name_}: {e}") process_inputs = container_input.get("process_inputs") if process_inputs: try: @@ -1046,7 +1313,6 @@ def _setup_run( tags_ = (langsmith_extra.get("tags") or []) + (outer_tags or []) context.run(_TAGS.set, tags_) tags_ += tags or [] - client_ = langsmith_extra.get("client", client) if parent_run_ is not None: new_run = parent_run_.create_child( name=name_, @@ -1063,7 +1329,7 @@ def _setup_run( ) else: new_run = run_trees.RunTree( - id=id_, + id=ls_client._ensure_uuid(id_), name=name_, serialized={ "name": name, @@ -1072,11 +1338,13 @@ def _setup_run( }, inputs=inputs, run_type=run_type, - reference_example_id=reference_example_id, - project_name=selected_project, + reference_example_id=ls_client._ensure_uuid( + reference_example_id, accept_null=True + ), + project_name=selected_project, # type: ignore[arg-type] extra=extra_inner, tags=tags_, - client=client_, + client=client_, # type: ignore ) try: new_run.post() diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 03b59ba7c..66887ada6 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -11,18 +11,34 @@ try: from pydantic.v1 import Field, root_validator, validator # type: ignore[import] except ImportError: - from pydantic import Field, root_validator, validator + from pydantic import ( # type: ignore[assignment, no-redef] + Field, + root_validator, + validator, + ) +import 
threading import urllib.parse from langsmith import schemas as ls_schemas from langsmith import utils -from langsmith.client import ID_TYPE, RUN_TYPE_T, Client, _dumps_json +from langsmith.client import ID_TYPE, RUN_TYPE_T, Client, _dumps_json, _ensure_uuid logger = logging.getLogger(__name__) LANGSMITH_PREFIX = "langsmith-" LANGSMITH_DOTTED_ORDER = f"{LANGSMITH_PREFIX}trace" +_CLIENT: Optional[Client] = None +_LOCK = threading.Lock() + + +def _get_client() -> Client: + global _CLIENT + if _CLIENT is None: + with _LOCK: + if _CLIENT is None: + _CLIENT = Client() + return _CLIENT class RunTree(ls_schemas.RunBase): @@ -43,7 +59,7 @@ class RunTree(ls_schemas.RunBase): ) session_id: Optional[UUID] = Field(default=None, alias="project_id") extra: Dict = Field(default_factory=dict) - client: Client = Field(default_factory=Client, exclude=True) + client: Client = Field(default_factory=_get_client, exclude=True) dotted_order: str = Field( default="", description="The order of the run in the tree." ) @@ -60,7 +76,7 @@ class Config: def validate_client(cls, v: Optional[Client]) -> Client: """Ensure the client is specified.""" if v is None: - return Client() + return _get_client() return v @root_validator(pre=True) @@ -78,6 +94,12 @@ def infer_defaults(cls, values: dict) -> dict: else: values["trace_id"] = values["id"] cast(dict, values.setdefault("extra", {})) + if values.get("events") is None: + values["events"] = [] + if values.get("tags") is None: + values["tags"] = [] + if values.get("outputs") is None: + values["outputs"] = {} return values @root_validator(pre=False) @@ -202,7 +224,7 @@ def create_child( serialized_ = serialized or {"name": name} run = RunTree( name=name, - id=run_id or uuid4(), + id=_ensure_uuid(run_id), serialized=serialized_, inputs=inputs or {}, outputs=outputs or {}, @@ -213,7 +235,7 @@ def create_child( end_time=end_time, extra=extra or {}, parent_run=self, - session_name=self.session_name, + project_name=self.session_name, client=self.client, tags=tags, ) @@ -221,20 +243,17 @@ def create_child( return run def _get_dicts_safe(self): - try: - return self.dict(exclude={"child_runs"}, exclude_none=True) - except TypeError: - # Things like generators cannot be copied - self_dict = self.dict( - exclude={"child_runs", "inputs", "outputs"}, exclude_none=True - ) - if self.inputs: - # shallow copy - self_dict["inputs"] = self.inputs.copy() - if self.outputs: - # shallow copy - self_dict["outputs"] = self.outputs.copy() - return self_dict + # Things like generators cannot be copied + self_dict = self.dict( + exclude={"child_runs", "inputs", "outputs"}, exclude_none=True + ) + if self.inputs is not None: + # shallow copy. deep copying will occur in the client + self_dict["inputs"] = self.inputs.copy() + if self.outputs is not None: + # shallow copy; deep copying will occur in the client + self_dict["outputs"] = self.outputs.copy() + return self_dict def post(self, exclude_child_runs: bool = True) -> None: """Post the run tree to the API asynchronously.""" @@ -286,6 +305,66 @@ def from_dotted_order( } return cast(RunTree, cls.from_headers(headers, **kwargs)) + @classmethod + def from_runnable_config( + cls, + config: Optional[dict], + **kwargs: Any, + ) -> Optional[RunTree]: + """Create a new 'child' span from the provided runnable config. + + Requires langchain to be installed. + + Returns: + Optional[RunTree]: The new span or None if + no parent span information is found. 
+        """
+        try:
+            from langchain_core.callbacks.manager import (
+                AsyncCallbackManager,
+                CallbackManager,
+            )
+            from langchain_core.runnables import RunnableConfig, ensure_config
+            from langchain_core.tracers.langchain import LangChainTracer
+        except ImportError as e:
+            raise ImportError(
+                "RunTree.from_runnable_config requires langchain-core to be installed. "
+                "You can install it with `pip install langchain-core`."
+            ) from e
+        config_ = ensure_config(
+            cast(RunnableConfig, config) if isinstance(config, dict) else None
+        )
+        if (
+            (cb := config_.get("callbacks"))
+            and isinstance(cb, (CallbackManager, AsyncCallbackManager))
+            and cb.parent_run_id
+            and (
+                tracer := next(
+                    (t for t in cb.handlers if isinstance(t, LangChainTracer)),
+                    None,
+                )
+            )
+        ):
+            if (run := tracer.run_map.get(str(cb.parent_run_id))) and run.dotted_order:
+                dotted_order = run.dotted_order
+                kwargs["run_type"] = run.run_type
+                kwargs["inputs"] = run.inputs
+                kwargs["outputs"] = run.outputs
+                kwargs["start_time"] = run.start_time
+                kwargs["end_time"] = run.end_time
+                kwargs["tags"] = sorted(set((run.tags or []) + kwargs.get("tags", [])))
+                extra_ = kwargs.setdefault("extra", {})
+                metadata_ = extra_.setdefault("metadata", {})
+                metadata_.update(run.metadata)
+            elif hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map:
+                dotted_order = tracer.order_map[cb.parent_run_id][1]
+            else:
+                return None
+            kwargs["client"] = tracer.client
+            kwargs["project_name"] = tracer.project_name
+            return RunTree.from_dotted_order(dotted_order, **kwargs)
+        return None
+
     @classmethod
     def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTree]:
         """Create a new 'parent' span from the provided headers.
@@ -310,6 +389,9 @@ def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTre
         init_args["trace_id"] = trace_id
         init_args["id"] = parsed_dotted_order[-1][1]
         init_args["dotted_order"] = parent_dotted_order
+        if len(parsed_dotted_order) >= 2:
+            # Has a parent
+            init_args["parent_run_id"] = parsed_dotted_order[-2][1]
         # All placeholders. We assume the source process
         # handles the life-cycle of the run.
init_args["start_time"] = init_args.get("start_time") or datetime.now( @@ -328,6 +410,8 @@ def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTre init_args["extra"]["metadata"] = metadata tags = sorted(set(baggage.tags + init_args.get("tags", []))) init_args["tags"] = tags + if baggage.project_name: + init_args["project_name"] = baggage.project_name return RunTree(**init_args) @@ -339,6 +423,7 @@ def to_headers(self) -> Dict[str, str]: baggage = _Baggage( metadata=self.extra.get("metadata", {}), tags=self.tags, + project_name=self.session_name, ) headers["baggage"] = baggage.to_header() return headers @@ -351,10 +436,12 @@ def __init__( self, metadata: Optional[Dict[str, str]] = None, tags: Optional[List[str]] = None, + project_name: Optional[str] = None, ): """Initialize the Baggage object.""" self.metadata = metadata or {} self.tags = tags or [] + self.project_name = project_name @classmethod def from_header(cls, header_value: Optional[str]) -> _Baggage: @@ -363,6 +450,7 @@ def from_header(cls, header_value: Optional[str]) -> _Baggage: return cls() metadata = {} tags = [] + project_name = None try: for item in header_value.split(","): key, value = item.split("=", 1) @@ -370,10 +458,12 @@ def from_header(cls, header_value: Optional[str]) -> _Baggage: metadata = json.loads(urllib.parse.unquote(value)) elif key == f"{LANGSMITH_PREFIX}tags": tags = urllib.parse.unquote(value).split(",") + elif key == f"{LANGSMITH_PREFIX}project": + project_name = urllib.parse.unquote(value) except Exception as e: logger.warning(f"Error parsing baggage header: {e}") - return cls(metadata=metadata, tags=tags) + return cls(metadata=metadata, tags=tags, project_name=project_name) def to_header(self) -> str: """Return the Baggage object as a header value.""" @@ -388,6 +478,10 @@ def to_header(self) -> str: items.append( f"{LANGSMITH_PREFIX}tags={urllib.parse.quote(serialized_tags)}" ) + if self.project_name: + items.append( + f"{LANGSMITH_PREFIX}project={urllib.parse.quote(self.project_name)}" + ) return ",".join(items) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 78c849bbc..3da1d4650 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -63,6 +63,7 @@ class ExampleCreate(ExampleBase): id: Optional[UUID] created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + split: Optional[Union[str, List[str]]] = None class Example(ExampleBase): @@ -98,6 +99,12 @@ def url(self) -> Optional[str]: return None +class ExampleSearch(ExampleBase): + """Example returned via search.""" + + id: UUID + + class ExampleUpdate(BaseModel): """Update class for Example.""" @@ -105,6 +112,7 @@ class ExampleUpdate(BaseModel): inputs: Optional[Dict[str, Any]] = None outputs: Optional[Dict[str, Any]] = None metadata: Optional[Dict[str, Any]] = None + split: Optional[Union[str, List[str]]] = None class Config: """Configuration class for the schema.""" @@ -133,13 +141,6 @@ class Config: frozen = True -class DatasetCreate(DatasetBase): - """Dataset create model.""" - - id: Optional[UUID] = None - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - - class Dataset(DatasetBase): """Dataset ORM model.""" @@ -149,6 +150,8 @@ class Dataset(DatasetBase): example_count: Optional[int] = None session_count: Optional[int] = None last_session_start_time: Optional[datetime] = None + inputs_schema: Optional[Dict[str, Any]] = None + outputs_schema: Optional[Dict[str, Any]] = None _host_url: Optional[str] = 
PrivateAttr(default=None)
     _tenant_id: Optional[UUID] = PrivateAttr(default=None)
     _public_path: Optional[str] = PrivateAttr(default=None)
@@ -161,6 +164,12 @@ def __init__(
         **kwargs: Any,
     ) -> None:
         """Initialize a Dataset object."""
+        if "inputs_schema_definition" in kwargs:
+            kwargs["inputs_schema"] = kwargs.pop("inputs_schema_definition")
+
+        if "outputs_schema_definition" in kwargs:
+            kwargs["outputs_schema"] = kwargs.pop("outputs_schema_definition")
+
         super().__init__(**kwargs)
         self._host_url = _host_url
         self._tenant_id = _tenant_id
@@ -571,6 +580,18 @@ class TracerSessionResult(TracerSession):
     """Feedback stats for the project."""
     run_facets: Optional[List[Dict[str, Any]]]
     """Facets for the runs in the project."""
+    total_cost: Optional[Decimal]
+    """The total estimated LLM cost associated with the prompt and completion tokens."""
+    prompt_cost: Optional[Decimal]
+    """The estimated cost associated with the prompt (input) tokens."""
+    completion_cost: Optional[Decimal]
+    """The estimated cost associated with the completion tokens."""
+    first_token_p50: Optional[timedelta]
+    """The median (50th percentile) time to process the first token."""
+    first_token_p99: Optional[timedelta]
+    """The 99th percentile time to process the first token."""
+    error_rate: Optional[float]
+    """The error rate for the project."""


 @runtime_checkable
@@ -653,6 +674,18 @@ class LangSmithInfo(BaseModel):
 Example.update_forward_refs()


+class LangSmithSettings(BaseModel):
+    """Settings for the LangSmith tenant."""
+
+    id: str
+    """The ID of the tenant."""
+    display_name: str
+    """The display name of the tenant."""
+    created_at: datetime
+    """The creation time of the tenant."""
+    tenant_handle: Optional[str] = None
+
+
 class FeedbackIngestToken(BaseModel):
     """Represents the schema for a feedback ingest token.

@@ -730,3 +763,97 @@ def metadata(self) -> dict[str, Any]:
         if self.extra is None or "metadata" not in self.extra:
             return {}
         return self.extra["metadata"]
+
+
+class PromptCommit(BaseModel):
+    """Represents a Prompt with a manifest.
+
+    Attributes:
+        owner (str): The handle of the owner of the prompt.
+        repo (str): The name of the prompt.
+        commit_hash (str): The commit hash of the prompt.
+        manifest (Dict[str, Any]): The manifest of the prompt.
+        examples (List[dict]): The list of examples.
+ """ + + owner: str + """The handle of the owner of the prompt.""" + repo: str + """The name of the prompt.""" + commit_hash: str + """The commit hash of the prompt.""" + manifest: Dict[str, Any] + """The manifest of the prompt.""" + examples: List[dict] + """The list of examples.""" + + +class Prompt(BaseModel): + """Represents a Prompt with metadata.""" + + repo_handle: str + """The name of the prompt.""" + description: Optional[str] = None + """The description of the prompt.""" + readme: Optional[str] = None + """The README of the prompt.""" + id: str + """The ID of the prompt.""" + tenant_id: str + """The tenant ID of the prompt owner.""" + created_at: datetime + """The creation time of the prompt.""" + updated_at: datetime + """The last update time of the prompt.""" + is_public: bool + """Whether the prompt is public.""" + is_archived: bool + """Whether the prompt is archived.""" + tags: List[str] + """The tags associated with the prompt.""" + original_repo_id: Optional[str] = None + """The ID of the original prompt, if forked.""" + upstream_repo_id: Optional[str] = None + """The ID of the upstream prompt, if forked.""" + owner: Optional[str] + """The handle of the owner of the prompt.""" + full_name: str + """The full name of the prompt. (owner + repo_handle)""" + num_likes: int + """The number of likes.""" + num_downloads: int + """The number of downloads.""" + num_views: int + """The number of views.""" + liked_by_auth_user: bool + """Whether the prompt is liked by the authenticated user.""" + last_commit_hash: Optional[str] = None + """The hash of the last commit.""" + num_commits: int + """The number of commits.""" + original_repo_full_name: Optional[str] = None + """The full name of the original prompt, if forked.""" + upstream_repo_full_name: Optional[str] = None + """The full name of the upstream prompt, if forked.""" + + +class ListPromptsResponse(BaseModel): + """A list of prompts with metadata.""" + + repos: List[Prompt] + """The list of prompts.""" + total: int + """The total number of prompts.""" + + +class PromptSortField(str, Enum): + """Enum for sorting fields for prompts.""" + + num_downloads = "num_downloads" + """Number of downloads.""" + num_views = "num_views" + """Number of views.""" + updated_at = "updated_at" + """Last updated time.""" + num_likes = "num_likes" + """Number of likes.""" diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 0217ab4e4..0d3552dcc 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -1,6 +1,10 @@ """Generic utility functions.""" +from __future__ import annotations + import contextlib +import contextvars +import copy import enum import functools import logging @@ -10,20 +14,26 @@ import sys import threading import traceback +from concurrent.futures import Future, ThreadPoolExecutor from typing import ( Any, Callable, Dict, Generator, + Iterable, + Iterator, List, Mapping, Optional, Sequence, Tuple, + TypeVar, Union, + cast, ) import requests +from typing_extensions import ParamSpec from urllib3.util import Retry from langsmith import schemas as ls_schemas @@ -63,13 +73,21 @@ class LangSmithConnectionError(LangSmithError): """Couldn't connect to the LangSmith API.""" -def tracing_is_enabled() -> bool: +def tracing_is_enabled(ctx: Optional[dict] = None) -> bool: """Return True if tracing is enabled.""" - from langsmith.run_helpers import get_tracing_context + from langsmith.run_helpers import get_current_run_tree, get_tracing_context - tc = get_tracing_context() + tc = ctx or 
get_tracing_context() + # You can manually override the environment using context vars. + # Check that first. + # Doing this before checking the run tree lets us + # disable a branch within a trace. if tc["enabled"] is not None: return tc["enabled"] + # Next check if we're mid-trace + if get_current_run_tree(): + return True + # Finally, check the global environment var_result = get_env_var("TRACING_V2", default=get_env_var("TRACING", default="")) return var_result == "true" @@ -497,3 +515,175 @@ def _format_exc() -> str: tb_lines = traceback.format_exception(*sys.exc_info()) filtered_lines = [line for line in tb_lines if "langsmith/" not in line] return "".join(filtered_lines) + + +T = TypeVar("T") + + +def _middle_copy( + val: T, memo: Dict[int, Any], max_depth: int = 4, _depth: int = 0 +) -> T: + cls = type(val) + + copier = getattr(cls, "__deepcopy__", None) + if copier is not None: + try: + return copier(memo) + except BaseException: + pass + if _depth >= max_depth: + return val + if isinstance(val, dict): + return { # type: ignore[return-value] + _middle_copy(k, memo, max_depth, _depth + 1): _middle_copy( + v, memo, max_depth, _depth + 1 + ) + for k, v in val.items() + } + if isinstance(val, list): + return [_middle_copy(item, memo, max_depth, _depth + 1) for item in val] # type: ignore[return-value] + if isinstance(val, tuple): + return tuple(_middle_copy(item, memo, max_depth, _depth + 1) for item in val) # type: ignore[return-value] + if isinstance(val, set): + return {_middle_copy(item, memo, max_depth, _depth + 1) for item in val} # type: ignore[return-value] + + return val + + +def deepish_copy(val: T) -> T: + """Deep copy a value with a compromise for uncopyable objects. + + Args: + val: The value to be deep copied. + + Returns: + The deep copied value. + """ + memo: Dict[int, Any] = {} + try: + return copy.deepcopy(val, memo) + except BaseException as e: + # Generators, locks, etc. cannot be copied + # and raise a TypeError (mentioning pickling, since the dunder methods) + # are re-used for copying. We'll try to do a compromise and copy + # what we can + _LOGGER.debug("Failed to deepcopy input: %s", repr(e)) + return _middle_copy(val, memo) + + +def is_version_greater_or_equal(current_version: str, target_version: str) -> bool: + """Check if the current version is greater or equal to the target version.""" + from packaging import version + + current = version.parse(current_version) + target = version.parse(target_version) + return current >= target + + +def parse_prompt_identifier(identifier: str) -> Tuple[str, str, str]: + """Parse a string in the format of owner/name:hash, name:hash, owner/name, or name. + + Args: + identifier (str): The prompt identifier to parse. + + Returns: + Tuple[str, str, str]: A tuple containing (owner, name, hash). + + Raises: + ValueError: If the identifier doesn't match the expected formats. 
+    """
+    if (
+        not identifier
+        or identifier.count("/") > 1
+        or identifier.startswith("/")
+        or identifier.endswith("/")
+    ):
+        raise ValueError(f"Invalid identifier format: {identifier}")
+
+    parts = identifier.split(":", 1)
+    owner_name = parts[0]
+    commit = parts[1] if len(parts) > 1 else "latest"
+
+    if "/" in owner_name:
+        owner, name = owner_name.split("/", 1)
+        if not owner or not name:
+            raise ValueError(f"Invalid identifier format: {identifier}")
+        return owner, name, commit
+    else:
+        if not owner_name:
+            raise ValueError(f"Invalid identifier format: {identifier}")
+        return "-", owner_name, commit
+
+
+P = ParamSpec("P")
+
+
+class ContextThreadPoolExecutor(ThreadPoolExecutor):
+    """ThreadPoolExecutor that copies the context to the child thread."""
+
+    def submit(  # type: ignore[override]
+        self,
+        func: Callable[P, T],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> Future[T]:
+        """Submit a function to the executor.
+
+        Args:
+            func (Callable[..., T]): The function to submit.
+            *args (Any): The positional arguments to the function.
+            **kwargs (Any): The keyword arguments to the function.
+
+        Returns:
+            Future[T]: The future for the function.
+        """
+        return super().submit(
+            cast(
+                Callable[..., T],
+                functools.partial(
+                    contextvars.copy_context().run, func, *args, **kwargs
+                ),
+            )
+        )
+
+    def map(
+        self,
+        fn: Callable[..., T],
+        *iterables: Iterable[Any],
+        timeout: Optional[float] = None,
+        chunksize: int = 1,
+    ) -> Iterator[T]:
+        """Return an iterator equivalent to stdlib map.
+
+        Each function will receive its own copy of the context from the parent thread.
+
+        Args:
+            fn: A callable that will take as many arguments as there are
+                passed iterables.
+            timeout: The maximum number of seconds to wait. If None, then there
+                is no limit on the wait time.
+            chunksize: The size of the chunks the iterable will be broken into
+                before being passed to a child process. This argument is only
+                used by ProcessPoolExecutor; it is ignored by
+                ThreadPoolExecutor.
+
+        Returns:
+            An iterator equivalent to: map(func, *iterables) but the calls may
+            be evaluated out-of-order.
+
+        Raises:
+            TimeoutError: If the entire result iterator could not be generated
+                before the given timeout.
+            Exception: If fn(*args) raises for any values.
+ """ + contexts = [contextvars.copy_context() for _ in range(len(iterables[0]))] # type: ignore[arg-type] + + def _wrapped_fn(*args: Any) -> T: + return contexts.pop().run(fn, *args) + + return super().map( + _wrapped_fn, + *iterables, + timeout=timeout, + chunksize=chunksize, + ) diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 4fe214b13..07b317324 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -57,6 +57,23 @@ def _strip_not_given(d: dict) -> dict: return d +def _infer_invocation_params(model_type: str, kwargs: dict): + stripped = _strip_not_given(kwargs) + + stop = stripped.get("stop") + if stop and isinstance(stop, str): + stop = [stop] + + return { + "ls_provider": "openai", + "ls_model_type": model_type, + "ls_model_name": stripped.get("model", None), + "ls_temperature": stripped.get("temperature", None), + "ls_max_tokens": stripped.get("max_tokens", None), + "ls_stop": stop, + } + + def _reduce_choices(choices: List[Choice]) -> dict: reversed_choices = list(reversed(choices)) message: Dict[str, Any] = { @@ -97,13 +114,11 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + fn_ = message["tool_calls"][index]["function"] + fn_["name"] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += chunk.function.arguments + fn_ = message["tool_calls"][index]["function"] + fn_["arguments"] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( @@ -150,6 +165,7 @@ def _get_wrapper( name: str, reduce_fn: Callable, tracing_extra: Optional[TracingExtra] = None, + invocation_params_fn: Optional[Callable] = None, ) -> Callable: textra = tracing_extra or {} @@ -160,6 +176,7 @@ def create(*args, stream: bool = False, **kwargs): run_type="llm", reduce_fn=reduce_fn if stream else None, process_inputs=_strip_not_given, + _invocation_params_fn=invocation_params_fn, **textra, ) @@ -173,6 +190,7 @@ async def acreate(*args, stream: bool = False, **kwargs): run_type="llm", reduce_fn=reduce_fn if stream else None, process_inputs=_strip_not_given, + _invocation_params_fn=invocation_params_fn, **textra, ) if stream: @@ -191,13 +209,23 @@ class TracingExtra(TypedDict, total=False): client: Optional[ls_client.Client] -def wrap_openai(client: C, *, tracing_extra: Optional[TracingExtra] = None) -> C: +def wrap_openai( + client: C, + *, + tracing_extra: Optional[TracingExtra] = None, + chat_name: str = "ChatOpenAI", + completions_name: str = "OpenAI", +) -> C: """Patch the OpenAI client to make it traceable. Args: client (Union[OpenAI, AsyncOpenAI]): The client to patch. tracing_extra (Optional[TracingExtra], optional): Extra tracing information. Defaults to None. + chat_name (str, optional): The run name for the chat completions endpoint. + Defaults to "ChatOpenAI". + completions_name (str, optional): The run name for the completions endpoint. + Defaults to "OpenAI". Returns: Union[OpenAI, AsyncOpenAI]: The patched client. 
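
# A rough usage sketch for the `chat_name` / `completions_name` parameters and the
# invocation-parameter inference described above; it assumes the `openai` package is
# installed and OPENAI_API_KEY is set, and the model name and messages are illustrative.
import openai
from langsmith.wrappers import wrap_openai

openai_client = wrap_openai(
    openai.OpenAI(),
    chat_name="MyChatRun",  # run name recorded for chat.completions calls
    completions_name="MyTextRun",  # run name recorded for completions calls
)
# The wrapped call is traced as an LLM run; model, temperature, etc. are attached
# to the run metadata via the invocation-params hook shown above.
response = openai_client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
)
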
@@ -205,14 +233,16 @@ def wrap_openai(client: C, *, tracing_extra: Optional[TracingExtra] = None) -> C """ client.chat.completions.create = _get_wrapper( # type: ignore[method-assign] client.chat.completions.create, - "ChatOpenAI", + chat_name, _reduce_chat, tracing_extra=tracing_extra, + invocation_params_fn=functools.partial(_infer_invocation_params, "chat"), ) client.completions.create = _get_wrapper( # type: ignore[method-assign] client.completions.create, - "OpenAI", + completions_name, _reduce_completions, tracing_extra=tracing_extra, + invocation_params_fn=functools.partial(_infer_invocation_params, "text"), ) return client diff --git a/python/poetry.lock b/python/poetry.lock index da2c1e35d..3d4d1374c 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,14 +1,28 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.3.0" +version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, ] [package.dependencies] @@ -24,52 +38,52 @@ trio = ["trio (>=0.23)"] [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", 
"pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "black" -version = "24.3.0" +version = "24.8.0" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, - {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, - {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, - {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, - {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, - {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, - {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, - {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, - {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, - {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, - {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, - {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, - {file = "black-24.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837"}, - {file = "black-24.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd"}, - {file = "black-24.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213"}, - {file = "black-24.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959"}, - {file = "black-24.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb"}, - {file = "black-24.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7"}, - {file = "black-24.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7"}, - {file = "black-24.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f"}, - {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, - {file = "black-24.3.0.tar.gz", hash = "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, + {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, + {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, + {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, + {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, + {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, + {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, + {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, + {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, + {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, + {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, + {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, + {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, + {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, + {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, + {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, + {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, + {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, + {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, + {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, + {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, + {file = "black-24.8.0-py3-none-any.whl", hash = 
"sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, + {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, ] [package.dependencies] @@ -89,13 +103,13 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] @@ -224,63 +238,83 @@ files = [ [[package]] name = "coverage" -version = "7.4.4" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, - {file = "coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, - {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, - {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, - {file = 
"coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, - {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, - {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, - {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, - {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, - {file = 
"coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, - {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, - {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, - {file = "coverage-7.4.4-cp39-cp39-win32.whl", hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, - {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, - {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, - {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = 
"coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file 
= "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -291,13 +325,13 @@ toml = ["tomli"] [[package]] name = "dataclasses-json" -version = "0.6.4" +version = "0.6.7" description = "Easily serialize dataclasses to and from JSON." 
optional = false -python-versions = ">=3.7,<4.0" +python-versions = "<4.0,>=3.7" files = [ - {file = "dataclasses_json-0.6.4-py3-none-any.whl", hash = "sha256:f90578b8a3177f7552f4e1a6e535e84293cd5da421fcce0642d49c0d7bdf8df2"}, - {file = "dataclasses_json-0.6.4.tar.gz", hash = "sha256:73696ebf24936560cca79a2430cbc4f3dd23ac7bf46ed17f38e5e5e7657a6377"}, + {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, + {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, ] [package.dependencies] @@ -317,13 +351,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] @@ -345,13 +379,13 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "fastapi" -version = "0.110.1" +version = "0.110.3" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.110.1-py3-none-any.whl", hash = "sha256:5df913203c482f820d31f48e635e022f8cbfe7350e4830ef05a3163925b1addc"}, - {file = "fastapi-0.110.1.tar.gz", hash = "sha256:6feac43ec359dfe4f45b2c18ec8c94edb8dc2dfc461d417d9e626590c071baad"}, + {file = "fastapi-0.110.3-py3-none-any.whl", hash = "sha256:fd7600612f755e4050beb74001310b5a7e1796d149c2ee363124abdfa0289d32"}, + {file = "fastapi-0.110.3.tar.gz", hash = "sha256:555700b0159379e94fdbfc6bb66a0f1c43f4cf7060f25239af3d84b63a656626"}, ] [package.dependencies] @@ -360,17 +394,17 @@ starlette = ">=0.37.2,<0.38.0" typing-extensions = ">=4.8.0" [package.extras] -all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] [[package]] name = "freezegun" -version = "1.4.0" +version = "1.5.1" description = "Let your Python tests travel through time" optional = false python-versions = ">=3.7" files = [ - {file = "freezegun-1.4.0-py3-none-any.whl", hash = "sha256:55e0fc3c84ebf0a96a5aa23ff8b53d70246479e9a68863f1fcac5a3e52f19dd6"}, - {file = "freezegun-1.4.0.tar.gz", hash = "sha256:10939b0ba0ff5adaecf3b06a5c2f73071d9678e507c5eaedb23c761d56ac774b"}, + {file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = 
"freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, ] [package.dependencies] @@ -454,15 +488,85 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jiter" +version = "0.5.0" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, + {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"}, + {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"}, + {file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"}, + 
{file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"}, + {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"}, + {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"}, + {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"}, + {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"}, + {file = 
"jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"}, + {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"}, + {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"}, + {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"}, + {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"}, + {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"}, +] + [[package]] name = "marshmallow" -version = "3.21.1" +version = "3.21.3" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, - {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, ] [package.dependencies] @@ -470,7 +574,7 @@ packaging = ">=17.0" [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==4.0.0)", "sphinx-version-warning (==1.1.2)"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] tests = ["pytest", "pytz", "simplejson"] [[package]] @@ -574,44 +678,44 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.11.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c"}, + {file = "mypy-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411"}, + {file = "mypy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03"}, + {file = "mypy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4"}, + {file = "mypy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58"}, + {file = "mypy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5"}, + {file = "mypy-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca"}, + {file = "mypy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de"}, + {file = "mypy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809"}, + {file = "mypy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72"}, + {file = "mypy-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8"}, + {file = "mypy-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a"}, + {file = "mypy-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417"}, + {file = "mypy-1.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e"}, + {file = "mypy-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525"}, + {file = "mypy-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2"}, + {file = "mypy-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b"}, + {file = "mypy-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0"}, + {file = "mypy-1.11.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd"}, + {file = "mypy-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb"}, + {file = "mypy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe"}, + {file = "mypy-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c"}, + {file = "mypy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69"}, + {file = "mypy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74"}, + {file = "mypy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b"}, + {file = "mypy-1.11.1-py3-none-any.whl", hash = "sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54"}, + {file = "mypy-1.11.1.tar.gz", hash = "sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -632,141 +736,157 @@ files = [ [[package]] name = "numpy" -version = "1.26.4" +version = "2.0.1" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = 
"sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = 
"sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fbb536eac80e27a2793ffd787895242b7f18ef792563d742c2d673bfcb75134"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:69ff563d43c69b1baba77af455dd0a839df8d25e8590e79c90fcbe1499ebde42"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:1b902ce0e0a5bb7704556a217c4f63a7974f8f43e090aff03fcf262e0b135e02"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:f1659887361a7151f89e79b276ed8dff3d75877df906328f14d8bb40bb4f5101"}, + {file = "numpy-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4658c398d65d1b25e1760de3157011a80375da861709abd7cef3bad65d6543f9"}, + {file = "numpy-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4127d4303b9ac9f94ca0441138acead39928938660ca58329fe156f84b9f3015"}, + {file = "numpy-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e5eeca8067ad04bc8a2a8731183d51d7cbaac66d86085d5f4766ee6bf19c7f87"}, + {file = "numpy-2.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9adbd9bb520c866e1bfd7e10e1880a1f7749f1f6e5017686a5fbb9b72cf69f82"}, + {file = "numpy-2.0.1-cp310-cp310-win32.whl", hash = "sha256:7b9853803278db3bdcc6cd5beca37815b133e9e77ff3d4733c247414e78eb8d1"}, + {file = "numpy-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81b0893a39bc5b865b8bf89e9ad7807e16717f19868e9d234bdaf9b1f1393868"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75b4e316c5902d8163ef9d423b1c3f2f6252226d1aa5cd8a0a03a7d01ffc6268"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6e4eeb6eb2fced786e32e6d8df9e755ce5be920d17f7ce00bc38fcde8ccdbf9e"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a1e01dcaab205fbece13c1410253a9eea1b1c9b61d237b6fa59bcc46e8e89343"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a8fc2de81ad835d999113ddf87d1ea2b0f4704cbd947c948d2f5513deafe5a7b"}, + {file = "numpy-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a3d94942c331dd4e0e1147f7a8699a4aa47dffc11bf8a1523c12af8b2e91bbe"}, + {file = "numpy-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15eb4eca47d36ec3f78cde0a3a2ee24cf05ca7396ef808dda2c0ddad7c2bde67"}, + {file = "numpy-2.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b83e16a5511d1b1f8a88cbabb1a6f6a499f82c062a4251892d9ad5d609863fb7"}, + {file = "numpy-2.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f87fec1f9bc1efd23f4227becff04bd0e979e23ca50cc92ec88b38489db3b55"}, + {file = "numpy-2.0.1-cp311-cp311-win32.whl", hash = 
"sha256:36d3a9405fd7c511804dc56fc32974fa5533bdeb3cd1604d6b8ff1d292b819c4"}, + {file = "numpy-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:08458fbf403bff5e2b45f08eda195d4b0c9b35682311da5a5a0a0925b11b9bd8"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bf4e6f4a2a2e26655717a1983ef6324f2664d7011f6ef7482e8c0b3d51e82ac"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6fddc5fe258d3328cd8e3d7d3e02234c5d70e01ebe377a6ab92adb14039cb4"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5daab361be6ddeb299a918a7c0864fa8618af66019138263247af405018b04e1"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:ea2326a4dca88e4a274ba3a4405eb6c6467d3ffbd8c7d38632502eaae3820587"}, + {file = "numpy-2.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529af13c5f4b7a932fb0e1911d3a75da204eff023ee5e0e79c1751564221a5c8"}, + {file = "numpy-2.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6790654cb13eab303d8402354fabd47472b24635700f631f041bd0b65e37298a"}, + {file = "numpy-2.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbab9fc9c391700e3e1287666dfd82d8666d10e69a6c4a09ab97574c0b7ee0a7"}, + {file = "numpy-2.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99d0d92a5e3613c33a5f01db206a33f8fdf3d71f2912b0de1739894668b7a93b"}, + {file = "numpy-2.0.1-cp312-cp312-win32.whl", hash = "sha256:173a00b9995f73b79eb0191129f2455f1e34c203f559dd118636858cc452a1bf"}, + {file = "numpy-2.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:bb2124fdc6e62baae159ebcfa368708867eb56806804d005860b6007388df171"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfc085b28d62ff4009364e7ca34b80a9a080cbd97c2c0630bb5f7f770dae9414"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fae4ebbf95a179c1156fab0b142b74e4ba4204c87bde8d3d8b6f9c34c5825ef"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:72dc22e9ec8f6eaa206deb1b1355eb2e253899d7347f5e2fae5f0af613741d06"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:ec87f5f8aca726117a1c9b7083e7656a9d0d606eec7299cc067bb83d26f16e0c"}, + {file = "numpy-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f682ea61a88479d9498bf2091fdcd722b090724b08b31d63e022adc063bad59"}, + {file = "numpy-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8efc84f01c1cd7e34b3fb310183e72fcdf55293ee736d679b6d35b35d80bba26"}, + {file = "numpy-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3fdabe3e2a52bc4eff8dc7a5044342f8bd9f11ef0934fcd3289a788c0eb10018"}, + {file = "numpy-2.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:24a0e1befbfa14615b49ba9659d3d8818a0f4d8a1c5822af8696706fbda7310c"}, + {file = "numpy-2.0.1-cp39-cp39-win32.whl", hash = "sha256:f9cf5ea551aec449206954b075db819f52adc1638d46a6738253a712d553c7b4"}, + {file = "numpy-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:e9e81fa9017eaa416c056e5d9e71be93d05e2c3c2ab308d23307a8bc4443c368"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:61728fba1e464f789b11deb78a57805c70b2ed02343560456190d0501ba37b0f"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:12f5d865d60fb9734e60a60f1d5afa6d962d8d4467c120a1c0cda6eb2964437d"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:eacf3291e263d5a67d8c1a581a8ebbcfd6447204ef58828caf69a5e3e8c75990"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2c3a346ae20cfd80b6cfd3e60dc179963ef2ea58da5ec074fd3d9e7a1e7ba97f"}, + {file = "numpy-2.0.1.tar.gz", hash = "sha256:485b87235796410c3519a699cfe1faab097e509e90ebb05dcd098db2ae87e7b3"}, ] [[package]] name = "openai" -version = "1.16.2" +version = "1.40.3" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.16.2-py3-none-any.whl", hash = "sha256:46a435380921e42dae218d04d6dd0e89a30d7f3b9d8a778d5887f78003cf9354"}, - {file = "openai-1.16.2.tar.gz", hash = "sha256:c93d5efe5b73b6cb72c4cd31823852d2e7c84a138c0af3cbe4a8eb32b1164ab2"}, + {file = "openai-1.40.3-py3-none-any.whl", hash = "sha256:09396cb6e2e15c921a5d872bf92841a60a9425da10dcd962b45fe7c4f48f8395"}, + {file = "openai-1.40.3.tar.gz", hash = "sha256:f2ffe907618240938c59d7ccc67dd01dc8c50be203c0077240db6758d2f02480"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" -typing-extensions = ">=4.7,<5" +typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.0" +version = "3.10.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.0-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:47af5d4b850a2d1328660661f0881b67fdbe712aea905dadd413bdea6f792c33"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c90681333619d78360d13840c7235fdaf01b2b129cb3a4f1647783b1971542b6"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:400c5b7c4222cb27b5059adf1fb12302eebcabf1978f33d0824aa5277ca899bd"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5dcb32e949eae80fb335e63b90e5808b4b0f64e31476b3777707416b41682db5"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7d507c7493252c0a0264b5cc7e20fa2f8622b8a83b04d819b5ce32c97cf57b"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e286a51def6626f1e0cc134ba2067dcf14f7f4b9550f6dd4535fd9d79000040b"}, - {file = "orjson-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8acd4b82a5f3a3ec8b1dc83452941d22b4711964c34727eb1e65449eead353ca"}, - {file = "orjson-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:30707e646080dd3c791f22ce7e4a2fc2438765408547c10510f1f690bd336217"}, - {file = "orjson-3.10.0-cp310-none-win32.whl", hash = "sha256:115498c4ad34188dcb73464e8dc80e490a3e5e88a925907b6fedcf20e545001a"}, - {file = "orjson-3.10.0-cp310-none-win_amd64.whl", hash = "sha256:6735dd4a5a7b6df00a87d1d7a02b84b54d215fb7adac50dd24da5997ffb4798d"}, - {file = "orjson-3.10.0-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9587053e0cefc284e4d1cd113c34468b7d3f17666d22b185ea654f0775316a26"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bef1050b1bdc9ea6c0d08468e3e61c9386723633b397e50b82fda37b3563d72"}, - {file = 
"orjson-3.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d16c6963ddf3b28c0d461641517cd312ad6b3cf303d8b87d5ef3fa59d6844337"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4251964db47ef090c462a2d909f16c7c7d5fe68e341dabce6702879ec26d1134"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73bbbdc43d520204d9ef0817ac03fa49c103c7f9ea94f410d2950755be2c349c"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:414e5293b82373606acf0d66313aecb52d9c8c2404b1900683eb32c3d042dbd7"}, - {file = "orjson-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:feaed5bb09877dc27ed0d37f037ddef6cb76d19aa34b108db270d27d3d2ef747"}, - {file = "orjson-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5127478260db640323cea131ee88541cb1a9fbce051f0b22fa2f0892f44da302"}, - {file = "orjson-3.10.0-cp311-none-win32.whl", hash = "sha256:b98345529bafe3c06c09996b303fc0a21961820d634409b8639bc16bd4f21b63"}, - {file = "orjson-3.10.0-cp311-none-win_amd64.whl", hash = "sha256:658ca5cee3379dd3d37dbacd43d42c1b4feee99a29d847ef27a1cb18abdfb23f"}, - {file = "orjson-3.10.0-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4329c1d24fd130ee377e32a72dc54a3c251e6706fccd9a2ecb91b3606fddd998"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef0f19fdfb6553342b1882f438afd53c7cb7aea57894c4490c43e4431739c700"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4f60db24161534764277f798ef53b9d3063092f6d23f8f962b4a97edfa997a0"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1de3fd5c7b208d836f8ecb4526995f0d5877153a4f6f12f3e9bf11e49357de98"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f93e33f67729d460a177ba285002035d3f11425ed3cebac5f6ded4ef36b28344"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:237ba922aef472761acd697eef77fef4831ab769a42e83c04ac91e9f9e08fa0e"}, - {file = "orjson-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98c1bfc6a9bec52bc8f0ab9b86cc0874b0299fccef3562b793c1576cf3abb570"}, - {file = "orjson-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:30d795a24be16c03dca0c35ca8f9c8eaaa51e3342f2c162d327bd0225118794a"}, - {file = "orjson-3.10.0-cp312-none-win32.whl", hash = "sha256:6a3f53dc650bc860eb26ec293dfb489b2f6ae1cbfc409a127b01229980e372f7"}, - {file = "orjson-3.10.0-cp312-none-win_amd64.whl", hash = "sha256:983db1f87c371dc6ffc52931eb75f9fe17dc621273e43ce67bee407d3e5476e9"}, - {file = "orjson-3.10.0-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9a667769a96a72ca67237224a36faf57db0c82ab07d09c3aafc6f956196cfa1b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade1e21dfde1d37feee8cf6464c20a2f41fa46c8bcd5251e761903e46102dc6b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23c12bb4ced1c3308eff7ba5c63ef8f0edb3e4c43c026440247dd6c1c61cea4b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2d014cf8d4dc9f03fc9f870de191a49a03b1bcda51f2a957943fb9fafe55aac"}, - {file = 
"orjson-3.10.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eadecaa16d9783affca33597781328e4981b048615c2ddc31c47a51b833d6319"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd583341218826f48bd7c6ebf3310b4126216920853cbc471e8dbeaf07b0b80e"}, - {file = "orjson-3.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:90bfc137c75c31d32308fd61951d424424426ddc39a40e367704661a9ee97095"}, - {file = "orjson-3.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13b5d3c795b09a466ec9fcf0bd3ad7b85467d91a60113885df7b8d639a9d374b"}, - {file = "orjson-3.10.0-cp38-none-win32.whl", hash = "sha256:5d42768db6f2ce0162544845facb7c081e9364a5eb6d2ef06cd17f6050b048d8"}, - {file = "orjson-3.10.0-cp38-none-win_amd64.whl", hash = "sha256:33e6655a2542195d6fd9f850b428926559dee382f7a862dae92ca97fea03a5ad"}, - {file = "orjson-3.10.0-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4050920e831a49d8782a1720d3ca2f1c49b150953667eed6e5d63a62e80f46a2"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1897aa25a944cec774ce4a0e1c8e98fb50523e97366c637b7d0cddabc42e6643"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9bf565a69e0082ea348c5657401acec3cbbb31564d89afebaee884614fba36b4"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b6ebc17cfbbf741f5c1a888d1854354536f63d84bee537c9a7c0335791bb9009"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2817877d0b69f78f146ab305c5975d0618df41acf8811249ee64231f5953fee"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57d017863ec8aa4589be30a328dacd13c2dc49de1c170bc8d8c8a98ece0f2925"}, - {file = "orjson-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:22c2f7e377ac757bd3476ecb7480c8ed79d98ef89648f0176deb1da5cd014eb7"}, - {file = "orjson-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e62ba42bfe64c60c1bc84799944f80704e996592c6b9e14789c8e2a303279912"}, - {file = "orjson-3.10.0-cp39-none-win32.whl", hash = "sha256:60c0b1bdbccd959ebd1575bd0147bd5e10fc76f26216188be4a36b691c937077"}, - {file = "orjson-3.10.0-cp39-none-win_amd64.whl", hash = "sha256:175a41500ebb2fdf320bf78e8b9a75a1279525b62ba400b2b2444e274c2c8bee"}, - {file = "orjson-3.10.0.tar.gz", hash = "sha256:ba4d8cac5f2e2cff36bea6b6481cdb92b38c202bcec603d6f5ff91960595a1ed"}, + {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, + {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, + {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, + {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, + {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, + {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, + {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, + {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, + {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, + {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, + {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, + {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, + {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, + {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, + {file = "orjson-3.10.7-cp38-none-win32.whl", hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, + {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, + {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, + {file = 
"orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, + {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, + {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, + {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, ] [[package]] name = "packaging" -version = "24.0" +version = "24.1" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] @@ -797,28 +917,29 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -855,55 +976,126 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pydantic" -version = "1.10.15" -description = "Data validation and settings management using python type hints" +version = "2.8.2" +description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, - {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, - {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50f1666a9940d3d68683c9d96e39640f709d7a72ff8702987dab1761036206bb"}, - {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82790d4753ee5d00739d6cb5cf56bceb186d9d6ce134aca3ba7befb1eedbc2c8"}, - {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d207d5b87f6cbefbdb1198154292faee8017d7495a54ae58db06762004500d00"}, - {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e49db944fad339b2ccb80128ffd3f8af076f9f287197a480bf1e4ca053a866f0"}, - {file = "pydantic-1.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:d3b5c4cbd0c9cb61bbbb19ce335e1f8ab87a811f6d589ed52b0254cf585d709c"}, - {file = "pydantic-1.10.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c3d5731a120752248844676bf92f25a12f6e45425e63ce22e0849297a093b5b0"}, - {file = "pydantic-1.10.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c365ad9c394f9eeffcb30a82f4246c0006417f03a7c0f8315d6211f25f7cb654"}, - {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3287e1614393119c67bd4404f46e33ae3be3ed4cd10360b48d0a4459f420c6a3"}, - {file = 
"pydantic-1.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be51dd2c8596b25fe43c0a4a59c2bee4f18d88efb8031188f9e7ddc6b469cf44"}, - {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6a51a1dd4aa7b3f1317f65493a182d3cff708385327c1c82c81e4a9d6d65b2e4"}, - {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4e316e54b5775d1eb59187f9290aeb38acf620e10f7fd2f776d97bb788199e53"}, - {file = "pydantic-1.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:0d142fa1b8f2f0ae11ddd5e3e317dcac060b951d605fda26ca9b234b92214986"}, - {file = "pydantic-1.10.15-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7ea210336b891f5ea334f8fc9f8f862b87acd5d4a0cbc9e3e208e7aa1775dabf"}, - {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3453685ccd7140715e05f2193d64030101eaad26076fad4e246c1cc97e1bb30d"}, - {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bea1f03b8d4e8e86702c918ccfd5d947ac268f0f0cc6ed71782e4b09353b26f"}, - {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:005655cabc29081de8243126e036f2065bd7ea5b9dff95fde6d2c642d39755de"}, - {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:af9850d98fc21e5bc24ea9e35dd80a29faf6462c608728a110c0a30b595e58b7"}, - {file = "pydantic-1.10.15-cp37-cp37m-win_amd64.whl", hash = "sha256:d31ee5b14a82c9afe2bd26aaa405293d4237d0591527d9129ce36e58f19f95c1"}, - {file = "pydantic-1.10.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5e09c19df304b8123938dc3c53d3d3be6ec74b9d7d0d80f4f4b5432ae16c2022"}, - {file = "pydantic-1.10.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac9237cd62947db00a0d16acf2f3e00d1ae9d3bd602b9c415f93e7a9fc10528"}, - {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:584f2d4c98ffec420e02305cf675857bae03c9d617fcfdc34946b1160213a948"}, - {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbc6989fad0c030bd70a0b6f626f98a862224bc2b1e36bfc531ea2facc0a340c"}, - {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d573082c6ef99336f2cb5b667b781d2f776d4af311574fb53d908517ba523c22"}, - {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6bd7030c9abc80134087d8b6e7aa957e43d35714daa116aced57269a445b8f7b"}, - {file = "pydantic-1.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:3350f527bb04138f8aff932dc828f154847fbdc7a1a44c240fbfff1b57f49a12"}, - {file = "pydantic-1.10.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51d405b42f1b86703555797270e4970a9f9bd7953f3990142e69d1037f9d9e51"}, - {file = "pydantic-1.10.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a980a77c52723b0dc56640ced396b73a024d4b74f02bcb2d21dbbac1debbe9d0"}, - {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67f1a1fb467d3f49e1708a3f632b11c69fccb4e748a325d5a491ddc7b5d22383"}, - {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:676ed48f2c5bbad835f1a8ed8a6d44c1cd5a21121116d2ac40bd1cd3619746ed"}, - {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:92229f73400b80c13afcd050687f4d7e88de9234d74b27e6728aa689abcf58cc"}, - {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash 
= "sha256:2746189100c646682eff0bce95efa7d2e203420d8e1c613dc0c6b4c1d9c1fde4"}, - {file = "pydantic-1.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:394f08750bd8eaad714718812e7fab615f873b3cdd0b9d84e76e51ef3b50b6b7"}, - {file = "pydantic-1.10.15-py3-none-any.whl", hash = "sha256:28e552a060ba2740d0d2aabe35162652c1459a0b9069fe0db7f4ee0e18e74d58"}, - {file = "pydantic-1.10.15.tar.gz", hash = "sha256:ca832e124eda231a60a041da4f013e3ff24949d94a01154b137fc2f2a43c3ffb"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] -typing-extensions = ">=4.2.0" +annotated-types = ">=0.4.0" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + 
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = 
"sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = 
"pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pytest" @@ -929,13 +1121,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no [[package]] name = "pytest-asyncio" -version = "0.21.1" +version = "0.21.2" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, - {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, + {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, + {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, ] [package.dependencies] @@ -978,6 +1170,20 @@ files = [ packaging = ">=17.1" pytest = ">=7.2" +[[package]] +name = "pytest-socket" +version = "0.7.0" +description = "Pytest Plugin to disable socket calls during tests" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "pytest_socket-0.7.0-py3-none-any.whl", hash = "sha256:7e0f4642177d55d317bbd58fc68c6bd9048d6eadb2d46a89307fa9221336ce45"}, + {file = "pytest_socket-0.7.0.tar.gz", hash = "sha256:71ab048cbbcb085c15a4423b73b619a8b35d6a307f46f78ea46be51b1b7e11b3"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + [[package]] name = "pytest-subtests" version = "0.11.0" @@ -1010,18 +1216,18 @@ watchdog = ">=2.0.0" [[package]] name = "pytest-xdist" -version = "3.5.0" +version = "3.6.1" description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"}, - {file = 
"pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"}, + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, ] [package.dependencies] -execnet = ">=1.1" -pytest = ">=6.2.0" +execnet = ">=2.1" +pytest = ">=7.0.0" [package.extras] psutil = ["psutil (>=3.0)"] @@ -1044,73 +1250,75 @@ six = ">=1.5" [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = 
"PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = 
"PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -1125,28 +1333,28 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.3.5" +version = "0.3.7" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:aef5bd3b89e657007e1be6b16553c8813b221ff6d92c7526b7e0227450981eac"}, - {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:89b1e92b3bd9fca249153a97d23f29bed3992cff414b222fcd361d763fc53f12"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e55771559c89272c3ebab23326dc23e7f813e492052391fe7950c1a5a139d89"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dabc62195bf54b8a7876add6e789caae0268f34582333cda340497c886111c39"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a05f3793ba25f194f395578579c546ca5d83e0195f992edc32e5907d142bfa3"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfd3504e881082959b4160ab02f7a205f0fadc0a9619cc481982b6837b2fd4c0"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87258e0d4b04046cf1d6cc1c56fadbf7a880cc3de1f7294938e923234cf9e498"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:712e71283fc7d9f95047ed5f793bc019b0b0a29849b14664a60fd66c23b96da1"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532a90b4a18d3f722c124c513ffb5e5eaff0cc4f6d3aa4bda38e691b8600c9f"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:122de171a147c76ada00f76df533b54676f6e321e61bd8656ae54be326c10296"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d80a6b18a6c3b6ed25b71b05eba183f37d9bc8b16ace9e3d700997f00b74660b"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a7b6e63194c68bca8e71f81de30cfa6f58ff70393cf45aab4c20f158227d5936"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a759d33a20c72f2dfa54dae6e85e1225b8e302e8ac655773aff22e542a300985"}, - {file = "ruff-0.3.5-py3-none-win32.whl", hash = "sha256:9d8605aa990045517c911726d21293ef4baa64f87265896e491a05461cae078d"}, - {file = "ruff-0.3.5-py3-none-win_amd64.whl", hash = "sha256:dc56bb16a63c1303bd47563c60482a1512721053d93231cf7e9e1c6954395a0e"}, - {file = "ruff-0.3.5-py3-none-win_arm64.whl", hash = "sha256:faeeae9905446b975dcf6d4499dc93439b131f1443ee264055c5716dd947af55"}, - {file = "ruff-0.3.5.tar.gz", hash = "sha256:a067daaeb1dc2baf9b82a32dae67d154d95212080c80435eb052d95da647763d"}, + {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e8377cccb2f07abd25e84fc5b2cbe48eeb0fea9f1719cad7caedb061d70e5ce"}, + {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:15a4d1cc1e64e556fa0d67bfd388fed416b7f3b26d5d1c3e7d192c897e39ba4b"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d28bdf3d7dc71dd46929fafeec98ba89b7c3550c3f0978e36389b5631b793663"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:379b67d4f49774ba679593b232dcd90d9e10f04d96e3c8ce4a28037ae473f7bb"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c060aea8ad5ef21cdfbbe05475ab5104ce7827b639a78dd55383a6e9895b7c51"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ebf8f615dde968272d70502c083ebf963b6781aacd3079081e03b32adfe4d58a"}, + {file = 
"ruff-0.3.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48098bd8f5c38897b03604f5428901b65e3c97d40b3952e38637b5404b739a2"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8a4fda219bf9024692b1bc68c9cff4b80507879ada8769dc7e985755d662ea"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c44e0149f1d8b48c4d5c33d88c677a4aa22fd09b1683d6a7ff55b816b5d074f"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3050ec0af72b709a62ecc2aca941b9cd479a7bf2b36cc4562f0033d688e44fa1"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a29cc38e4c1ab00da18a3f6777f8b50099d73326981bb7d182e54a9a21bb4ff7"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b15cc59c19edca917f51b1956637db47e200b0fc5e6e1878233d3a938384b0b"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e491045781b1e38b72c91247cf4634f040f8d0cb3e6d3d64d38dcf43616650b4"}, + {file = "ruff-0.3.7-py3-none-win32.whl", hash = "sha256:bc931de87593d64fad3a22e201e55ad76271f1d5bfc44e1a1887edd0903c7d9f"}, + {file = "ruff-0.3.7-py3-none-win_amd64.whl", hash = "sha256:5ef0e501e1e39f35e03c2acb1d1238c595b8bb36cf7a170e7c1df1b73da00e74"}, + {file = "ruff-0.3.7-py3-none-win_arm64.whl", hash = "sha256:789e144f6dc7019d1f92a812891c645274ed08af6037d11fc65fcbc183b7d59f"}, + {file = "ruff-0.3.7.tar.gz", hash = "sha256:d5c1aebee5162c2226784800ae031f660c350e7a3402c4d1f8ea4e97e232e3ba"}, ] [[package]] @@ -1202,13 +1410,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.3" +version = "4.66.5" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.3-py3-none-any.whl", hash = "sha256:4f41d54107ff9a223dca80b53efe4fb654c67efaba7f47bada3ee9d50e05bd53"}, - {file = "tqdm-4.66.3.tar.gz", hash = "sha256:23097a41eba115ba99ecae40d06444c15d1c0c698d527a01c6c8bd1c5d0647e5"}, + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, ] [package.dependencies] @@ -1222,35 +1430,35 @@ telegram = ["requests"] [[package]] name = "types-psutil" -version = "5.9.5.20240316" +version = "5.9.5.20240516" description = "Typing stubs for psutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-psutil-5.9.5.20240316.tar.gz", hash = "sha256:5636f5714bb930c64bb34c4d47a59dc92f9d610b778b5364a31daa5584944848"}, - {file = "types_psutil-5.9.5.20240316-py3-none-any.whl", hash = "sha256:2fdd64ea6e97befa546938f486732624f9255fde198b55e6f00fda236f059f64"}, + {file = "types-psutil-5.9.5.20240516.tar.gz", hash = "sha256:bb296f59fc56458891d0feb1994717e548a1bcf89936a2877df8792b822b4696"}, + {file = "types_psutil-5.9.5.20240516-py3-none-any.whl", hash = "sha256:83146ded949a10167d9895e567b3b71e53ebc5e23fd8363eab62b3c76cce7b89"}, ] [[package]] name = "types-pytz" -version = "2024.1.0.20240203" +version = "2024.1.0.20240417" description = "Typing stubs for pytz" optional = false python-versions = ">=3.8" files = [ - {file = "types-pytz-2024.1.0.20240203.tar.gz", hash = "sha256:c93751ee20dfc6e054a0148f8f5227b9a00b79c90a4d3c9f464711a73179c89e"}, - {file = "types_pytz-2024.1.0.20240203-py3-none-any.whl", hash = "sha256:9679eef0365db3af91ef7722c199dbb75ee5c1b67e3c4dd7bfbeb1b8a71c21a3"}, + {file = "types-pytz-2024.1.0.20240417.tar.gz", hash = 
"sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981"}, + {file = "types_pytz-2024.1.0.20240417-py3-none-any.whl", hash = "sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659"}, ] [[package]] name = "types-pyyaml" -version = "6.0.12.20240311" +version = "6.0.12.20240808" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, + {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"}, + {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"}, ] [[package]] @@ -1269,13 +1477,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.31.0.20240406" +version = "2.32.0.20240712" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"}, - {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"}, + {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, + {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, ] [package.dependencies] @@ -1283,13 +1491,13 @@ urllib3 = ">=2" [[package]] name = "types-tqdm" -version = "4.66.0.20240106" +version = "4.66.0.20240417" description = "Typing stubs for tqdm" optional = false python-versions = ">=3.8" files = [ - {file = "types-tqdm-4.66.0.20240106.tar.gz", hash = "sha256:7acf4aade5bad3ded76eb829783f9961b1c2187948eaa6dd1ae8644dff95a938"}, - {file = "types_tqdm-4.66.0.20240106-py3-none-any.whl", hash = "sha256:7459b0f441b969735685645a5d8480f7912b10d05ab45f99a2db8a8e45cb550b"}, + {file = "types-tqdm-4.66.0.20240417.tar.gz", hash = "sha256:16dce9ef522ea8d40e4f5b8d84dd8a1166eefc13ceee7a7e158bf0f1a1421a31"}, + {file = "types_tqdm-4.66.0.20240417-py3-none-any.whl", hash = "sha256:248aef1f9986b7b8c2c12b3cb4399fc17dba0a29e7e3f3f9cd704babb879383d"}, ] [[package]] @@ -1305,13 +1513,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] @@ -1331,13 +1539,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "1.26.18" +version = "1.26.19" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, + {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, ] [package.extras] @@ -1347,13 +1555,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -1402,40 +1610,46 @@ tests = ["Werkzeug (==2.0.3)", "aiohttp", "boto3", "httplib2", "httpx", "pytest" [[package]] name = "watchdog" -version = "4.0.0" +version = "4.0.2" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = 
"watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, + {file = "watchdog-4.0.2-py3-none-win32.whl", hash = 
"sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, + {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, + {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, + {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, ] [package.extras] @@ -1629,4 +1843,4 @@ vcr = [] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "c37fcc9d809cf705f31fc6a8c2cf241fae01075b34cff613b62bd77ab049477a" +content-hash = "e062da3051244f0d59796d6659149eee4e2f46d9332714d57edd459c80b7d8cd" diff --git a/python/pyproject.toml b/python/pyproject.toml index beb690473..1242fb6d7 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.56" +version = "0.1.99" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" @@ -26,7 +26,10 @@ langsmith = "langsmith.cli.main:main" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -pydantic = ">=1,<3" +pydantic = [ + { version = ">=1,<3", python = "<3.12.4" }, + { version = "^2.7.4", python = ">=3.12.4" }, +] requests = "^2" orjson = "^3.9.14" @@ -35,7 +38,6 @@ pytest = "^7.3.1" black = ">=23.3,<25.0" mypy = "^1.9.0" ruff = "^0.3.4" -pydantic = ">=1,<2" types-requests = "^2.31.0.1" pandas-stubs = "^2.0.1.230501" types-pyyaml = "^6.0.12.10" @@ -53,10 +55,15 @@ vcrpy = "^6.0.1" fastapi = "^0.110.1" uvicorn = "^0.29.0" pytest-rerunfailures = "^14.0" +pytest-socket = "^0.7.0" [tool.poetry.group.lint.dependencies] openai = "^1.10" + +[tool.poetry.group.test.dependencies] +pytest-socket = "^0.7.0" + [tool.poetry.extras] vcr = ["vcrpy"] @@ -88,8 +95,10 @@ docstring-code-format = true docstring-code-line-length = 80 [tool.mypy] +plugins = ["pydantic.v1.mypy", "pydantic.mypy"] ignore_missing_imports = "True" disallow_untyped_defs = "True" [tool.pytest.ini_options] asyncio_mode = "auto" +markers = [ "slow: long-running tests",] diff --git a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index 234aeb6f0..e05f9e920 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -3,7 +3,7 @@ import pytest -from langsmith import Client, aevaluate, evaluate, expect, unit +from langsmith import Client, aevaluate, evaluate, expect, test from langsmith.schemas import Example, Run @@ -29,7 +29,7 @@ def precision(runs: Sequence[Run], examples: Sequence[Example]): def predict(inputs: dict) -> dict: return {"output": "Yes"} - evaluate( + results = evaluate( predict, data=dataset_name, evaluators=[accuracy], @@ -39,7 +39,12 @@ def predict(inputs: dict) -> dict: "my-prompt-version": "abcd-1234", "function": "evaluate", }, + num_repetitions=3, ) + assert len(results) == 30 + examples = client.list_examples(dataset_name=dataset_name) + for example in examples: + assert len([r for r in results if r["example"].id == example.id]) == 3 async def test_aevaluate(): @@ -65,7 +70,7 @@ async def apredict(inputs: dict) -> dict: await asyncio.sleep(0.1) return {"output": "Yes"} - await aevaluate( + results = await aevaluate( apredict, data=dataset_name, evaluators=[accuracy], @@ -76,10 +81,20 @@ async def apredict(inputs: dict) -> dict: "my-prompt-version": "abcd-1234", "function": "aevaluate", }, + num_repetitions=2, 
) - - -@unit + assert len(results) == 20 + examples = client.list_examples(dataset_name=dataset_name) + all_results = [r async for r in results] + for example in examples: + count = 0 + for r in all_results: + if r["run"].reference_example_id == example.id: + count += 1 + assert count == 2 + + +@test def test_foo(): expect(3 + 4).to_equal(7) @@ -94,33 +109,33 @@ def expected_output(): return "input" -@unit(output_keys=["expected_output"]) +@test(output_keys=["expected_output"]) def test_bar(some_input: str, expected_output: str): expect(some_input).to_contain(expected_output) -@unit +@test async def test_baz(): await asyncio.sleep(0.1) expect(3 + 4).to_equal(7) return 7 -@unit +@test @pytest.mark.parametrize("x, y", [(1, 2), (2, 3)]) def test_foo_parametrized(x, y): expect(x + y).to_be_greater_than(0) return x + y -@unit(output_keys=["z"]) +@test(output_keys=["z"]) @pytest.mark.parametrize("x, y, z", [(1, 2, 3), (2, 3, 5)]) def test_bar_parametrized(x, y, z): expect(x + y).to_equal(z) return {"z": x + y} -@unit +@test(test_suite_name="tests.evaluation.test_evaluation::test_foo_async_parametrized") @pytest.mark.parametrize("x, y", [(1, 2), (2, 3)]) async def test_foo_async_parametrized(x, y): await asyncio.sleep(0.1) @@ -128,7 +143,7 @@ async def test_foo_async_parametrized(x, y): return x + y -@unit(output_keys=["z"]) +@test(output_keys=["z"]) @pytest.mark.parametrize("x, y, z", [(1, 2, 3), (2, 3, 5)]) async def test_bar_async_parametrized(x, y, z): await asyncio.sleep(0.1) @@ -136,11 +151,11 @@ async def test_bar_async_parametrized(x, y, z): return {"z": x + y} -@unit +@test def test_pytest_skip(): pytest.skip("Skip this test") -@unit +@test async def test_async_pytest_skip(): pytest.skip("Skip this test") diff --git a/python/tests/external/test_instructor_evals.py b/python/tests/external/test_instructor_evals.py index d90a53019..c56e06d92 100644 --- a/python/tests/external/test_instructor_evals.py +++ b/python/tests/external/test_instructor_evals.py @@ -8,7 +8,7 @@ from openai import AsyncOpenAI from pydantic import BaseModel -from langsmith import unit +from langsmith import test class Models(str, Enum): @@ -58,7 +58,7 @@ class ClassifySpam(BaseModel): @pytest.mark.asyncio_cooperative -@unit() +@test() @pytest.mark.parametrize("client, data", d[:3]) async def test_classification(client, data): input, expected = data diff --git a/python/tests/integration_tests/conftest.py b/python/tests/integration_tests/conftest.py new file mode 100644 index 000000000..8ad66c3d2 --- /dev/null +++ b/python/tests/integration_tests/conftest.py @@ -0,0 +1,17 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) diff --git a/python/tests/integration_tests/fake_server.py b/python/tests/integration_tests/fake_server.py index 93850d9da..f42f328f2 100644 --- a/python/tests/integration_tests/fake_server.py +++ b/python/tests/integration_tests/fake_server.py @@ -14,6 +14,7 @@ def fake_function(): assert parent_run is not None assert "did-propagate" in span.tags or [] assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -25,6 +26,7 @@ def 
fake_function_two(foo: str): assert parent_run is not None assert "did-propagate" in (span.tags or []) assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -36,6 +38,7 @@ def fake_function_three(foo: str): assert parent_run is not None assert "did-propagate" in (span.tags or []) assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -47,8 +50,16 @@ async def fake_route(request: Request): parent=request.headers, ): fake_function() - fake_function_two("foo", langsmith_extra={"parent": request.headers}) + fake_function_two( + "foo", + langsmith_extra={ + "parent": request.headers, + "project_name": "Definitely-not-your-grandpas-project", + }, + ) - with tracing_context(parent=request.headers): + with tracing_context( + parent=request.headers, project_name="Definitely-not-your-grandpas-project" + ): fake_function_three("foo") return {"message": "Fake route response"} diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 79c18c8ac..d939111d4 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -8,11 +8,12 @@ import sys import time from datetime import timedelta -from typing import Any, Callable, Dict, cast +from typing import Any, Callable, Dict from uuid import uuid4 import pytest from freezegun import freeze_time +from pydantic import BaseModel from langsmith.client import ID_TYPE, Client from langsmith.schemas import DataType @@ -98,36 +99,118 @@ def test_datasets(langchain_client: Client) -> None: assert updated_example_value.outputs["col2"] == "updatedExampleCol2" assert (updated_example_value.metadata or {}).get("foo") == "bar" + new_example = langchain_client.create_example( + inputs={"col1": "newAddedExampleCol1"}, + outputs={"col2": "newAddedExampleCol2"}, + dataset_id=new_dataset.id, + ) + example_value = langchain_client.read_example(new_example.id) + assert example_value.inputs is not None + assert example_value.inputs["col1"] == "newAddedExampleCol1" + assert example_value.outputs is not None + assert example_value.outputs["col2"] == "newAddedExampleCol2" + + langchain_client.update_examples( + example_ids=[new_example.id, example.id], + inputs=[{"col1": "newUpdatedExampleCol1"}, {"col1": "newNewUpdatedExampleCol"}], + outputs=[ + {"col2": "newUpdatedExampleCol2"}, + {"col2": "newNewUpdatedExampleCol2"}, + ], + metadata=[{"foo": "baz"}, {"foo": "qux"}], + ) + updated_example = langchain_client.read_example(new_example.id) + assert updated_example.id == new_example.id + assert updated_example.inputs["col1"] == "newUpdatedExampleCol1" + assert updated_example.outputs is not None + assert updated_example.outputs["col2"] == "newUpdatedExampleCol2" + assert (updated_example.metadata or {}).get("foo") == "baz" + + updated_example = langchain_client.read_example(example.id) + assert updated_example.id == example.id + assert updated_example.inputs["col1"] == "newNewUpdatedExampleCol" + assert updated_example.outputs is not None + assert updated_example.outputs["col2"] == "newNewUpdatedExampleCol2" + assert (updated_example.metadata or {}).get("foo") == "qux" + langchain_client.delete_example(example.id) examples2 = list( langchain_client.list_examples(dataset_id=new_dataset.id) # type: ignore ) - assert len(examples2) == 1 + assert len(examples2) == 2 langchain_client.delete_dataset(dataset_id=dataset_id) def 
test_list_examples(langchain_client: Client) -> None: """Test list_examples.""" examples = [ - ("Shut up, idiot", "Toxic"), - ("You're a wonderful person", "Not toxic"), - ("This is the worst thing ever", "Toxic"), - ("I had a great day today", "Not toxic"), - ("Nobody likes you", "Toxic"), - ("This is unacceptable. I want to speak to the manager.", "Not toxic"), + ("Shut up, idiot", "Toxic", ["train", "validation"]), + ("You're a wonderful person", "Not toxic", "test"), + ("This is the worst thing ever", "Toxic", ["train"]), + ("I had a great day today", "Not toxic", "test"), + ("Nobody likes you", "Toxic", "train"), + ("This is unacceptable. I want to speak to the manager.", "Not toxic", None), ] dataset_name = "__test_list_examples" + uuid4().hex[:4] dataset = langchain_client.create_dataset(dataset_name=dataset_name) - inputs, outputs = zip( - *[({"text": text}, {"label": label}) for text, label in examples] + inputs, outputs, splits = zip( + *[({"text": text}, {"label": label}, split) for text, label, split in examples] ) langchain_client.create_examples( - inputs=inputs, outputs=outputs, dataset_id=dataset.id + inputs=inputs, outputs=outputs, splits=splits, dataset_id=dataset.id ) example_list = list(langchain_client.list_examples(dataset_id=dataset.id)) assert len(example_list) == len(examples) + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, offset=1, limit=2) + ) + assert len(example_list) == 2 + + example_list = list(langchain_client.list_examples(dataset_id=dataset.id, offset=1)) + assert len(example_list) == len(examples) - 1 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) + ) + assert len(example_list) == 3 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["validation"]) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["test"]) + ) + assert len(example_list) == 2 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["train", "test"]) + ) + assert len(example_list) == 5 + + langchain_client.update_example( + example_id=[ + example.id + for example in example_list + if example.metadata is not None + and "test" in example.metadata.get("dataset_split", []) + ][0], + split="train", + ) + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["test"]) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) + ) + assert len(example_list) == 4 + langchain_client.create_example( inputs={"text": "What's up!"}, outputs={"label": "Not toxic"}, @@ -162,6 +245,71 @@ def test_list_examples(langchain_client: Client) -> None: ) assert len(example_list) == 0 + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='exists(metadata, "baz")' + ) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='has("metadata", \'{"foo": "bar"}\')' + ) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='exists(metadata, "bazzz")' + ) + ) + assert len(example_list) == 0 + + langchain_client.delete_dataset(dataset_id=dataset.id) + + +@pytest.mark.slow +def test_similar_examples(langchain_client: Client) -> None: + inputs = [{"text": "how are you"}, {"text": "good bye"}, {"text": "see ya later"}] + 
outputs = [ + {"response": "good how are you"}, + {"response": "ta ta"}, + {"response": "tootles"}, + ] + dataset_name = "__test_similar_examples" + uuid4().hex[:4] + dataset = langchain_client.create_dataset( + dataset_name=dataset_name, + inputs_schema={ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "text": {"type": "string"}, + }, + "required": ["text"], + "additionalProperties": False, + }, + outputs_schema={ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "response": {"type": "string"}, + }, + "required": ["response"], + "additionalProperties": False, + }, + ) + langchain_client.create_examples( + inputs=inputs, outputs=outputs, dataset_id=dataset.id + ) + langchain_client.index_dataset(dataset_id=dataset.id) + # Need to wait for indexing to finish. + time.sleep(5) + similar_list = langchain_client.similar_examples( + {"text": "howdy"}, limit=2, dataset_id=dataset.id + ) + assert len(similar_list) == 2 + langchain_client.delete_dataset(dataset_id=dataset.id) @@ -209,11 +357,7 @@ def test_error_surfaced_invalid_uri(monkeypatch: pytest.MonkeyPatch, uri: str) - client.create_run("My Run", inputs={"text": "hello world"}, run_type="llm") -def test_create_dataset( - monkeypatch: pytest.MonkeyPatch, langchain_client: Client -) -> None: - """Test persisting runs and adding feedback.""" - monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://dev.api.smith.langchain.com") +def test_create_dataset(langchain_client: Client) -> None: dataset_name = "__test_create_dataset" + uuid4().hex[:4] if langchain_client.has_dataset(dataset_name=dataset_name): langchain_client.delete_dataset(dataset_name=dataset_name) @@ -257,6 +401,59 @@ def test_create_dataset( langchain_client.delete_dataset(dataset_id=dataset.id) +def test_dataset_schema_validation(langchain_client: Client) -> None: + dataset_name = "__test_create_dataset" + uuid4().hex[:4] + if langchain_client.has_dataset(dataset_name=dataset_name): + langchain_client.delete_dataset(dataset_name=dataset_name) + + class InputSchema(BaseModel): + input: str + + class OutputSchema(BaseModel): + output: str + + dataset = langchain_client.create_dataset( + dataset_name, + data_type=DataType.kv, + inputs_schema=InputSchema.model_json_schema(), + outputs_schema=OutputSchema.model_json_schema(), + ) + + # confirm we store the schema from the create request + assert dataset.inputs_schema == InputSchema.model_json_schema() + assert dataset.outputs_schema == OutputSchema.model_json_schema() + + # create an example that matches the schema, which should succeed + langchain_client.create_example( + inputs={"input": "hello world"}, + outputs={"output": "hello"}, + dataset_id=dataset.id, + ) + + # create an example that does not match the input schema + with pytest.raises(LangSmithError): + langchain_client.create_example( + inputs={"john": 1}, + outputs={"output": "hello"}, + dataset_id=dataset.id, + ) + + # create an example that does not match the output schema + with pytest.raises(LangSmithError): + langchain_client.create_example( + inputs={"input": "hello world"}, + outputs={"john": 1}, + dataset_id=dataset.id, + ) + + # assert read API includes the schema definition + read_dataset = langchain_client.read_dataset(dataset_id=dataset.id) + assert read_dataset.inputs_schema == InputSchema.model_json_schema() + assert read_dataset.outputs_schema == OutputSchema.model_json_schema() + + langchain_client.delete_dataset(dataset_id=dataset.id) + + @freeze_time("2023-01-01") def 
test_list_datasets(langchain_client: Client) -> None: ds1n = "__test_list_datasets1" + uuid4().hex[:4] @@ -404,6 +601,7 @@ def test_create_chat_example( def test_batch_ingest_runs(langchain_client: Client) -> None: _session = "__test_batch_ingest_runs" trace_id = uuid4() + trace_id_2 = uuid4() run_id_2 = uuid4() current_time = datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S%fZ" @@ -422,6 +620,16 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: "inputs": {"input1": 1, "input2": 2}, "outputs": {"output1": 3, "output2": 4}, }, + { + "id": str(trace_id_2), + "session_name": _session, + "name": "run 3", + "run_type": "chain", + "dotted_order": f"{current_time}{str(trace_id_2)}", + "trace_id": str(trace_id_2), + "inputs": {"input1": 1, "input2": 2}, + "error": "error", + }, { "id": str(run_id_2), "session_name": _session, @@ -441,7 +649,7 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: f"{later_time}{str(run_id_2)}", "trace_id": str(trace_id), "parent_run_id": str(trace_id), - "outputs": {"output1": 7, "output2": 8}, + "outputs": {"output1": 4, "output2": 5}, }, ] langchain_client.batch_ingest_runs(create=runs_to_create, update=runs_to_update) @@ -451,10 +659,11 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: try: runs = list( langchain_client.list_runs( - project_name=_session, run_ids=[str(trace_id), str(run_id_2)] + project_name=_session, + run_ids=[str(trace_id), str(run_id_2), str(trace_id_2)], ) ) - if len(runs) == 2: + if len(runs) == 3: break raise LangSmithError("Runs not created yet") except LangSmithError: @@ -462,22 +671,24 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: wait += 1 else: raise ValueError("Runs not created in time") - assert len(runs) == 2 + assert len(runs) == 3 # Write all the assertions here - runs = sorted(runs, key=lambda x: cast(str, x.dotted_order)) - assert len(runs) == 2 + assert len(runs) == 3 # Assert inputs and outputs of run 1 - run1 = runs[0] + run1 = next(run for run in runs if run.id == trace_id) assert run1.inputs == {"input1": 1, "input2": 2} assert run1.outputs == {"output1": 3, "output2": 4} # Assert inputs and outputs of run 2 - run2 = runs[1] + run2 = next(run for run in runs if run.id == run_id_2) assert run2.inputs == {"input1": 5, "input2": 6} - assert run2.outputs == {"output1": 7, "output2": 8} + assert run2.outputs == {"output1": 4, "output2": 5} - langchain_client.delete_project(project_name=_session) + # Assert inputs and outputs of run 3 + run3 = next(run for run in runs if run.id == trace_id_2) + assert run3.inputs == {"input1": 1, "input2": 2} + assert run3.error == "error" @freeze_time("2023-01-01") @@ -581,3 +792,10 @@ def test_surrogates(): run_type="llm", end_time=datetime.datetime.now(datetime.timezone.utc), ) + + +def test_runs_stats(): + langchain_client = Client() + # We always have stuff in the "default" project... 
+ stats = langchain_client.get_run_stats(project_names=["default"], run_type="llm") + assert stats diff --git a/python/tests/integration_tests/test_context_propagation.py b/python/tests/integration_tests/test_context_propagation.py index 32cd1f74d..096f8bb5d 100644 --- a/python/tests/integration_tests/test_context_propagation.py +++ b/python/tests/integration_tests/test_context_propagation.py @@ -54,6 +54,7 @@ async def test_tracing_fake_server(fake_server): langsmith_extra={ "metadata": {"some-cool-value": 42}, "tags": ["did-propagate"], + "project_name": "distributed-tracing", }, ) assert result["message"] == "Fake route response" diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py new file mode 100644 index 000000000..28b742096 --- /dev/null +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -0,0 +1,209 @@ +import pytest + +from langsmith import Client, aevaluate, evaluate +from langsmith.evaluation.llm_evaluator import ( + CategoricalScoreConfig, + ContinuousScoreConfig, + LLMEvaluator, +) + + +def test_llm_evaluator_init() -> None: + evaluator = LLMEvaluator( + prompt_template="Is the response vague? Y/N\n{input}", + score_config=CategoricalScoreConfig( + key="vagueness", + choices=["Y", "N"], + description="Whether the response is vague. Y for yes, N for no.", + include_explanation=True, + ), + ) + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "vagueness", + "description": "Whether the response is vague. Y for yes, N for no.", + "type": "object", + "properties": { + "score": { + "type": "string", + "enum": ["Y", "N"], + "description": "The score for the evaluation, one of Y, N.", + }, + "explanation": { + "type": "string", + "description": "The explanation for the score.", + }, + }, + "required": ["score", "explanation"], + } + + # Try a continuous score + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "rating", + "description": "The rating of the response, from 0 to 1.", + "type": "object", + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "The score for the evaluation, " + "between 0 and 1, inclusive.", + }, + }, + "required": ["score"], + } + + # Test invalid model + with pytest.raises(ValueError): + LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + model_provider="invalid", + ) + + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {expected}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + assert evaluator is not None + assert set(evaluator.prompt.input_variables) == {"input", "output", "expected"} + + with pytest.raises(ValueError): + # Test invalid input variable without map_variables + LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}", + score_config=ContinuousScoreConfig( + key="rating", 
+ description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + map_variables=lambda run, example: {"hello": "world"}, + ) + assert evaluator is not None + assert set(evaluator.prompt.input_variables) == {"input", "output", "hello"} + + +def test_from_model() -> None: + from langchain_openai import ChatOpenAI + + evaluator = LLMEvaluator.from_model( + ChatOpenAI(), + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "rating", + "description": "The rating of the response, from 0 to 1.", + "type": "object", + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "The score for the evaluation, " + "between 0 and 1, inclusive.", + }, + }, + "required": ["score"], + } + + +async def test_evaluate() -> None: + client = Client() + client.clone_public_dataset( + "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" + ) + dataset_name = "Evaluate Examples" + + def predict(inputs: dict) -> dict: + return {"answer": "Yes"} + + async def apredict(inputs: dict) -> dict: + return {"answer": "Yes"} + + reference_accuracy = LLMEvaluator( + prompt_template="Is the output accurate with respect to the expected output? " + "Y/N\nOutput: {output}\nExpected: {expected}", + score_config=CategoricalScoreConfig( + key="reference_accuracy", + choices=["Y", "N"], + description="Whether the output is accurate with respect to " + "the expected output.", + include_explanation=False, + ), + ) + + accuracy = LLMEvaluator( + prompt_template=[ + ( + "system", + "Is the output accurate with respect to the context and " + "question? 
Y/N", + ), + ("human", "Context: {context}\nQuestion: {question}\nOutput: {output}"), + ], + score_config=CategoricalScoreConfig( + key="accuracy", + choices=["Y", "N"], + description="Whether the output is accurate with respect to " + "the context and question.", + include_explanation=True, + ), + map_variables=lambda run, example: { + "context": example.inputs.get("context", "") if example else "", + "question": example.inputs.get("question", "") if example else "", + "output": run.outputs.get("output", "") if run.outputs else "", + }, + model_provider="anthropic", + model_name="claude-3-haiku-20240307", + ) + results = evaluate( + predict, + data=dataset_name, + evaluators=[reference_accuracy, accuracy], + experiment_prefix=__name__ + "::test_evaluate.evaluate", + ) + results.wait() + + await aevaluate( + apredict, + data=dataset_name, + evaluators=[reference_accuracy, accuracy], + experiment_prefix=__name__ + "::test_evaluate.aevaluate", + ) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py new file mode 100644 index 000000000..0bef1ba57 --- /dev/null +++ b/python/tests/integration_tests/test_prompts.py @@ -0,0 +1,561 @@ +from typing import Literal +from uuid import uuid4 + +import pytest +from langchain_core.prompts import ( + BasePromptTemplate, + ChatPromptTemplate, + PromptTemplate, +) +from langchain_core.runnables.base import RunnableSequence + +import langsmith.schemas as ls_schemas +import langsmith.utils as ls_utils +from langsmith.client import ( + Client, + convert_prompt_to_anthropic_format, + convert_prompt_to_openai_format, +) + + +@pytest.fixture +def langsmith_client() -> Client: + return Client(timeout_ms=(50_000, 90_000)) + + +@pytest.fixture +def prompt_template_1() -> ChatPromptTemplate: + return ChatPromptTemplate.from_template("tell me a joke about {topic}") + + +@pytest.fixture +def prompt_template_2() -> ChatPromptTemplate: + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant."), + ("human", "{question}"), + ] + ) + + +@pytest.fixture +def prompt_template_3() -> PromptTemplate: + return PromptTemplate.from_template("Summarize the following text: {text}") + + +@pytest.fixture +def prompt_with_model() -> dict: + return { + "id": ["langsmith", "playground", "PromptPlayground"], + "lc": 1, + "type": "constructor", + "kwargs": { + "last": { + "id": ["langchain", "schema", "runnable", "RunnableBinding"], + "lc": 1, + "type": "constructor", + "kwargs": { + "bound": { + "id": ["langchain", "chat_models", "openai", "ChatOpenAI"], + "lc": 1, + "type": "constructor", + "kwargs": { + "openai_api_key": { + "id": ["OPENAI_API_KEY"], + "lc": 1, + "type": "secret", + } + }, + }, + "kwargs": {}, + }, + }, + "first": { + "id": ["langchain", "prompts", "chat", "ChatPromptTemplate"], + "lc": 1, + "type": "constructor", + "kwargs": { + "messages": [ + { + "id": [ + "langchain", + "prompts", + "chat", + "SystemMessagePromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "prompt": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "template": "You are a chatbot.", + "input_variables": [], + "template_format": "f-string", + }, + } + }, + }, + { + "id": [ + "langchain", + "prompts", + "chat", + "HumanMessagePromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "prompt": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate", + ], + "lc": 1, + "type": 
"constructor", + "kwargs": { + "template": "{question}", + "input_variables": ["question"], + "template_format": "f-string", + }, + } + }, + }, + ], + "input_variables": ["question"], + }, + }, + }, + } + + +@pytest.fixture +def chat_prompt_template(): + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a chatbot"), + ("user", "{question}"), + ] + ) + + +def test_current_tenant_is_owner(langsmith_client: Client): + settings = langsmith_client._get_settings() + assert langsmith_client._current_tenant_is_owner(settings.tenant_handle or "-") + assert langsmith_client._current_tenant_is_owner("-") + assert not langsmith_client._current_tenant_is_owner("non_existent_owner") + + +def test_list_prompts(langsmith_client: Client): + response = langsmith_client.list_prompts(limit=10, offset=0) + assert isinstance(response, ls_schemas.ListPromptsResponse) + assert len(response.repos) <= 10 + + +def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + url = langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + assert isinstance(url, str) + assert langsmith_client._prompt_exists(prompt_name) + + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.repo_handle == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): + non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" + assert not langsmith_client._prompt_exists(non_existent_prompt) + + existent_prompt = f"existent_{uuid4().hex[:8]}" + assert langsmith_client.push_prompt(existent_prompt, object=prompt_template_2) + assert langsmith_client._prompt_exists(existent_prompt) + + langsmith_client.delete_prompt(existent_prompt) + + +def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + updated_data = langsmith_client.update_prompt( + prompt_name, + description="Updated description", + is_public=True, + tags=["test", "update"], + ) + assert isinstance(updated_data, dict) + + updated_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(updated_prompt, ls_schemas.Prompt) + assert updated_prompt.description == "Updated description" + assert updated_prompt.is_public + assert set(updated_prompt.tags) == set(["test", "update"]) + + langsmith_client.delete_prompt(prompt_name) + + +def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + assert langsmith_client._prompt_exists(prompt_name) + langsmith_client.delete_prompt(prompt_name) + assert not langsmith_client._prompt_exists(prompt_name) + + +def test_pull_prompt_object( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + manifest = langsmith_client.pull_prompt_commit(prompt_name) + assert isinstance(manifest, ls_schemas.PromptCommit) + assert manifest.repo == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + 
langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + # test pulling with just prompt name + pulled_prompt = langsmith_client.pull_prompt(prompt_name) + assert isinstance(pulled_prompt, ChatPromptTemplate) + assert ( + pulled_prompt.metadata and pulled_prompt.metadata["lc_hub_repo"] == prompt_name + ) + + # test pulling with private owner (-) and name + pulled_prompt_2 = langsmith_client.pull_prompt(f"-/{prompt_name}") + assert pulled_prompt == pulled_prompt_2 + + # test pulling with tenant handle and name + tenant_handle = langsmith_client._get_settings().tenant_handle + pulled_prompt_3 = langsmith_client.pull_prompt(f"{tenant_handle}/{prompt_name}") + assert pulled_prompt.metadata and pulled_prompt_3.metadata + assert ( + pulled_prompt.metadata["lc_hub_commit_hash"] + == pulled_prompt_3.metadata["lc_hub_commit_hash"] + ) + assert pulled_prompt_3.metadata["lc_hub_owner"] == tenant_handle + + # test pulling with handle, name and commit hash + tenant_handle = langsmith_client._get_settings().tenant_handle + pulled_prompt_4 = langsmith_client.pull_prompt( + f"{tenant_handle}/{prompt_name}:latest" + ) + assert pulled_prompt_3 == pulled_prompt_4 + + # test pulling without handle, with commit hash + assert pulled_prompt_4.metadata + pulled_prompt_5 = langsmith_client.pull_prompt( + f"{prompt_name}:{pulled_prompt_4.metadata['lc_hub_commit_hash']}" + ) + assert pulled_prompt_5.metadata + assert ( + pulled_prompt_4.metadata["lc_hub_commit_hash"] + == pulled_prompt_5.metadata["lc_hub_commit_hash"] + ) + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_and_pull_prompt( + langsmith_client: Client, prompt_template_2: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + + push_result = langsmith_client.push_prompt(prompt_name, object=prompt_template_2) + assert isinstance(push_result, str) + + pulled_prompt = langsmith_client.pull_prompt(prompt_name) + assert isinstance(pulled_prompt, ChatPromptTemplate) + + langsmith_client.delete_prompt(prompt_name) + + # should fail + with pytest.raises(ls_utils.LangSmithUserError): + langsmith_client.push_prompt( + f"random_handle/{prompt_name}", object=prompt_template_2 + ) + + +def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): + prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_with_model) + + pulled_prompt = langsmith_client.pull_prompt(prompt_name, include_model=True) + assert isinstance(pulled_prompt, RunnableSequence) + if getattr(pulled_prompt, "first", None): + first = getattr(pulled_prompt, "first") + assert isinstance(first, BasePromptTemplate) + assert first.metadata and first.metadata["lc_hub_repo"] == prompt_name + else: + assert False, "pulled_prompt.first should exist, incorrect prompt format" + + langsmith_client.delete_prompt(prompt_name) + + +def test_like_unlike_prompt( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + langsmith_client.like_prompt(prompt_name) + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.num_likes == 1 + + langsmith_client.unlike_prompt(prompt_name) + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.num_likes == 0 + + langsmith_client.delete_prompt(prompt_name) + + +def test_get_latest_commit_hash( + 
langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") + assert isinstance(commit_hash, str) + assert len(commit_hash) > 0 + + langsmith_client.delete_prompt(prompt_name) + + +def test_create_prompt(langsmith_client: Client): + prompt_name = f"test_create_prompt_{uuid4().hex[:8]}" + created_prompt = langsmith_client.create_prompt( + prompt_name, + description="Test description", + readme="Test readme", + tags=["test", "create"], + is_public=False, + ) + assert isinstance(created_prompt, ls_schemas.Prompt) + assert created_prompt.repo_handle == prompt_name + assert created_prompt.description == "Test description" + assert created_prompt.readme == "Test readme" + assert set(created_prompt.tags) == set(["test", "create"]) + assert not created_prompt.is_public + + langsmith_client.delete_prompt(prompt_name) + + +def test_create_commit( + langsmith_client: Client, + prompt_template_2: ChatPromptTemplate, + prompt_template_3: PromptTemplate, +): + prompt_name = f"test_create_commit_{uuid4().hex[:8]}" + try: + # this should fail because the prompt does not exist + commit_url = langsmith_client.create_commit( + prompt_name, object=prompt_template_2 + ) + pytest.fail("Expected LangSmithNotFoundError was not raised") + except ls_utils.LangSmithNotFoundError as e: + assert str(e) == "Prompt does not exist, you must create it first." + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + langsmith_client.push_prompt(prompt_name, object=prompt_template_3) + commit_url = langsmith_client.create_commit(prompt_name, object=prompt_template_2) + assert isinstance(commit_url, str) + assert prompt_name in commit_url + + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.num_commits == 2 + + # try submitting different types of unaccepted manifests + try: + # this should fail + commit_url = langsmith_client.create_commit(prompt_name, object={"hi": "hello"}) + except ls_utils.LangSmithError as e: + err = str(e) + assert "Manifest must have an id field" in err + assert "400 Client Error" in err + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + try: + # this should fail + commit_url = langsmith_client.create_commit(prompt_name, object={"id": ["hi"]}) + except ls_utils.LangSmithError as e: + err = str(e) + assert "Manifest type hi is not supported" in err + assert "400 Client Error" in err + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_prompt( + langsmith_client: Client, + prompt_template_3: PromptTemplate, + prompt_template_2: ChatPromptTemplate, +): + prompt_name = f"test_push_new_{uuid4().hex[:8]}" + url = langsmith_client.push_prompt( + prompt_name, + object=prompt_template_3, + is_public=True, + description="New prompt", + tags=["new", "test"], + ) + + assert isinstance(url, str) + assert prompt_name in url + + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.is_public + assert prompt.description == "New prompt" + assert "new" in prompt.tags + assert "test" in prompt.tags + assert prompt.num_commits == 1 + + # test updating prompt metadata but not manifest + url = langsmith_client.push_prompt( + prompt_name, + is_public=False, + 
description="Updated prompt", + ) + + updated_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(updated_prompt, ls_schemas.Prompt) + assert updated_prompt.description == "Updated prompt" + assert not updated_prompt.is_public + assert updated_prompt.num_commits == 1 + + # test updating prompt manifest but not metadata + url = langsmith_client.push_prompt( + prompt_name, + object=prompt_template_2, + ) + assert isinstance(url, str) + + langsmith_client.delete_prompt(prompt_name) + + +@pytest.mark.parametrize("is_public,expected_count", [(True, 1), (False, 1)]) +def test_list_prompts_filter( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + is_public: bool, + expected_count: int, +): + prompt_name = f"test_list_filter_{uuid4().hex[:8]}" + langsmith_client.push_prompt( + prompt_name, object=prompt_template_1, is_public=is_public + ) + + response = langsmith_client.list_prompts(is_public=is_public, query=prompt_name) + + assert response.total == expected_count + if expected_count > 0: + assert response.repos[0].repo_handle == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_update_prompt_archive( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_archive_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + langsmith_client.update_prompt(prompt_name, is_archived=True) + archived_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(archived_prompt, ls_schemas.Prompt) + assert archived_prompt.is_archived + + langsmith_client.update_prompt(prompt_name, is_archived=False) + unarchived_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(unarchived_prompt, ls_schemas.Prompt) + assert not unarchived_prompt.is_archived + + langsmith_client.delete_prompt(prompt_name) + + +@pytest.mark.parametrize( + "sort_field, sort_direction", + [ + (ls_schemas.PromptSortField.updated_at, "desc"), + ], +) +def test_list_prompts_sorting( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + sort_field: ls_schemas.PromptSortField, + sort_direction: Literal["asc", "desc"], +): + prompt_names = [f"test_sort_{i}_{uuid4().hex[:8]}" for i in range(3)] + for name in prompt_names: + langsmith_client.push_prompt(name, object=prompt_template_1) + + response = langsmith_client.list_prompts( + sort_field=sort_field, sort_direction=sort_direction, limit=10 + ) + + assert len(response.repos) >= 3 + sorted_names = [ + repo.repo_handle for repo in response.repos if repo.repo_handle in prompt_names + ] + assert sorted_names == sorted(sorted_names, reverse=(sort_direction == "desc")) + + for name in prompt_names: + langsmith_client.delete_prompt(name) + + +def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_prompt_to_openai_format( + invoked, + ) + expected = { + "messages": [ + {"content": "You are a chatbot", "role": "system"}, + {"content": "What is the meaning of life?", "role": "user"}, + ], + "model": "gpt-3.5-turbo", + "stream": False, + "n": 1, + "temperature": 0.7, + } + assert {k: res[k] for k in expected.keys()} == expected + + +def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_prompt_to_anthropic_format(invoked, {"model_name": "claude-2"}) + + assert res == { + "model": 
"claude-2", + "max_tokens": 1024, + "messages": [{"role": "user", "content": "What is the meaning of life?"}], + "system": "You are a chatbot", + } diff --git a/python/tests/integration_tests/test_runs.py b/python/tests/integration_tests/test_runs.py index 165a0cf6f..c9b62661e 100644 --- a/python/tests/integration_tests/test_runs.py +++ b/python/tests/integration_tests/test_runs.py @@ -1,8 +1,9 @@ import asyncio import time +import uuid from collections import defaultdict from concurrent.futures import ThreadPoolExecutor -from typing import AsyncGenerator, Generator, Optional +from typing import AsyncGenerator, Generator, Optional, Sequence import pytest # type: ignore @@ -24,11 +25,14 @@ def poll_runs_until_count( max_retries: int = 10, sleep_time: int = 2, require_success: bool = True, + filter_: Optional[str] = None, ): retries = 0 while retries < max_retries: try: - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list( + langchain_client.list_runs(project_name=project_name, filter=filter_) + ) if len(runs) == count: if not require_success or all( [run.status == "success" for run in runs] @@ -45,8 +49,7 @@ def test_nested_runs( langchain_client: Client, ): project_name = "__My Tracer Project - test_nested_runs" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") def my_run(text: str): @@ -61,10 +64,20 @@ def my_llm_run(text: str): def my_chain_run(text: str): return my_run(text) - my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + my_chain_run( + "foo", + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) for _ in range(15): try: - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list( + langchain_client.list_runs( + project_name=project_name, + filter=f"and(eq(metadata_key,'test_run'),eq(metadata_value,'{run_meta}'))", + ) + ) assert len(runs) == 3 break except (ls_utils.LangSmithError, AssertionError): @@ -81,10 +94,6 @@ def my_chain_run(text: str): assert runs_dict["my_llm_run"].parent_run_id == runs_dict["my_run"].id assert runs_dict["my_llm_run"].run_type == "llm" assert runs_dict["my_llm_run"].inputs == {"text": "foo"} - try: - langchain_client.delete_project(project_name=project_name) - except Exception: - pass async def test_list_runs_multi_project(langchain_client: Client): @@ -92,35 +101,36 @@ async def test_list_runs_multi_project(langchain_client: Client): "__My Tracer Project - test_list_runs_multi_project", "__My Tracer Project - test_list_runs_multi_project2", ] - try: - for project_name in project_names: - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) - - @traceable(run_type="chain") - async def my_run(text: str): - return "Completed: " + text - - for project_name in project_names: - await my_run("foo", langsmith_extra=dict(project_name=project_name)) - poll_runs_until_count(langchain_client, project_names[0], 1) - poll_runs_until_count(langchain_client, project_names[1], 1) - runs = list(langchain_client.list_runs(project_name=project_names)) - assert len(runs) == 2 - assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore - assert runs[0].session_id != runs[1].session_id - - finally: - for project_name in project_names: - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + + 
@traceable(run_type="chain") + async def my_run(text: str): + return "Completed: " + text + + run_meta = uuid.uuid4().hex + for project_name in project_names: + await my_run( + "foo", + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + + poll_runs_until_count(langchain_client, project_names[0], 1, filter_=filter_) + runs = list( + langchain_client.list_runs( + project_name=project_names, + filter=filter_, + ) + ) + assert len(runs) == 2 + assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore + assert runs[0].session_id != runs[1].session_id async def test_nested_async_runs(langchain_client: Client): """Test nested runs with a mix of async and sync functions.""" project_name = "__My Tracer Project - test_nested_async_runs" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) executor = ThreadPoolExecutor(max_workers=1) @traceable(run_type="chain") @@ -143,10 +153,15 @@ def my_sync_tool(text: str, *, my_arg: int = 10): async def my_chain_run(text: str): return await my_run(text) - await my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + meta = uuid.uuid4().hex + await my_chain_run( + "foo", + langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}), + ) executor.shutdown(wait=True) - poll_runs_until_count(langchain_client, project_name, 4) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 4, filter_=_filter) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) assert len(runs) == 4 runs_dict = {run.name: run for run in runs} assert runs_dict["my_chain_run"].parent_run_id is None @@ -162,14 +177,11 @@ async def my_chain_run(text: str): "text": "foo", "my_arg": 20, } - langchain_client.delete_project(project_name=project_name) async def test_nested_async_runs_with_threadpool(langchain_client: Client): """Test nested runs with a mix of async and sync functions.""" project_name = "__My Tracer Project - test_nested_async_runs_with_threadpol" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) @traceable(run_type="llm") async def async_llm(text: str): @@ -191,7 +203,12 @@ def my_run(text: str, *, run_tree: Optional[RunTree] = None): thread_pool = ThreadPoolExecutor(max_workers=1) for i in range(3): thread_pool.submit( - my_tool_run, f"Child Tool {i}", langsmith_extra={"run_tree": run_tree} + my_tool_run, + f"Child Tool {i}", + langsmith_extra={ + "run_tree": run_tree, + "metadata": getattr(run_tree, "metadata", {}), + }, ) thread_pool.shutdown(wait=True) return llm_run_result @@ -203,16 +220,27 @@ async def my_chain_run(text: str, run_tree: RunTree): thread_pool = ThreadPoolExecutor(max_workers=3) for i in range(2): thread_pool.submit( - my_run, f"Child {i}", langsmith_extra=dict(run_tree=run_tree) + my_run, + f"Child {i}", + langsmith_extra=dict(run_tree=run_tree, metadata=run_tree.metadata), ) thread_pool.shutdown(wait=True) return text - await my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + meta = uuid.uuid4().hex + await my_chain_run( + "foo", + langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}), + ) executor.shutdown(wait=True) - 
poll_runs_until_count(langchain_client, project_name, 17) - runs = list(langchain_client.list_runs(project_name=project_name)) - trace_runs = list(langchain_client.list_runs(trace_id=runs[0].trace_id)) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 17, filter_=filter_) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) + trace_runs = list( + langchain_client.list_runs( + trace_id=runs[0].trace_id, project_name=project_name, filter=filter_ + ) + ) assert len(trace_runs) == 17 assert len(runs) == 17 assert sum([run.run_type == "llm" for run in runs]) == 8 @@ -244,14 +272,15 @@ async def my_chain_run(text: str, run_tree: RunTree): async def test_context_manager(langchain_client: Client) -> None: project_name = "__My Tracer Project - test_context_manager" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) @traceable(run_type="llm") async def my_llm(prompt: str) -> str: return f"LLM {prompt}" - with trace("my_context", "chain", project_name=project_name) as run_tree: + meta = uuid.uuid4().hex + with trace( + "my_context", "chain", project_name=project_name, metadata={"test_run": meta} + ) as run_tree: await my_llm("foo") with trace("my_context2", "chain", run_tree=run_tree) as run_tree2: runs = [my_llm("baz"), my_llm("qux")] @@ -260,25 +289,35 @@ async def my_llm(prompt: str) -> str: await my_llm("corge") await asyncio.gather(*runs) run_tree.end(outputs={"End val": "my_context2"}) - poll_runs_until_count(langchain_client, project_name, 8) - runs_ = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 8, filter_=_filter) + runs_ = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) assert len(runs_) == 8 -async def test_sync_generator(langchain_client: Client): +def test_sync_generator(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") def my_generator(num: int) -> Generator[str, None, None]: for i in range(num): yield f"Yielded {i}" - results = list(my_generator(5, langsmith_extra=dict(project_name=project_name))) + results = list( + my_generator( + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + ) assert results == ["Yielded 0", "Yielded 1", "Yielded 2", "Yielded 3", "Yielded 4"] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=_filter + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_generator" @@ -287,12 +326,11 @@ def my_generator(num: int) -> Generator[str, None, None]: } -async def test_sync_generator_reduce_fn(langchain_client: Client): +def test_sync_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator_reduce_fn" - if langchain_client.has_project(project_name): - 
langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex - def reduce_fn(outputs: list) -> dict: + def reduce_fn(outputs: Sequence) -> dict: return {"my_output": " ".join(outputs)} @traceable(run_type="chain", reduce_fn=reduce_fn) @@ -300,10 +338,20 @@ def my_generator(num: int) -> Generator[str, None, None]: for i in range(num): yield f"Yielded {i}" - results = list(my_generator(5, langsmith_extra=dict(project_name=project_name))) + results = list( + my_generator( + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + ) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' assert results == ["Yielded 0", "Yielded 1", "Yielded 2", "Yielded 3", "Yielded 4"] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=filter_ + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_generator" @@ -316,8 +364,7 @@ def my_generator(num: int) -> Generator[str, None, None]: async def test_async_generator(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") async def my_async_generator(num: int) -> AsyncGenerator[str, None]: @@ -328,7 +375,10 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: results = [ item async for item in my_async_generator( - 5, langsmith_extra=dict(project_name=project_name) + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), ) ] assert results == [ @@ -338,8 +388,11 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: "Async yielded 3", "Async yielded 4", ] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=_filter + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_async_generator" @@ -356,10 +409,9 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: async def test_async_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator_reduce_fn" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex - def reduce_fn(outputs: list) -> dict: + def reduce_fn(outputs: Sequence) -> dict: return {"my_output": " ".join(outputs)} @traceable(run_type="chain", reduce_fn=reduce_fn) @@ -371,7 +423,10 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: results = [ item async for item in my_async_generator( - 5, langsmith_extra=dict(project_name=project_name) + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), ) ] assert results == [ @@ -381,11 +436,11 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: "Async yielded 3", "Async yielded 4", ] - + 
filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' poll_runs_until_count( - langchain_client, project_name, 1, max_retries=20, sleep_time=5 + langchain_client, project_name, 1, max_retries=20, sleep_time=5, filter_=filter_ ) - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_async_generator" diff --git a/python/tests/integration_tests/wrappers/test_openai.py b/python/tests/integration_tests/wrappers/test_openai.py index 40804b96d..d12e77da6 100644 --- a/python/tests/integration_tests/wrappers/test_openai.py +++ b/python/tests/integration_tests/wrappers/test_openai.py @@ -4,6 +4,7 @@ import pytest +import langsmith from langsmith.wrappers import wrap_openai @@ -12,8 +13,9 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool): import openai # noqa + client = langsmith.Client(session=mock_session()) original_client = openai.Client() - patched_client = wrap_openai(openai.Client()) + patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client}) messages = [{"role": "user", "content": "Say 'foo'"}] original = original_client.chat.completions.create( messages=messages, # noqa: [arg-type] @@ -41,7 +43,7 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool): assert original.choices == patched.choices # Give the thread a chance. time.sleep(0.01) - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" @@ -50,8 +52,9 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool): async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool): import openai # noqa + client = langsmith.Client(session=mock_session()) original_client = openai.AsyncClient() - patched_client = wrap_openai(openai.AsyncClient()) + patched_client = wrap_openai(openai.AsyncClient(), tracing_extra={"client": client}) messages = [{"role": "user", "content": "Say 'foo'"}] original = await original_client.chat.completions.create( messages=messages, stream=stream, temperature=0, seed=42, model="gpt-3.5-turbo" @@ -75,7 +78,7 @@ async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool): assert original.choices == patched.choices # Give the thread a chance. time.sleep(0.1) - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" @@ -84,8 +87,9 @@ async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool): def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): import openai + client = langsmith.Client(session=mock_session()) original_client = openai.Client() - patched_client = wrap_openai(openai.Client()) + patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client}) prompt = ("Say 'Foo' then stop.",) original = original_client.completions.create( model="gpt-3.5-turbo-instruct", @@ -115,7 +119,7 @@ def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): assert original.choices == patched.choices # Give the thread a chance. 
time.sleep(0.1) - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" @@ -124,8 +128,15 @@ def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool): import openai + client = langsmith.Client(session=mock_session()) + original_client = openai.AsyncClient() - patched_client = wrap_openai(openai.AsyncClient()) + patched_client = wrap_openai( + openai.AsyncClient(), + tracing_extra={"client": client}, + chat_name="chattychat", + completions_name="incompletions", + ) prompt = ("Say 'Hi i'm ChatGPT' then stop.",) original = await original_client.completions.create( model="gpt-3.5-turbo-instruct", @@ -158,7 +169,10 @@ async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool) assert type(original) == type(patched) assert original.choices == patched.choices # Give the thread a chance. - time.sleep(0.1) + for _ in range(10): + time.sleep(0.1) + if mock_session.return_value.request.call_count >= 1: + break assert mock_session.return_value.request.call_count >= 1 - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py new file mode 100644 index 000000000..147f46d1c --- /dev/null +++ b/python/tests/unit_tests/test_anonymizer.py @@ -0,0 +1,141 @@ +# mypy: disable-error-code="annotation-unchecked" +import json +import re +import uuid +from typing import List, Union, cast +from unittest.mock import MagicMock +from uuid import uuid4 + +from pydantic import BaseModel + +from langsmith import Client, traceable, tracing_context +from langsmith.anonymizer import StringNodeRule, create_anonymizer + +EMAIL_REGEX = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}") +UUID_REGEX = re.compile( + r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" +) + + +def test_replacer_function(): + def replacer(text: str, _: List[Union[str, int]]): + text = EMAIL_REGEX.sub("[email address]", text) + text = UUID_REGEX.sub("[uuid]", text) + return text + + assert create_anonymizer(replacer)( + { + "message": "Hello, this is my email: hello@example.com", + "metadata": str(uuid4()), + } + ) == { + "message": "Hello, this is my email: [email address]", + "metadata": "[uuid]", + } + + assert create_anonymizer(replacer)(["human", "hello@example.com"]) == [ + "human", + "[email address]", + ] + assert create_anonymizer(replacer)("hello@example.com") == "[email address]" + + +def test_replacer_lambda(): + assert create_anonymizer(lambda text: EMAIL_REGEX.sub("[email address]", text))( + { + "message": "Hello, this is my email: hello@example.com", + } + ) == { + "message": "Hello, this is my email: [email address]", + } + + +def test_replacer_declared(): + replacers = [ + StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"), + StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"), + ] + + assert create_anonymizer(replacers)( + { + "message": "Hello, this is my email: hello@example.com", + "metadata": str(uuid4()), + } + ) == { + "message": "Hello, this is my email: [email address]", + "metadata": "[uuid]", + } + + assert create_anonymizer(replacers)(["human", "hello@example.com"]) == [ + "human", + "[email address]", + ] + + assert 
create_anonymizer(replacers)("hello@example.com") == "[email address]" + + +def test_replacer_declared_in_traceable(): + replacers = [ + StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"), + StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"), + ] + anonymizer = create_anonymizer(replacers) + mock_client = Client( + session=MagicMock(), + auto_batch_tracing=False, + anonymizer=anonymizer, + api_url="http://localhost:1984", + api_key="123", + ) + + user_email = "my-test@langchain.ai" + user_id = "4ae21a90-d43b-4017-bb21-4fd9add235ff" + + class MyOutput(BaseModel): + user_email: str + user_id: uuid.UUID + body: str + + class MyInput(BaseModel): + from_email: str + + @traceable(client=mock_client) + def my_func(body: str, from_: MyInput) -> MyOutput: + return MyOutput(user_email=user_email, user_id=user_id, body=body) + + body_ = "Hello from Pluto" + with tracing_context(enabled=True): + res = my_func(body_, from_=MyInput(from_email="my-from-test@langchain.ai")) + expected = MyOutput(user_email=user_email, user_id=uuid.UUID(user_id), body=body_) + assert res == expected + # get posts + posts = [ + json.loads(call[2]["data"]) + for call in mock_client.session.request.mock_calls + if call.args and call.args[1].endswith("runs") + ] + + patches = [ + json.loads(call[2]["data"]) + for call in mock_client.session.request.mock_calls + if call.args + and cast(str, call.args[0]).lower() == "patch" + and "/runs" in call.args[1] + ] + + expected_inputs = {"from_": {"from_email": "[email address]"}, "body": body_} + expected_outputs = { + "output": { + "user_email": "[email address]", + "user_id": "[uuid]", + "body": body_, + } + } + assert len(posts) == 1 + posted_data = posts[0] + assert posted_data["inputs"] == expected_inputs + assert len(patches) == 1 + patched_data = patches[0] + if "inputs" in patched_data: + assert patched_data["inputs"] == expected_inputs + assert patched_data["outputs"] == expected_outputs diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index 62dd302fd..c60afa702 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -11,7 +11,7 @@ import time import uuid import weakref -from datetime import datetime +from datetime import datetime, timezone from enum import Enum from io import BytesIO from typing import Any, NamedTuple, Optional @@ -28,6 +28,8 @@ import langsmith.env as ls_env import langsmith.utils as ls_utils +from langsmith import EvaluationResult, run_trees +from langsmith import schemas as ls_schemas from langsmith.client import ( Client, _dumps_json, @@ -37,7 +39,6 @@ _is_localhost, _serialize_json, ) -from langsmith.schemas import Example _CREATED_AT = datetime(2015, 1, 1, 0, 0, 0) @@ -173,14 +174,14 @@ def test_headers(monkeypatch: pytest.MonkeyPatch) -> None: @mock.patch("langsmith.client.requests.Session") def test_upload_csv(mock_session_cls: mock.Mock) -> None: dataset_id = str(uuid.uuid4()) - example_1 = Example( + example_1 = ls_schemas.Example( id=str(uuid.uuid4()), created_at=_CREATED_AT, inputs={"input": "1"}, outputs={"output": "2"}, dataset_id=dataset_id, ) - example_2 = Example( + example_2 = ls_schemas.Example( id=str(uuid.uuid4()), created_at=_CREATED_AT, inputs={"input": "3"}, @@ -197,7 +198,13 @@ def test_upload_csv(mock_session_cls: mock.Mock) -> None: "examples": [example_1, example_2], } mock_session = mock.Mock() - mock_session.post.return_value = mock_response + + def mock_request(*args, **kwargs): # type: ignore + if args[0] == "POST" and 
args[1].endswith("datasets"): + return mock_response + return MagicMock() + + mock_session.request.return_value = mock_response mock_session_cls.return_value = mock_session client = Client( @@ -303,6 +310,74 @@ def test_create_run_unicode() -> None: client.update_run(id_, status="completed") +def test_create_run_mutate() -> None: + inputs = {"messages": ["hi"], "mygen": (i for i in range(10))} + session = mock.Mock() + session.request = mock.Mock() + client = Client( + api_url="http://localhost:1984", + api_key="123", + session=session, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + id_ = uuid.uuid4() + run_dict = dict( + id=id_, + name="my_run", + inputs=inputs, + run_type="llm", + trace_id=id_, + dotted_order=run_trees._create_current_dotted_order( + datetime.now(timezone.utc), id_ + ), + ) + client.create_run(**run_dict) # type: ignore + inputs["messages"].append("there") # type: ignore + outputs = {"messages": ["hi", "there"]} + client.update_run( + id_, + outputs=outputs, + end_time=datetime.now(timezone.utc), + trace_id=id_, + dotted_order=run_dict["dotted_order"], + ) + for _ in range(10): + time.sleep(0.1) # Give the background thread time to stop + payloads = [ + json.loads(call[2]["data"]) + for call in session.request.mock_calls + if call.args and call.args[1].endswith("runs/batch") + ] + if payloads: + break + posts = [pr for payload in payloads for pr in payload.get("post", [])] + patches = [pr for payload in payloads for pr in payload.get("patch", [])] + inputs = next( + (pr["inputs"] for pr in itertools.chain(posts, patches) if pr.get("inputs")), + {}, + ) + outputs = next( + (pr["outputs"] for pr in itertools.chain(posts, patches) if pr.get("outputs")), + {}, + ) + # Check that the mutated value wasn't posted + assert "messages" in inputs + assert inputs["messages"] == ["hi"] + assert "mygen" in inputs + assert inputs["mygen"].startswith( # type: ignore + "." 
+ ) + assert outputs == {"messages": ["hi", "there"]} + + class CallTracker: def __init__(self) -> None: self.counter = 0 @@ -356,27 +431,43 @@ def mock_get(*args, **kwargs): assert client.tracing_queue client.tracing_queue.join() - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) >= 1 for call in request_calls: assert call.args[0] == "POST" assert call.args[1] == "http://localhost:1984/runs/batch" - get_calls = [call for call in session.get.mock_calls if call.args] + get_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "GET" + ] # assert len(get_calls) == 1 for call in get_calls: - assert call.args[0] == f"{api_url}/info" + assert call.args[1] == f"{api_url}/info" else: - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) == 10 for call in request_calls: assert call.args[0] == "POST" assert call.args[1] == "http://localhost:1984/runs" if auto_batch_tracing: - get_calls = [call for call in session.get.mock_calls if call.args] + get_calls = [ + call + for call in session.get.mock_calls + if call.args and call.args[0] == "GET" + ] for call in get_calls: - assert call.args[0] == f"{api_url}/info" + assert call.args[1] == f"{api_url}/info" del client time.sleep(3) # Give the background thread time to stop gc.collect() # Force garbage collection @@ -399,7 +490,11 @@ def test_client_gc_no_batched_runs(auto_batch_tracing: bool) -> None: # because no trace_id/dotted_order provided, auto batch is disabled for _ in range(10): client.create_run("my_run", inputs={}, run_type="llm", id=uuid.uuid4()) - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) == 10 for call in request_calls: assert call.args[1] == "http://localhost:1984/runs" @@ -441,7 +536,11 @@ def filter_outputs(outputs: dict): ) expected.append(output_val + "goodbye") - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] in {"POST", "PATCH"} + ] all_posted = "\n".join( [call.kwargs["data"].decode("utf-8") for call in request_calls] ) @@ -480,7 +579,11 @@ def test_client_gc_after_autoscale() -> None: gc.collect() # Force garbage collection assert tracker.counter == 1, "Client was not garbage collected" - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) >= 500 and len(request_calls) <= 550 for call in request_calls: assert call.args[0] == "POST" @@ -795,6 +898,9 @@ def test_host_url(_: MagicMock) -> None: client = Client(api_url="http://localhost:8000", api_key="API_KEY") assert client._host_url == "http://localhost" + client = Client(api_url="https://eu.api.smith.langchain.com", api_key="API_KEY") + assert client._host_url == "https://eu.smith.langchain.com" + client = Client(api_url="https://dev.api.smith.langchain.com", api_key="API_KEY") assert client._host_url == "https://dev.smith.langchain.com" @@ -805,7 +911,7 @@ def 
test_host_url(_: MagicMock) -> None: @patch("langsmith.client.time.sleep") def test_retry_on_connection_error(mock_sleep: MagicMock): mock_session = MagicMock() - client = Client(api_key="test", session=mock_session) + client = Client(api_key="test", session=mock_session, auto_batch_tracing=False) mock_session.request.side_effect = requests.ConnectionError() with pytest.raises(ls_utils.LangSmithConnectionError): @@ -816,7 +922,7 @@ def test_retry_on_connection_error(mock_sleep: MagicMock): @patch("langsmith.client.time.sleep") def test_http_status_500_handling(mock_sleep): mock_session = MagicMock() - client = Client(api_key="test", session=mock_session) + client = Client(api_key="test", session=mock_session, auto_batch_tracing=False) mock_response = MagicMock() mock_response.status_code = 500 mock_response.raise_for_status.side_effect = HTTPError() @@ -830,12 +936,11 @@ def test_http_status_500_handling(mock_sleep): @patch("langsmith.client.time.sleep") def test_pass_on_409_handling(mock_sleep): mock_session = MagicMock() - client = Client(api_key="test", session=mock_session) + client = Client(api_key="test", session=mock_session, auto_batch_tracing=False) mock_response = MagicMock() mock_response.status_code = 409 mock_response.raise_for_status.side_effect = HTTPError() mock_session.request.return_value = mock_response - response = client.request_with_retries( "GET", "https://test.url", @@ -959,7 +1064,9 @@ def test_batch_ingest_run_splits_large_batches(payload_size: int): request_bodies = [ op for call in mock_session.request.call_args_list - for reqs in orjson.loads(call[1]["data"]).values() + for reqs in ( + orjson.loads(call[1]["data"]).values() if call[0][0] == "POST" else [] + ) for op in reqs ] all_run_ids = run_ids + patch_ids @@ -970,3 +1077,42 @@ def test_batch_ingest_run_splits_large_batches(payload_size: int): # Check that no duplicate run_ids are present in the request bodies assert len(request_bodies) == len(set([body["id"] for body in request_bodies])) + + +def test_select_eval_results(): + expected = EvaluationResult( + key="foo", + value="bar", + score=7899082, + metadata={"a": "b"}, + comment="hi", + feedback_config={"c": "d"}, + ) + client = Client(api_key="test") + for count, input_ in [ + (1, expected), + (1, expected.dict()), + (1, {"results": [expected]}), + (1, {"results": [expected.dict()]}), + (2, {"results": [expected.dict(), expected.dict()]}), + (2, {"results": [expected, expected]}), + ]: + op = client._select_eval_results(input_) + assert len(op) == count + assert op == [expected] * count + + expected2 = EvaluationResult( + key="foo", + metadata={"a": "b"}, + comment="this is a comment", + feedback_config={"c": "d"}, + ) + + as_reasoning = { + "reasoning": expected2.comment, + **expected2.dict(exclude={"comment"}), + } + for input_ in [as_reasoning, {"results": [as_reasoning]}, {"results": [expected2]}]: + assert client._select_eval_results(input_) == [ + expected2, + ] diff --git a/python/tests/unit_tests/test_expect.py b/python/tests/unit_tests/test_expect.py new file mode 100644 index 000000000..cdf7ea9b2 --- /dev/null +++ b/python/tests/unit_tests/test_expect.py @@ -0,0 +1,20 @@ +from unittest import mock + +from langsmith import expect +from langsmith._expect import ls_client + + +def _is_none(x: object) -> bool: + return x is None + + +@mock.patch.object(ls_client, "Client", autospec=True) +def test_expect_explicit_none(mock_client: mock.Mock) -> None: + expect(None).against(_is_none) + expect(None).to_be_none() + expect.score(1).to_equal(1) + 
expect.score(1).to_be_less_than(2) + expect.score(1).to_be_greater_than(0) + expect.score(1).to_be_between(0, 2) + expect.score(1).to_be_approximately(1, 2) + expect({1, 2}).to_contain(1) diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 61a1d2004..d3451e88f 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -2,27 +2,52 @@ import functools import inspect import json +import os import sys import time import uuid import warnings -from typing import Any, AsyncGenerator, Generator, Optional, cast +from typing import Any, AsyncGenerator, Generator, Optional, Set, cast from unittest.mock import MagicMock, patch import pytest import langsmith from langsmith import Client +from langsmith import schemas as ls_schemas from langsmith.run_helpers import ( _get_inputs, as_runnable, + get_current_run_tree, is_traceable_function, + trace, traceable, tracing_context, ) from langsmith.run_trees import RunTree +def _get_calls( + mock_client: Any, + minimum: Optional[int] = 0, + verbs: Set[str] = {"POST"}, + attempts: int = 5, +) -> list: + calls = [] + for _ in range(attempts): + calls = [ + c + for c in mock_client.session.request.mock_calls # type: ignore + if c.args and c.args[0] in verbs + ] + if minimum is None: + return calls + if minimum is not None and len(calls) > minimum: + break + time.sleep(0.1) + return calls + + def test__get_inputs_with_no_args() -> None: def foo() -> None: pass @@ -162,9 +187,9 @@ def foo(kwargs: int, *, b: int, c: int, **some_other_kwargs: Any) -> None: } -def _get_mock_client() -> Client: +def _get_mock_client(**kwargs: Any) -> Client: mock_session = MagicMock() - client = Client(session=mock_session, api_key="test") + client = Client(session=mock_session, api_key="test", **kwargs) return client @@ -178,12 +203,13 @@ def test_traceable_iterator(use_next: bool, mock_client: Client) -> None: with tracing_context(enabled=True): @traceable(client=mock_client) - def my_iterator_fn(a, b, d): + def my_iterator_fn(a, b, d, **kwargs): + assert kwargs == {"e": 5} for i in range(a + b + d): yield i expected = [0, 1, 2, 3, 4, 5] - genout = my_iterator_fn(1, 2, 3) + genout = my_iterator_fn(1, 2, 3, e=5) if use_next: results = [] while True: @@ -195,9 +221,9 @@ def my_iterator_fn(a, b, d): results = list(genout) assert results == expected # Wait for batcher - time.sleep(0.25) + # check the mock_calls - mock_calls = mock_client.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] @@ -216,12 +242,13 @@ def filter_inputs(kwargs: dict): return {"a": "FOOOOOO", "b": kwargs["b"], "d": kwargs["d"]} @traceable(client=mock_client, process_inputs=filter_inputs) - async def my_iterator_fn(a, b, d): + async def my_iterator_fn(a, b, d, **kwargs): + assert kwargs == {"e": 5} for i in range(a + b + d): yield i expected = [0, 1, 2, 3, 4, 5] - genout = my_iterator_fn(1, 2, 3) + genout = my_iterator_fn(1, 2, 3, e=5) if use_next: results = [] async for item in genout: @@ -229,10 +256,8 @@ async def my_iterator_fn(a, b, d): else: results = [item async for item in genout] assert results == expected - # Wait for batcher - await asyncio.sleep(0.25) # check the mock_calls - mock_calls = mock_client.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] @@ -240,9 +265,17 @@ async def my_iterator_fn(a, b, d): assert 
call.args[1].startswith("https://api.smith.langchain.com") body = json.loads(call.kwargs["data"]) assert body["post"] - assert body["post"][0]["outputs"]["output"] == expected - # Assert the inputs are filtered as expected assert body["post"][0]["inputs"] == {"a": "FOOOOOO", "b": 2, "d": 3} + outputs_ = body["post"][0]["outputs"] + if "output" in outputs_: + assert outputs_["output"] == expected + # Assert the inputs are filtered as expected + else: + # It was put in the second batch + assert len(mock_calls) == 2 + body_2 = json.loads(mock_calls[1].kwargs["data"]) + assert body_2["patch"] + assert body_2["patch"][0]["outputs"]["output"] == expected @patch("langsmith.run_trees.Client", autospec=True) @@ -322,6 +355,83 @@ async def my_function(a, b, d): assert result == [6, 7] +def test_traceable_parent_from_runnable_config() -> None: + try: + from langchain.callbacks.tracers import LangChainTracer + from langchain.schema.runnable import RunnableLambda + except ImportError: + pytest.skip("Skipping test that requires langchain") + with tracing_context(enabled=True): + mock_client_ = _get_mock_client() + + @traceable() + def my_function(a: int) -> int: + return a * 2 + + my_function_runnable = RunnableLambda(my_function) + + assert ( + my_function_runnable.invoke( + 1, {"callbacks": [LangChainTracer(client=mock_client_)]} + ) + == 2 + ) + # Inspect the mock_calls and assert that 2 runs were created, + # one for the parent and one for the child + mock_calls = _get_calls(mock_client_, minimum=2) + posts = [] + for call in mock_calls: + if call.args and call.args[0] != "GET": + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + posts.extend(body["post"]) + assert len(posts) == 2 + parent = next(p for p in posts if p["parent_run_id"] is None) + child = next(p for p in posts if p["parent_run_id"] is not None) + assert child["parent_run_id"] == parent["id"] + + +def test_traceable_parent_from_runnable_config_accepts_config() -> None: + try: + from langchain.callbacks.tracers import LangChainTracer + from langchain.schema.runnable import RunnableLambda + except ImportError: + pytest.skip("Skipping test that requires langchain") + with tracing_context(enabled=True): + mock_client_ = _get_mock_client() + + @traceable() + def my_function(a: int, config: dict) -> int: + assert isinstance(config, dict) + return a * 2 + + my_function_runnable = RunnableLambda(my_function) + + assert ( + my_function_runnable.invoke( + 1, {"callbacks": [LangChainTracer(client=mock_client_)]} + ) + == 2 + ) + # Inspect the mock_calls and assert that 2 runs were created, + # one for the parent and one for the child + mock_calls = _get_calls(mock_client_, minimum=2) + posts = [] + for call in mock_calls: + if call.args and call.args[0] != "GET": + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + posts.extend(body["post"]) + assert len(posts) == 2 + parent = next(p for p in posts if p["parent_run_id"] is None) + child = next(p for p in posts if p["parent_run_id"] is not None) + assert child["parent_run_id"] == parent["id"] + + def test_traceable_project_name() -> None: with tracing_context(enabled=True): mock_client_ = _get_mock_client() @@ -331,10 +441,9 @@ def my_function(a: int, b: int, d: int) -> int: return a + b + d my_function(1, 2, 3) - time.sleep(0.25) # Inspect the mock_calls and asser tthat "my 
foo project" is in # the session_name arg of the body - mock_calls = mock_client_.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client_, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] assert call.args[0] == "POST" @@ -350,12 +459,11 @@ def my_function(a: int, b: int, d: int) -> int: def my_other_function(run_tree) -> int: return my_function(1, 2, 3) - my_other_function() - time.sleep(0.25) + my_other_function() # type: ignore # Inspect the mock_calls and assert that "my bar project" is in # both all POST runs in the single request. We want to ensure # all runs in a trace are associated with the same project. - mock_calls = mock_client_.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client_, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] assert call.args[0] == "POST" @@ -628,7 +736,12 @@ def _get_run(r: RunTree) -> None: with tracing_context(enabled=True): chunks = my_answer( - "some_query", langsmith_extra={"on_end": _get_run, "client": mock_client_} + "some_query", + langsmith_extra={ + "name": "test_overridding_name", + "on_end": _get_run, + "client": mock_client_, + }, ) all_chunks = [] for chunk in chunks: @@ -643,7 +756,7 @@ def _get_run(r: RunTree) -> None: ] assert run is not None run = cast(RunTree, run) - assert run.name == "expand_and_answer_questions" + assert run.name == "test_overridding_name" child_runs = run.child_runs assert child_runs and len(child_runs) == 5 names = [run.name for run in child_runs] @@ -659,7 +772,8 @@ def _get_run(r: RunTree) -> None: def test_traceable_regular(): @traceable - def some_sync_func(query: str) -> list: + def some_sync_func(query: str, **kwargs: Any) -> list: + assert kwargs == {"a": 1, "b": 2} return [query, query] @traceable @@ -684,7 +798,7 @@ def summarize_answers(query: str, document_context: str) -> list: def my_answer( query: str, ) -> list: - expanded_terms = some_sync_func(query=query) + expanded_terms = some_sync_func(query=query, a=1, b=2) documents = some_func( queries=expanded_terms, ) @@ -739,7 +853,9 @@ def some_sync_func(query: str) -> list: return [query, query] @traceable - async def some_async_func(queries: list) -> list: + async def some_async_func(queries: list, *, required: str, **kwargs: Any) -> list: + assert required == "foo" + assert kwargs == {"a": 1, "b": 2} await asyncio.sleep(0.01) return queries @@ -765,7 +881,7 @@ async def my_answer( ) -> list: expanded_terms = some_sync_func(query=query) documents = await some_async_func( - queries=expanded_terms, + queries=expanded_terms, required="foo", a=1, b=2 ) await another_async_func(query=query) @@ -846,6 +962,65 @@ def _get_run(r: RunTree) -> None: assert child_runs[0].inputs == {"a": 1, "b": 2} +async def test_traceable_to_atrace(): + @traceable + async def great_grandchild_fn(a: int, b: int) -> int: + return a + b + + @traceable + async def parent_fn(a: int, b: int) -> int: + async with langsmith.trace( + name="child_fn", inputs={"a": a, "b": b} + ) as run_tree: + async with langsmith.trace( + "grandchild_fn", inputs={"a": a, "b": b, "c": "oh my"} + ) as run_tree_gc: + try: + async with langsmith.trace("expect_error", inputs={}): + raise ValueError("oh no") + except ValueError: + pass + result = await great_grandchild_fn(a, b) + run_tree_gc.end(outputs={"result": result}) + run_tree.end(outputs={"result": result}) + return result + + run: Optional[RunTree] = None # type: ignore + + def _get_run(r: RunTree) -> None: + nonlocal run + run = r + + with 
tracing_context(enabled=True): + result = await parent_fn( + 1, 2, langsmith_extra={"on_end": _get_run, "client": _get_mock_client()} + ) + + assert result == 3 + assert run is not None + run = cast(RunTree, run) + assert run.name == "parent_fn" + assert run.outputs == {"output": 3} + assert run.inputs == {"a": 1, "b": 2} + child_runs = run.child_runs + assert child_runs + assert len(child_runs) == 1 + child = child_runs[0] + assert child.name == "child_fn" + assert child.inputs == {"a": 1, "b": 2} + assert len(child.child_runs) == 1 + grandchild = child.child_runs[0] + assert grandchild.name == "grandchild_fn" + assert grandchild.inputs == {"a": 1, "b": 2, "c": "oh my"} + assert len(grandchild.child_runs) == 2 + ggcerror = grandchild.child_runs[0] + assert ggcerror.name == "expect_error" + assert "oh no" in str(ggcerror.error) + ggc = grandchild.child_runs[1] + assert ggc.name == "great_grandchild_fn" + assert ggc.inputs == {"a": 1, "b": 2} + + def test_trace_to_traceable(): @traceable def child_fn(a: int, b: int) -> int: @@ -870,3 +1045,418 @@ def child_fn(a: int, b: int) -> int: assert len(child_runs) == 1 assert child_runs[0].name == "child_fn" assert child_runs[0].inputs == {"a": 1, "b": 2} + + +def test_client_passed_when_traceable_parent(): + mock_client = _get_mock_client() + rt = RunTree(name="foo", client=mock_client) + headers = rt.to_headers() + + @traceable + def my_run(foo: str): + return {"baz": "buzz"} + + my_run(foo="bar", langsmith_extra={"parent": headers, "client": mock_client}) + mock_calls = _get_calls(mock_client) + assert len(mock_calls) == 1 + call = mock_client.session.request.call_args + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert body["post"][0]["inputs"] == {"foo": "bar"} + assert body["post"][0]["outputs"] == {"baz": "buzz"} + + +def test_client_passed_when_trace_parent(): + mock_client = _get_mock_client() + rt = RunTree(name="foo", client=mock_client) + headers = rt.to_headers() + with tracing_context(enabled=True): + with trace( + name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client + ) as rt: + rt.outputs["bar"] = "baz" + calls = _get_calls(mock_client) + assert len(calls) == 1 + call = calls[0] + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert body["post"][0]["inputs"] == {"foo": "bar"} + assert body["post"][0]["outputs"] == {"bar": "baz"} + + +def test_from_runnable_config(): + try: + from langchain_core.tools import tool # type: ignore + from langchain_core.tracers.langchain import LangChainTracer # type: ignore + except ImportError: + pytest.skip("Skipping test that requires langchain") + + gc_run_id = uuid.uuid4() + + @tool + def my_grandchild_tool(text: str, callbacks: Any = None) -> str: + """Foo.""" + lct: LangChainTracer = callbacks.handlers[0] + assert str(gc_run_id) in lct.run_map + run = lct.run_map[str(gc_run_id)] + assert run.name == "my_grandchild_tool" + assert run.run_type == "tool" + assert lct.project_name == "foo" + parent_run = lct.run_map[str(run.parent_run_id)] + assert parent_run + assert parent_run.name == "my_traceable" + assert parent_run.run_type == "retriever" + grandparent_run = lct.run_map[str(parent_run.parent_run_id)] + assert grandparent_run + assert grandparent_run.name == "my_tool" + assert grandparent_run.run_type == "tool" + return text + + 
@traceable(run_type="retriever") + def my_traceable(text: str) -> str: + rt = get_current_run_tree() + assert rt + assert rt.run_type == "retriever" + assert rt.parent_run_id + assert rt.parent_run + assert rt.parent_run.run_type == "tool" + assert rt.session_name == "foo" + return my_grandchild_tool.invoke({"text": text}, {"run_id": gc_run_id}) + + @tool + def my_tool(text: str) -> str: + """Foo.""" + return my_traceable(text) + + mock_client = _get_mock_client() + tracer = LangChainTracer(client=mock_client, project_name="foo") + my_tool.invoke({"text": "hello"}, {"callbacks": [tracer]}) + + +def test_io_interops(): + try: + from langchain.callbacks.tracers import LangChainTracer + from langchain.schema.runnable import RunnableLambda + except ImportError: + pytest.skip("Skipping test that requires langchain") + tracer = LangChainTracer(client=_get_mock_client(auto_batch_tracing=False)) + stage_added = { + "parent_input": {"original_input": "original_input_value"}, + "child_input": {"parent_input": "parent_input_value"}, + "child_output": {"child_output": "child_output_value"}, + "parent_output": {"parent_output": "parent_output_value"}, + } + + @RunnableLambda + def child(inputs: dict) -> dict: + return {**stage_added["child_output"], **inputs} + + @RunnableLambda + def parent(inputs: dict) -> dict: + return { + **stage_added["parent_output"], + **child.invoke({**stage_added["child_input"], **inputs}), + } + + expected_at_stage = {} + current = {} + for stage in stage_added: + current = {**current, **stage_added[stage]} + expected_at_stage[stage] = current + parent_result = parent.invoke(stage_added["parent_input"], {"callbacks": [tracer]}) + assert parent_result == expected_at_stage["parent_output"] + mock_posts = _get_calls(tracer.client, minimum=2) + assert len(mock_posts) == 2 + datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_posts] + assert datas[0]["name"] == "parent" + assert datas[0]["inputs"] == expected_at_stage["parent_input"] + assert not datas[0]["outputs"] + assert datas[1]["name"] == "child" + assert datas[1]["inputs"] == expected_at_stage["child_input"] + assert not datas[1]["outputs"] + parent_uid = datas[0]["id"] + child_uid = datas[1]["id"] + + # Check the patch requests + mock_patches = _get_calls(tracer.client, verbs={"PATCH"}, minimum=2) + assert len(mock_patches) == 2 + child_patch = json.loads(mock_patches[0].kwargs["data"]) + assert child_patch["id"] == child_uid + assert child_patch["outputs"] == expected_at_stage["child_output"] + assert child_patch["inputs"] == expected_at_stage["child_input"] + parent_patch = json.loads(mock_patches[1].kwargs["data"]) + assert parent_patch["id"] == parent_uid + assert parent_patch["outputs"] == expected_at_stage["parent_output"] + assert parent_patch["inputs"] == expected_at_stage["parent_input"] + + +def test_trace_respects_tracing_context(): + mock_client = _get_mock_client() + with tracing_context(enabled=False): + with trace(name="foo", inputs={"a": 1}, client=mock_client): + pass + + mock_calls = _get_calls(mock_client) + assert not mock_calls + + +def test_trace_nested_enable_disable(): + # Test that you can disable then re-enable tracing + # and the trace connects as expected + mock_client = _get_mock_client() + with tracing_context(enabled=True): + with trace(name="foo", inputs={"a": 1}, client=mock_client) as run: + with tracing_context(enabled=False): + with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2: + with tracing_context(enabled=True): + with trace( + name="baz", 
inputs={"c": 3}, client=mock_client + ) as run3: + run3.end(outputs={"c": 3}) + run2.end(outputs={"b": 2}) + run.end(outputs={"a": 1}) + + # Now we need to ensure that there are 2 runs created (2 posts and 2 patches), + # run -> run3 + # with run2 being invisible + mock_calls = _get_calls(mock_client, verbs={"POST", "PATCH"}) + datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_calls] + assert "post" in datas[0] + posted = datas[0]["post"] + assert len(posted) == 2 + assert posted[0]["name"] == "foo" + assert posted[1]["name"] == "baz" + dotted_parts = posted[1]["dotted_order"].split(".") + assert len(dotted_parts) == 2 + parent_dotted = posted[0]["dotted_order"] + assert parent_dotted == dotted_parts[0] + + +def test_tracing_disabled_project_name_set(): + mock_client = _get_mock_client() + + @traceable + def foo(a: int) -> int: + return a + + with tracing_context(enabled=False): + with trace( + name="foo", inputs={"a": 1}, client=mock_client, project_name="my_project" + ): + pass + foo(1, langsmith_extra={"client": mock_client, "project_name": "my_project"}) + + mock_calls = _get_calls(mock_client) + assert not mock_calls + + +@pytest.mark.parametrize("auto_batch_tracing", [True, False]) +async def test_traceable_async_exception(auto_batch_tracing: bool): + mock_client = _get_mock_client( + auto_batch_tracing=auto_batch_tracing, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + + @traceable + async def my_function(a: int) -> int: + raise ValueError("foo") + + with tracing_context(enabled=True): + with pytest.raises(ValueError, match="foo"): + await my_function(1, langsmith_extra={"client": mock_client}) + + # Get ALL the call args for the mock_client + num_calls = 1 if auto_batch_tracing else 2 + mock_calls = _get_calls( + mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls + ) + assert len(mock_calls) >= num_calls + + +@pytest.mark.parametrize("auto_batch_tracing", [True, False]) +async def test_traceable_async_gen_exception(auto_batch_tracing: bool): + mock_client = _get_mock_client( + auto_batch_tracing=auto_batch_tracing, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + + @traceable + async def my_function(a: int) -> AsyncGenerator[int, None]: + for i in range(5): + yield i + raise ValueError("foo") + + with tracing_context(enabled=True): + with pytest.raises(ValueError, match="foo"): + async for _ in my_function(1, langsmith_extra={"client": mock_client}): + pass + + # Get ALL the call args for the mock_client + num_calls = 1 if auto_batch_tracing else 2 + mock_calls = _get_calls( + mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls + ) + assert len(mock_calls) == num_calls + + +@pytest.mark.parametrize("env_var", [True, False]) +@pytest.mark.parametrize("context", [True, False, None]) +async def test_trace_respects_env_var(env_var: bool, context: Optional[bool]): + mock_client = _get_mock_client() + with patch.dict(os.environ, {"LANGSMITH_TRACING": "true" if env_var else "false "}): + with tracing_context(enabled=context): + with trace(name="foo", inputs={"a": 1}, client=mock_client) as run: + assert run.name 
== "foo" + pass + async with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2: + assert run2.name == "bar" + pass + + mock_calls = _get_calls(mock_client) + if context is None: + expect = env_var + else: + expect = context + if expect: + assert len(mock_calls) >= 1 + else: + assert not mock_calls + + +async def test_process_inputs_outputs(): + mock_client = _get_mock_client() + in_s = "what's life's meaning" + + def process_inputs(inputs: dict) -> dict: + assert inputs == {"val": in_s, "ooblek": "nada"} + inputs["val2"] = "this is mutated" + return {"serialized_in": "what's the meaning of life?"} + + def process_outputs(outputs: int) -> dict: + assert outputs == 42 + return {"serialized_out": 24} + + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) + def my_function(val: str, **kwargs: Any) -> int: + assert not kwargs.get("val2") + return 42 + + with tracing_context(enabled=True): + my_function( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + + def _check_client(client: Client) -> None: + mock_calls = _get_calls(client) + assert len(mock_calls) == 1 + call = mock_calls[0] + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert body["post"][0]["inputs"] == { + "serialized_in": "what's the meaning of life?" + } + assert body["post"][0]["outputs"] == {"serialized_out": 24} + + _check_client(mock_client) + + class Untruthy: + def __init__(self, val: Any) -> None: + self.val = val + + def __bool__(self) -> bool: + raise ValueError("I'm not truthy") + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Untruthy): + return self.val == other.val + return self.val == other + + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) + async def amy_function(val: str, **kwargs: Any) -> int: + assert not kwargs.get("val2") + return Untruthy(42) # type: ignore + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + await amy_function( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + + _check_client(mock_client) + + # Do generator + + def reducer(outputs: list) -> dict: + return {"reduced": outputs[0]} + + def process_reduced_outputs(outputs: dict) -> dict: + assert outputs == {"reduced": 42} + return {"serialized_out": 24} + + @traceable( + process_inputs=process_inputs, + process_outputs=process_reduced_outputs, + reduce_fn=reducer, + ) + def my_gen(val: str, **kwargs: Any) -> Generator[int, None, None]: + assert not kwargs.get("val2") + yield 42 + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + result = list( + my_gen( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + ) + assert result == [42] + + _check_client(mock_client) + + @traceable( + process_inputs=process_inputs, + process_outputs=process_reduced_outputs, + reduce_fn=reducer, + ) + async def amy_gen(val: str, **kwargs: Any) -> AsyncGenerator[int, None]: + assert not kwargs.get("val2") + yield Untruthy(42) # type: ignore + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + result = [ + i + async for i in amy_gen( + in_s, ooblek="nada", langsmith_extra={"client": mock_client} + ) + ] + assert result == [42] + _check_client(mock_client) diff --git a/python/tests/unit_tests/test_run_trees.py b/python/tests/unit_tests/test_run_trees.py index 92c398b2d..77618ab5f 100644 --- a/python/tests/unit_tests/test_run_trees.py +++ 
b/python/tests/unit_tests/test_run_trees.py @@ -15,7 +15,7 @@ def test_run_tree_accepts_tpe() -> None: name="My Chat Bot", inputs={"text": "Summarize this morning's meetings."}, client=mock_client, - executor=ThreadPoolExecutor(), + executor=ThreadPoolExecutor(), # type: ignore ) @@ -59,3 +59,58 @@ def test_run_tree_accepts_tpe() -> None: ) def test_parse_dotted_order(inputs, expected): assert run_trees._parse_dotted_order(inputs) == expected + + +def test_run_tree_events_not_null(): + mock_client = MagicMock(spec=Client) + run_tree = run_trees.RunTree( + name="My Chat Bot", + inputs={"text": "Summarize this morning's meetings."}, + client=mock_client, + events=None, + ) + assert run_tree.events == [] + + +def test_nested_run_trees_from_dotted_order(): + grandparent = run_trees.RunTree( + name="Grandparent", + inputs={"text": "Summarize this morning's meetings."}, + client=MagicMock(spec=Client), + ) + parent = grandparent.create_child( + name="Parent", + ) + child = parent.create_child( + name="Child", + ) + # Check child + clone = run_trees.RunTree.from_dotted_order( + dotted_order=child.dotted_order, + name="Clone", + client=MagicMock(spec=Client), + ) + + assert clone.id == child.id + assert clone.parent_run_id == child.parent_run_id + assert clone.dotted_order == child.dotted_order + + # Check parent + parent_clone = run_trees.RunTree.from_dotted_order( + dotted_order=parent.dotted_order, + name="Parent Clone", + client=MagicMock(spec=Client), + ) + assert parent_clone.id == parent.id + assert parent_clone.parent_run_id == parent.parent_run_id + assert parent_clone.dotted_order == parent.dotted_order + + # Check grandparent + grandparent_clone = run_trees.RunTree.from_dotted_order( + dotted_order=grandparent.dotted_order, + name="Grandparent Clone", + client=MagicMock(spec=Client), + ) + assert grandparent_clone.id == grandparent.id + assert grandparent_clone.parent_run_id is None + assert grandparent_clone.dotted_order == grandparent.dotted_order diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index d0b0119d9..8fd493478 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -1,9 +1,22 @@ +# mypy: disable-error-code="annotation-unchecked" +import copy +import dataclasses +import itertools +import threading import unittest -from unittest.mock import patch +import uuid +from datetime import datetime +from enum import Enum +from typing import Any, NamedTuple, Optional +from unittest.mock import MagicMock, patch +import attr +import dataclasses_json import pytest +from pydantic import BaseModel import langsmith.utils as ls_utils +from langsmith import Client, traceable from langsmith.run_helpers import tracing_context @@ -76,7 +89,9 @@ def test_correct_get_tracer_project(self): def test_tracing_enabled(): - with patch.dict("os.environ", {"LANGCHAIN_TRACING_V2": "false"}): + with patch.dict( + "os.environ", {"LANGCHAIN_TRACING_V2": "false", "LANGSMITH_TRACING": "false"} + ): assert not ls_utils.tracing_is_enabled() with tracing_context(enabled=True): assert ls_utils.tracing_is_enabled() @@ -86,9 +101,39 @@ def test_tracing_enabled(): assert not ls_utils.tracing_is_enabled() assert not ls_utils.tracing_is_enabled() + @traceable + def child_function(): + assert ls_utils.tracing_is_enabled() + return 1 + + @traceable + def untraced_child_function(): + assert not ls_utils.tracing_is_enabled() + return 1 + + @traceable + def parent_function(): + with patch.dict( + "os.environ", + {"LANGCHAIN_TRACING_V2": 
"false", "LANGSMITH_TRACING": "false"}, + ): + assert ls_utils.tracing_is_enabled() + child_function() + with tracing_context(enabled=False): + assert not ls_utils.tracing_is_enabled() + return untraced_child_function() + + with patch.dict( + "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"} + ): + mock_client = MagicMock(spec=Client) + parent_function(langsmith_extra={"client": mock_client}) + def test_tracing_disabled(): - with patch.dict("os.environ", {"LANGCHAIN_TRACING_V2": "true"}): + with patch.dict( + "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"} + ): assert ls_utils.tracing_is_enabled() with tracing_context(enabled=False): assert not ls_utils.tracing_is_enabled() @@ -97,3 +142,172 @@ def test_tracing_disabled(): with tracing_context(enabled=False): assert not ls_utils.tracing_is_enabled() assert ls_utils.tracing_is_enabled() + + +def test_deepish_copy(): + class MyClass: + def __init__(self, x: int) -> None: + self.x = x + self.y = "y" + self.a_list = [1, 2, 3] + self.a_tuple = (1, 2, 3) + self.a_set = {1, 2, 3} + self.a_dict = {"foo": "bar"} + self.my_bytes = b"foo" + + class ClassWithTee: + def __init__(self) -> None: + tee_a, tee_b = itertools.tee(range(10)) + self.tee_a = tee_a + self.tee_b = tee_b + + class MyClassWithSlots: + __slots__ = ["x", "y"] + + def __init__(self, x: int) -> None: + self.x = x + self.y = "y" + + class MyPydantic(BaseModel): + foo: str + bar: int + baz: dict + + @dataclasses.dataclass + class MyDataclass: + foo: str + bar: int + + def something(self) -> None: + pass + + class MyEnum(str, Enum): + FOO = "foo" + BAR = "bar" + + class ClassWithFakeJson: + def json(self): + raise ValueError("This should not be called") + + def to_json(self) -> dict: + return {"foo": "bar"} + + @dataclasses_json.dataclass_json + @dataclasses.dataclass + class Person: + name: str + + @attr.dataclass + class AttrDict: + foo: str = attr.ib() + bar: int + + uid = uuid.uuid4() + current_time = datetime.now() + + class NestedClass: + __slots__ = ["person", "lock"] + + def __init__(self) -> None: + self.person = Person(name="foo") + self.lock = [threading.Lock()] + + def __deepcopy__(self, memo: Optional[dict] = None) -> Any: + cls = type(self) + m = cls.__new__(cls) + setattr(m, "__dict__", copy.deepcopy(self.__dict__, memo=memo)) + + class CyclicClass: + def __init__(self) -> None: + self.cyclic = self + + def __repr__(self) -> str: + return "SoCyclic" + + class CyclicClass2: + def __init__(self) -> None: + self.cyclic: Any = None + self.other: Any = None + + def __repr__(self) -> str: + return "SoCyclic2" + + cycle_2 = CyclicClass2() + cycle_2.cyclic = CyclicClass2() + cycle_2.cyclic.other = cycle_2 + + class MyNamedTuple(NamedTuple): + foo: str + bar: int + + my_dict = { + "uid": uid, + "time": current_time, + "adict": {"foo": "bar"}, + "my_class": MyClass(1), + "class_with_tee": ClassWithTee(), + "my_slotted_class": MyClassWithSlots(1), + "my_dataclass": MyDataclass("foo", 1), + "my_enum": MyEnum.FOO, + "my_pydantic": MyPydantic(foo="foo", bar=1, baz={"foo": "bar"}), + "person": Person(name="foo"), + "a_bool": True, + "a_none": None, + "a_str": "foo", + "an_int": 1, + "a_float": 1.1, + "nested_class": NestedClass(), + "attr_dict": AttrDict(foo="foo", bar=1), + "named_tuple": MyNamedTuple(foo="foo", bar=1), + "cyclic": CyclicClass(), + "cyclic2": cycle_2, + "fake_json": ClassWithFakeJson(), + } + assert ls_utils.deepish_copy(my_dict) == my_dict + + +def test_is_version_greater_or_equal(): + # Test versions equal to 0.5.23 
+ assert ls_utils.is_version_greater_or_equal("0.5.23", "0.5.23") + + # Test versions greater than 0.5.23 + assert ls_utils.is_version_greater_or_equal("0.5.24", "0.5.23") + assert ls_utils.is_version_greater_or_equal("0.6.0", "0.5.23") + assert ls_utils.is_version_greater_or_equal("1.0.0", "0.5.23") + + # Test versions less than 0.5.23 + assert not ls_utils.is_version_greater_or_equal("0.5.22", "0.5.23") + assert not ls_utils.is_version_greater_or_equal("0.5.0", "0.5.23") + assert not ls_utils.is_version_greater_or_equal("0.4.99", "0.5.23") + + +def test_parse_prompt_identifier(): + # Valid cases + assert ls_utils.parse_prompt_identifier("name") == ("-", "name", "latest") + assert ls_utils.parse_prompt_identifier("owner/name") == ("owner", "name", "latest") + assert ls_utils.parse_prompt_identifier("owner/name:commit") == ( + "owner", + "name", + "commit", + ) + assert ls_utils.parse_prompt_identifier("name:commit") == ("-", "name", "commit") + + # Invalid cases + invalid_identifiers = [ + "", + "/", + ":", + "owner/", + "/name", + "owner//name", + "owner/name/", + "owner/name/extra", + ":commit", + ] + + for invalid_id in invalid_identifiers: + try: + ls_utils.parse_prompt_identifier(invalid_id) + assert False, f"Expected ValueError for identifier: {invalid_id}" + except ValueError: + pass # This is the expected behavior
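
For reference, the integration-test hunks above stop deleting shared tracer projects and instead isolate each test's runs by tagging them with a unique `metadata` value and passing a matching `filter` to `list_runs`. A minimal, self-contained sketch of that pattern is shown below; the project name `"my-project"` is a hypothetical placeholder, and a configured `LANGCHAIN_API_KEY`/endpoint is assumed — it only uses calls already exercised in the diff (`traceable`, `langsmith_extra`, `Client.list_runs`).

```python
import uuid

from langsmith import Client, traceable

client = Client()  # assumes LANGCHAIN_API_KEY and the endpoint are set in the environment
run_meta = uuid.uuid4().hex  # unique tag for this invocation


@traceable(run_type="chain")
def my_run(text: str) -> str:
    return "Completed: " + text


# Tag the traced run with the unique metadata value instead of using a throwaway project.
my_run(
    "foo",
    langsmith_extra={
        "project_name": "my-project",  # hypothetical placeholder
        "metadata": {"test_run": run_meta},
    },
)

# Later, fetch only the runs from this invocation; no project deletion is required.
filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))'
runs = list(client.list_runs(project_name="my-project", filter=filter_))
```

Because run ingestion is asynchronous, the tests above poll with the same filter (`poll_runs_until_count(..., filter_=...)`) until the expected number of runs is visible before asserting on their contents.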