diff --git a/.github/actions/poetry_setup/action.yml b/.github/actions/poetry_setup/action.yml new file mode 100644 index 000000000..df04e1e71 --- /dev/null +++ b/.github/actions/poetry_setup/action.yml @@ -0,0 +1,88 @@ +# An action for setting up poetry install with caching. +# Using a custom action since the default action does not +# take poetry install groups into account. +# Action code from: +# https://github.com/actions/setup-python/issues/505#issuecomment-1273013236 +name: poetry-install-with-caching +description: Poetry install with support for caching of dependency groups. + +inputs: + python-version: + description: Python version, supporting MAJOR.MINOR only + required: true + + poetry-version: + description: Poetry version + required: true + + cache-key: + description: Cache key to use for manual handling of caching + required: true + +runs: + using: composite + steps: + - uses: actions/setup-python@v5 + name: Setup python ${{ inputs.python-version }} + id: setup-python + with: + python-version: ${{ inputs.python-version }} + + - uses: actions/cache@v3 + id: cache-bin-poetry + name: Cache Poetry binary - Python ${{ inputs.python-version }} + env: + SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1" + with: + path: | + /opt/pipx/venvs/poetry + # This step caches the poetry installation, so make sure it's keyed on the poetry version as well. + key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }} + + - name: Refresh shell hashtable and fixup softlinks + if: steps.cache-bin-poetry.outputs.cache-hit == 'true' + shell: bash + env: + POETRY_VERSION: ${{ inputs.poetry-version }} + PYTHON_VERSION: ${{ inputs.python-version }} + run: | + set -eux + + # Refresh the shell hashtable, to ensure correct `which` output. + hash -r + + # `actions/cache@v3` doesn't always seem able to correctly unpack softlinks. + # Delete and recreate the softlinks pipx expects to have. 
+ rm /opt/pipx/venvs/poetry/bin/python + cd /opt/pipx/venvs/poetry/bin + ln -s "$(which "python$PYTHON_VERSION")" python + chmod +x python + cd /opt/pipx_bin/ + ln -s /opt/pipx/venvs/poetry/bin/poetry poetry + chmod +x poetry + + # Ensure everything got set up correctly. + /opt/pipx/venvs/poetry/bin/python --version + /opt/pipx_bin/poetry --version + + - name: Install poetry + if: steps.cache-bin-poetry.outputs.cache-hit != 'true' + shell: bash + env: + POETRY_VERSION: ${{ inputs.poetry-version }} + PYTHON_VERSION: ${{ inputs.python-version }} + # Install poetry using the python version installed by setup-python step. + run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose + + - name: Restore pip and poetry cached dependencies + uses: actions/cache@v3 + env: + SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4" + with: + path: | + ~/.cache/pip + ~/.cache/pypoetry/virtualenvs + ~/.cache/pypoetry/cache + ~/.cache/pypoetry/artifacts + ./.venv + key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles('./poetry.lock') }} diff --git a/.github/workflows/js_test.yml b/.github/workflows/js_test.yml index 172ed9034..1778178cc 100644 --- a/.github/workflows/js_test.yml +++ b/.github/workflows/js_test.yml @@ -81,7 +81,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - node-version: [18.x, 19.x, 20.x, 21.x, 22.x] + node-version: [18.x, 20.x, "22.4.1"] # See Node.js release schedule at https://nodejs.org/en/about/releases/ include: - os: windows-latest @@ -107,4 +107,4 @@ jobs: - name: Check version run: yarn run check-version - name: Test - run: yarn run test \ No newline at end of file + run: yarn run test diff --git a/.github/workflows/py-baseline.yml b/.github/workflows/py-baseline.yml new file mode 100644 index 000000000..4b1998847 --- /dev/null +++ b/.github/workflows/py-baseline.yml @@ -0,0 +1,37 @@ +name: py-baseline + +on: + 
workflow_dispatch: + push: + branches: [main] + paths: + - "python/langsmith/**" + +env: + POETRY_VERSION: "1.7.1" + +jobs: + benchmark: + runs-on: ubuntu-latest + defaults: + run: + working-directory: python + steps: + - uses: actions/checkout@v4 + - run: SHA=$(git rev-parse HEAD) && echo "SHA=$SHA" >> $GITHUB_ENV + - name: Set up Python 3.11 + Poetry ${{ env.POETRY_VERSION }} + uses: "./.github/actions/poetry_setup" + with: + python-version: "3.11" + poetry-version: ${{ env.POETRY_VERSION }} + cache-key: py-benchi + - name: Install dependencies + run: poetry install --with dev + - name: Run benchmarks + run: OUTPUT=out/benchmark-baseline.json make -s benchmark + - name: Save outputs + uses: actions/cache/save@v4 + with: + key: ${{ runner.os }}-benchmark-baseline-${{ env.SHA }} + path: | + python/out/benchmark-baseline.json diff --git a/.github/workflows/py-bench.yml b/.github/workflows/py-bench.yml new file mode 100644 index 000000000..20ce118d1 --- /dev/null +++ b/.github/workflows/py-bench.yml @@ -0,0 +1,71 @@ +name: py-bench + +on: + pull_request: + paths: + - "python/langsmith/**" + +env: + POETRY_VERSION: "1.7.1" + +jobs: + benchmark: + runs-on: ubuntu-latest + defaults: + run: + working-directory: python + steps: + - uses: actions/checkout@v4 + - id: files + name: Get changed files + uses: Ana06/get-changed-files@v2.3.0 + with: + format: json + - name: Set up Python 3.11 + Poetry ${{ env.POETRY_VERSION }} + uses: "./.github/actions/poetry_setup" + with: + python-version: "3.11" + poetry-version: ${{ env.POETRY_VERSION }} + cache-key: py-bench + - name: Install dependencies + run: poetry install --with dev + - name: Download baseline + uses: actions/cache/restore@v4 + with: + key: ${{ runner.os }}-benchmark-baseline + restore-keys: | + ${{ runner.os }}-benchmark-baseline- + fail-on-cache-miss: true + path: | + python/out/benchmark-baseline.json + - name: Run benchmarks + id: benchmark + run: | + { + echo 'OUTPUT<> "$GITHUB_OUTPUT" + - name: Compare 
benchmarks + id: compare + run: | + { + echo 'OUTPUT<> "$GITHUB_OUTPUT" + - name: Annotation + uses: actions/github-script@v7 + with: + script: | + const file = JSON.parse(`${{ steps.files.outputs.added_modified_renamed }}`)[0] + core.notice(`${{ steps.benchmark.outputs.OUTPUT }}`, { + title: 'Benchmark results', + file, + }) + core.notice(`${{ steps.compare.outputs.OUTPUT }}`, { + title: 'Comparison against main', + file, + }) diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 5a45962ae..d207b112b 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -29,26 +29,20 @@ jobs: working-directory: python steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v5 - name: Setup python ${{ matrix.python-version }} - id: setup-python + - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }} + uses: "./.github/actions/poetry_setup" with: python-version: ${{ matrix.python-version }} - cache: "pip" - - name: Install poetry - shell: bash - env: - PYTHON_VERSION: ${{ matrix.python-version }} - # Install poetry using the python version installed by setup-python step. 
- run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose + poetry-version: ${{ env.POETRY_VERSION }} + cache-key: build-and-test - name: Install dependencies run: | poetry install --with dev,lint - poetry run pip install -U langchain langchain-core + poetry run pip install -U langchain langchain-core langchain_anthropic langchain_openai - name: Build ${{ matrix.python-version }} run: poetry build - name: Lint ${{ matrix.python-version }} run: make lint - name: Run Unit tests ${{ matrix.python-version }} run: make tests - shell: bash \ No newline at end of file + shell: bash diff --git a/.github/workflows/release_js.yml b/.github/workflows/release_js.yml index 5f2ac7294..4f1aee583 100644 --- a/.github/workflows/release_js.yml +++ b/.github/workflows/release_js.yml @@ -1,6 +1,11 @@ name: JS Release on: + push: + branches: + - main + paths: + - "js/package.json" workflow_dispatch: jobs: @@ -11,33 +16,34 @@ jobs: permissions: contents: write id-token: write + defaults: + run: + working-directory: "js" steps: - uses: actions/checkout@v3 # JS Build - - name: Use Node.js ${{ matrix.node-version }} + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: ${{ matrix.node-version }} + node-version: 20.x cache: "yarn" cache-dependency-path: "js/yarn.lock" - name: Install dependencies - run: cd js && yarn install --immutable + run: yarn install --immutable - name: Build - run: cd js && yarn run build + run: yarn run build - name: Check version - run: cd js && yarn run check-version + run: yarn run check-version - name: Check NPM version id: check_npm_version run: | - cd js if yarn run check-npm-version; then - echo "::set-output name=should_publish::true" + echo "should_publish=true" >> $GITHUB_OUTPUT else - echo "::set-output name=should_publish::false" + echo "should_publish=false" >> $GITHUB_OUTPUT fi - name: Publish package to NPM if: steps.check_npm_version.outputs.should_publish == 'true' run: | 
- cd js echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > .npmrc yarn publish --non-interactive diff --git a/.github/workflows/test_docker_compose.yml b/.github/workflows/test_docker_compose.yml new file mode 100644 index 000000000..8b67d598a --- /dev/null +++ b/.github/workflows/test_docker_compose.yml @@ -0,0 +1,45 @@ +name: "CI: Test Docker Compose" + +on: + push: + branches: [ main ] + paths: + pull_request: + branches: [ main ] + paths: + - ".github/workflows/test_docker_compose.yml" + - "python/langsmith/cli/docker-compose.yaml" + + +concurrency: + group: "test-docker-compose" + cancel-in-progress: true + +jobs: + + docker-compose: + timeout-minutes: 10 + runs-on: ubuntu-latest + + env: + LANGSMITH_LICENSE_KEY: ${{ secrets.LANGSMITH_LICENSE_KEY }} + API_KEY_SALT: test + + steps: + - name: Checkout + uses: actions/checkout@v1 + + - uses: KengoTODA/actions-setup-docker-compose@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Start containers + working-directory: python/langsmith/cli + run: docker compose up -d + + - name: sleep 30 seconds + run: sleep 30 + + - name: Check backend health + run: curl localhost:1980/api/info + diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 000000000..98b654db8 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,35 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +formats: + - pdf + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + commands: + - mkdir -p $READTHEDOCS_OUTPUT + - echo "Building docs" + - pip install -U uv + - uv venv + - . .venv/bin/activate + - uv pip install -r python/docs/requirements.txt + - . .venv/bin/activate && cd python/docs && make clobber generate-api-rst html && cd ../.. 
+ - cp -r python/docs/_build/html $READTHEDOCS_OUTPUT +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: python/docs/conf.py + +# If using Sphinx, optionally build your docs in additional formats such as PDF +# formats: +# - pdf + +# Optionally declare the Python requirements required to build your docs +python: + install: + - requirements: python/docs/requirements.txt diff --git a/_scripts/_fetch_schema.py b/_scripts/_fetch_schema.py index 741e12a9c..ba8c171bd 100644 --- a/_scripts/_fetch_schema.py +++ b/_scripts/_fetch_schema.py @@ -1,4 +1,5 @@ """Fetch and prune the Langsmith spec.""" + import argparse from pathlib import Path @@ -19,7 +20,9 @@ def process_schema(sub_schema): get_dependencies(schema, sub_schema["$ref"].split("/")[-1], new_components) else: if "items" in sub_schema and "$ref" in sub_schema["items"]: - get_dependencies(schema, sub_schema["items"]["$ref"].split("/")[-1], new_components) + get_dependencies( + schema, sub_schema["items"]["$ref"].split("/")[-1], new_components + ) for keyword in ["anyOf", "oneOf", "allOf"]: if keyword in sub_schema: for item in sub_schema[keyword]: @@ -38,8 +41,6 @@ def process_schema(sub_schema): process_schema(item) - - def _extract_langsmith_routes_and_properties(schema, operation_ids): new_paths = {} new_components = {"schemas": {}} @@ -98,20 +99,25 @@ def test_openapi_specification(spec: dict): assert errors is None, f"OpenAPI validation failed: {errors}" -def main(out_file: str = "openapi.yaml", url: str = "https://web.smith.langchain.com/openapi.json"): +def main( + out_file: str = "openapi.yaml", + url: str = "https://web.smith.langchain.com/openapi.json", +): langsmith_schema = get_langsmith_runs_schema(url=url) parent_dir = Path(__file__).parent.parent test_openapi_specification(langsmith_schema) with (parent_dir / "openapi" / out_file).open("w") as f: # Sort the schema keys so the openapi version and info come at the top - for key in ['openapi', 'info', 'paths', 
'components']: + for key in ["openapi", "info", "paths", "components"]: langsmith_schema[key] = langsmith_schema.pop(key) f.write(yaml.dump(langsmith_schema, sort_keys=False)) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("--url", type=str, default="https://web.smith.langchain.com/openapi.json") + parser.add_argument( + "--url", type=str, default="https://web.smith.langchain.com/openapi.json" + ) parser.add_argument("--output", type=str, default="openapi.yaml") args = parser.parse_args() main(args.output, url=args.url) diff --git a/js/.eslintrc.cjs b/js/.eslintrc.cjs index a870c9f5a..da4c3ecb4 100644 --- a/js/.eslintrc.cjs +++ b/js/.eslintrc.cjs @@ -14,6 +14,7 @@ module.exports = { ignorePatterns: [ ".eslintrc.cjs", "scripts", + "src/utils/lodash/*", "node_modules", "dist", "dist-cjs", diff --git a/js/.gitignore b/js/.gitignore index 902b3f759..e758389d2 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -71,6 +71,10 @@ Chinook_Sqlite.sql /wrappers/openai.js /wrappers/openai.d.ts /wrappers/openai.d.cts +/wrappers/vercel.cjs +/wrappers/vercel.js +/wrappers/vercel.d.ts +/wrappers/vercel.d.cts /singletons/traceable.cjs /singletons/traceable.js /singletons/traceable.d.ts diff --git a/js/README.md b/js/README.md index 9eba64647..7aa73a1c9 100644 --- a/js/README.md +++ b/js/README.md @@ -53,6 +53,7 @@ Tracing can be activated by setting the following environment variables or by ma ```typescript process.env["LANGSMITH_TRACING"] = "true"; process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; +// process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region process.env["LANGCHAIN_API_KEY"] = ""; // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set ``` diff --git a/js/package.json b/js/package.json index ebbba5a72..45a05380e 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": 
"0.1.33", + "version": "0.1.61", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -45,6 +45,10 @@ "wrappers/openai.js", "wrappers/openai.d.ts", "wrappers/openai.d.cts", + "wrappers/vercel.cjs", + "wrappers/vercel.js", + "wrappers/vercel.d.ts", + "wrappers/vercel.d.cts", "singletons/traceable.cjs", "singletons/traceable.js", "singletons/traceable.d.ts", @@ -93,25 +97,26 @@ }, "homepage": "https://github.com/langchain-ai/langsmith-sdk#readme", "dependencies": { - "@types/uuid": "^9.0.1", + "@types/uuid": "^10.0.0", "commander": "^10.0.1", - "lodash.set": "^4.3.2", "p-queue": "^6.6.2", "p-retry": "4", - "uuid": "^9.0.0" + "semver": "^7.6.3", + "uuid": "^10.0.0" }, "devDependencies": { + "@ai-sdk/openai": "^0.0.40", "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", - "langchain": "^0.2.0", - "@langchain/core": "^0.2.0", - "@langchain/langgraph": "^0.0.19", + "@langchain/core": "^0.3.1", + "@langchain/langgraph": "^0.2.3", + "@langchain/openai": "^0.3.0", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", - "@types/lodash.set": "^4.3.9", "@typescript-eslint/eslint-plugin": "^5.59.8", "@typescript-eslint/parser": "^5.59.8", + "ai": "^3.2.37", "babel-jest": "^29.5.0", "cross-env": "^7.0.3", "dotenv": "^16.1.3", @@ -121,31 +126,22 @@ "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", + "langchain": "^0.3.2", "openai": "^4.38.5", "prettier": "^2.8.8", "ts-jest": "^29.1.0", "ts-node": "^10.9.1", - "typescript": "^5.4.5" + "typescript": "^5.4.5", + "zod": "^3.23.8" }, "peerDependencies": { - "openai": "*", - "langchain": "*", - "@langchain/core": "*" + "openai": "*" }, "peerDependenciesMeta": { "openai": { "optional": true - }, - "langchain": { - "optional": true - }, - "@langchain/core": { - "optional": true } }, - "resolutions": { - "@langchain/core": "0.2.0" - }, "lint-staged": { 
"**/*.{ts,tsx}": [ "prettier --write --ignore-unknown", @@ -252,6 +248,15 @@ "import": "./wrappers/openai.js", "require": "./wrappers/openai.cjs" }, + "./wrappers/vercel": { + "types": { + "import": "./wrappers/vercel.d.ts", + "require": "./wrappers/vercel.d.cts", + "default": "./wrappers/vercel.d.ts" + }, + "import": "./wrappers/vercel.js", + "require": "./wrappers/vercel.cjs" + }, "./singletons/traceable": { "types": { "import": "./singletons/traceable.d.ts", @@ -263,4 +268,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/scripts/bump-version.js b/js/scripts/bump-version.js index f8abbfc1e..477b7c922 100644 --- a/js/scripts/bump-version.js +++ b/js/scripts/bump-version.js @@ -1,21 +1,24 @@ -import { readFileSync, writeFileSync } from 'fs'; -import process from 'process'; -const packageJson = JSON.parse(readFileSync('package.json')); +import { readFileSync, writeFileSync } from "fs"; +import process from "process"; +const packageJson = JSON.parse(readFileSync("package.json")); let newVersion; if (process.argv.length > 2) { - newVersion = process.argv[2]; + newVersion = process.argv[2]; } else { - const versionParts = packageJson.version.split('.'); - versionParts[2] = parseInt(versionParts[2]) + 1; - newVersion = versionParts.join('.'); + const versionParts = packageJson.version.split("."); + versionParts[2] = parseInt(versionParts[2]) + 1; + newVersion = versionParts.join("."); } console.log(`Bumping version to ${newVersion}`); packageJson.version = newVersion; -writeFileSync('package.json', JSON.stringify(packageJson, null, 2)); +writeFileSync("package.json", JSON.stringify(packageJson, null, 2) + "\n"); -const indexFilePath = 'src/index.ts'; -let indexFileContent = readFileSync(indexFilePath, 'utf-8'); -indexFileContent = indexFileContent.replace(/export const __version__ = "[0-9]+\.[0-9]+\.[0-9]+";/g, `export const __version__ = "${newVersion}";`); +const indexFilePath = "src/index.ts"; +let indexFileContent = 
readFileSync(indexFilePath, "utf-8"); +indexFileContent = indexFileContent.replace( + /export const __version__ = "[0-9]+\.[0-9]+\.[0-9]+";/g, + `export const __version__ = "${newVersion}";` +); writeFileSync(indexFilePath, indexFileContent); diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 61d7341ab..a3487f756 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -17,6 +17,7 @@ const entrypoints = { wrappers: "wrappers/index", anonymizer: "anonymizer/index", "wrappers/openai": "wrappers/openai", + "wrappers/vercel": "wrappers/vercel", "singletons/traceable": "singletons/traceable", }; diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts index 60becaf9c..dc360a3c4 100644 --- a/js/src/anonymizer/index.ts +++ b/js/src/anonymizer/index.ts @@ -1,4 +1,4 @@ -import set from "lodash.set"; +import set from "../utils/lodash/set.js"; export interface StringNode { value: string; @@ -36,10 +36,6 @@ function extractStringNodes(data: unknown, options: { maxDepth?: number }) { } function deepClone(data: T): T { - if ("structuredClone" in globalThis) { - return globalThis.structuredClone(data); - } - return JSON.parse(JSON.stringify(data)); } @@ -60,20 +56,14 @@ export type ReplacerType = export function createAnonymizer( replacer: ReplacerType, - options?: { - maxDepth?: number; - deepClone?: boolean; - } + options?: { maxDepth?: number } ) { return (data: T): T => { - const nodes = extractStringNodes(data, { + let mutateValue = deepClone(data); + const nodes = extractStringNodes(mutateValue, { maxDepth: options?.maxDepth, }); - // by default we opt-in to mutate the value directly - // to improve performance - let mutateValue = options?.deepClone ? deepClone(data) : data; - const processor: StringNodeProcessor = Array.isArray(replacer) ? 
(() => { const replacers: [regex: RegExp, replace: string][] = replacer.map( diff --git a/js/src/client.ts b/js/src/client.ts index 74590fc86..65b79713b 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -11,27 +11,38 @@ import { Example, ExampleCreate, ExampleUpdate, + ExampleUpdateWithId, Feedback, FeedbackConfig, FeedbackIngestToken, KVMap, LangChainBaseMessage, + LangSmithSettings, + LikePromptResponse, + ListCommitsResponse, + ListPromptsResponse, + Prompt, + PromptCommit, + PromptSortField, Run, RunCreate, RunUpdate, ScoreType, + ExampleSearch, TimeDelta, TracerSession, TracerSessionResult, ValueType, + AnnotationQueue, + RunWithAnnotationQueueInfo, } from "./schemas.js"; import { convertLangChainMessageToExample, isLangChainMessage, } from "./utils/messages.js"; import { - getEnvironmentVariable, getLangChainEnvVarsMetadata, + getLangSmithEnvironmentVariable, getRuntimeEnvironment, } from "./utils/env.js"; @@ -43,8 +54,16 @@ import { import { __version__ } from "./index.js"; import { assertUuid } from "./utils/_uuid.js"; import { warnOnce } from "./utils/warn.js"; +import { + isVersionGreaterOrEqual, + parsePromptIdentifier, +} from "./utils/prompts.js"; +import { raiseForStatus } from "./utils/error.js"; +import { _getFetchImplementation } from "./singletons/fetch.js"; + +import { stringify as stringifyForTracing } from "./utils/fast-safe-stringify/index.js"; -interface ClientConfig { +export interface ClientConfig { apiUrl?: string; apiKey?: string; callerOptions?: AsyncCallerParams; @@ -236,13 +255,20 @@ type RecordStringAny = Record; export type FeedbackSourceType = "model" | "api" | "app"; export type CreateExampleOptions = { + /** The ID of the dataset to create the example in. */ datasetId?: string; + /** The name of the dataset to create the example in (if dataset ID is not provided). */ datasetName?: string; + /** The creation date of the example. */ createdAt?: Date; + /** A unique identifier for the example. 
*/ exampleId?: string; - + /** Additional metadata associated with the example. */ metadata?: KVMap; + /** The split(s) to assign the example to. */ split?: string | string[]; + /** The ID of the source run associated with this example. */ + sourceRunId?: string; }; type AutoBatchQueueItem = { @@ -275,8 +301,8 @@ async function mergeRuntimeEnvIntoRunCreates(runs: RunCreate[]) { } const getTracingSamplingRate = () => { - const samplingRateStr = getEnvironmentVariable( - "LANGCHAIN_TRACING_SAMPLING_RATE" + const samplingRateStr = getLangSmithEnvironmentVariable( + "TRACING_SAMPLING_RATE" ); if (samplingRateStr === undefined) { return undefined; @@ -284,7 +310,7 @@ const getTracingSamplingRate = () => { const samplingRate = parseFloat(samplingRateStr); if (samplingRate < 0 || samplingRate > 1) { throw new Error( - `LANGCHAIN_TRACING_SAMPLING_RATE must be between 0 and 1 if set. Got: ${samplingRate}` + `LANGSMITH_TRACING_SAMPLING_RATE must be between 0 and 1 if set. Got: ${samplingRate}` ); } return samplingRate; @@ -299,17 +325,6 @@ const isLocalhost = (url: string): boolean => { ); }; -const raiseForStatus = async (response: Response, operation: string) => { - // consume the response body to release the connection - // https://undici.nodejs.org/#/?id=garbage-collection - const body = await response.text(); - if (!response.ok) { - throw new Error( - `Failed to ${operation}: ${response.status} ${response.statusText} ${body}` - ); - } -}; - async function toArray(iterable: AsyncIterable): Promise { const result: T[] = []; for await (const item of iterable) { @@ -398,7 +413,7 @@ export class Client { private tracingSampleRate?: number; - private sampledPostUuids = new Set(); + private filteredPostUuids = new Set(); private autoBatchTracing = true; @@ -418,13 +433,21 @@ export class Client { private fetchOptions: RequestInit; + private settings: Promise | null; + constructor(config: ClientConfig = {}) { const defaultConfig = Client.getDefaultClientConfig(); 
this.tracingSampleRate = getTracingSamplingRate(); this.apiUrl = trimQuotes(config.apiUrl ?? defaultConfig.apiUrl) ?? ""; + if (this.apiUrl.endsWith("/")) { + this.apiUrl = this.apiUrl.slice(0, -1); + } this.apiKey = trimQuotes(config.apiKey ?? defaultConfig.apiKey); this.webUrl = trimQuotes(config.webUrl ?? defaultConfig.webUrl); + if (this.webUrl?.endsWith("/")) { + this.webUrl = this.webUrl.slice(0, -1); + } this.timeout_ms = config.timeout_ms ?? 12_000; this.caller = new AsyncCaller(config.callerOptions ?? {}); this.batchIngestCaller = new AsyncCaller({ @@ -450,14 +473,14 @@ export class Client { hideInputs?: boolean; hideOutputs?: boolean; } { - const apiKey = getEnvironmentVariable("LANGCHAIN_API_KEY"); + const apiKey = getLangSmithEnvironmentVariable("API_KEY"); const apiUrl = - getEnvironmentVariable("LANGCHAIN_ENDPOINT") ?? + getLangSmithEnvironmentVariable("ENDPOINT") ?? "https://api.smith.langchain.com"; const hideInputs = - getEnvironmentVariable("LANGCHAIN_HIDE_INPUTS") === "true"; + getLangSmithEnvironmentVariable("HIDE_INPUTS") === "true"; const hideOutputs = - getEnvironmentVariable("LANGCHAIN_HIDE_OUTPUTS") === "true"; + getLangSmithEnvironmentVariable("HIDE_OUTPUTS") === "true"; return { apiUrl: apiUrl, apiKey: apiKey, @@ -482,6 +505,9 @@ export class Client { } else if (this.apiUrl.split(".", 1)[0].includes("dev")) { this.webUrl = "https://dev.smith.langchain.com"; return this.webUrl; + } else if (this.apiUrl.split(".", 1)[0].includes("eu")) { + this.webUrl = "https://eu.smith.langchain.com"; + return this.webUrl; } else { this.webUrl = "https://smith.langchain.com"; return this.webUrl; @@ -545,17 +571,13 @@ export class Client { ): Promise { const paramsString = queryParams?.toString() ?? 
""; const url = `${this.apiUrl}${path}?${paramsString}`; - const response = await this.caller.call(fetch, url, { + const response = await this.caller.call(_getFetchImplementation(), url, { method: "GET", headers: this.headers, signal: AbortSignal.timeout(this.timeout_ms), ...this.fetchOptions, }); - if (!response.ok) { - throw new Error( - `Failed to fetch ${path}: ${response.status} ${response.statusText}` - ); - } + await raiseForStatus(response, `Failed to fetch ${path}`); return response; } @@ -566,9 +588,10 @@ export class Client { const response = await this._getResponse(path, queryParams); return response.json() as T; } - private async *_getPaginated( + private async *_getPaginated( path: string, - queryParams: URLSearchParams = new URLSearchParams() + queryParams: URLSearchParams = new URLSearchParams(), + transform?: (data: TResponse) => T[] ): AsyncIterable { let offset = Number(queryParams.get("offset")) || 0; const limit = Number(queryParams.get("limit")) || 100; @@ -577,18 +600,16 @@ export class Client { queryParams.set("limit", String(limit)); const url = `${this.apiUrl}${path}?${queryParams}`; - const response = await this.caller.call(fetch, url, { + const response = await this.caller.call(_getFetchImplementation(), url, { method: "GET", headers: this.headers, signal: AbortSignal.timeout(this.timeout_ms), ...this.fetchOptions, }); - if (!response.ok) { - throw new Error( - `Failed to fetch ${path}: ${response.status} ${response.statusText}` - ); - } - const items: T[] = await response.json(); + await raiseForStatus(response, `Failed to fetch ${path}`); + const items: T[] = transform + ? transform(await response.json()) + : await response.json(); if (items.length === 0) { break; @@ -609,13 +630,17 @@ export class Client { ): AsyncIterable { const bodyParams = body ? 
{ ...body } : {}; while (true) { - const response = await this.caller.call(fetch, `${this.apiUrl}${path}`, { - method: requestMethod, - headers: { ...this.headers, "Content-Type": "application/json" }, - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - body: JSON.stringify(bodyParams), - }); + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}${path}`, + { + method: requestMethod, + headers: { ...this.headers, "Content-Type": "application/json" }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + body: JSON.stringify(bodyParams), + } + ); const responseBody = await response.json(); if (!responseBody) { break; @@ -646,18 +671,24 @@ export class Client { if (patch) { const sampled = []; for (const run of runs) { - if (this.sampledPostUuids.has(run.id)) { + if (!this.filteredPostUuids.has(run.id)) { sampled.push(run); - this.sampledPostUuids.delete(run.id); + } else { + this.filteredPostUuids.delete(run.id); } } return sampled; } else { const sampled = []; for (const run of runs) { - if (Math.random() < this.tracingSampleRate) { + if ( + (run.id !== run.trace_id && + !this.filteredPostUuids.has(run.trace_id)) || + Math.random() < this.tracingSampleRate + ) { sampled.push(run); - this.sampledPostUuids.add(run.id); + } else { + this.filteredPostUuids.add(run.id); } } return sampled; @@ -700,7 +731,7 @@ export class Client { immediatelyTriggerBatch || this.autoBatchQueue.size > this.pendingAutoBatchedRunLimit ) { - await this.drainAutoBatchQueue(); + await this.drainAutoBatchQueue().catch(console.error); } if (this.autoBatchQueue.size > 0) { this.autoBatchTimeout = setTimeout( @@ -719,18 +750,13 @@ export class Client { } protected async _getServerInfo() { - const response = await fetch(`${this.apiUrl}/info`, { + const response = await _getFetchImplementation()(`${this.apiUrl}/info`, { method: "GET", headers: { Accept: "application/json" }, signal: AbortSignal.timeout(this.timeout_ms), 
...this.fetchOptions, }); - if (!response.ok) { - // consume the response body to release the connection - // https://undici.nodejs.org/#/?id=garbage-collection - await response.text(); - throw new Error("Failed to retrieve server info."); - } + await raiseForStatus(response, "get server info"); return response.json(); } @@ -743,6 +769,14 @@ export class Client { return true; } + protected async _getSettings() { + if (!this.settings) { + this.settings = this._get("/settings"); + } + + return await this.settings; + } + public async createRun(run: CreateRunParams): Promise { if (!this._filterForSampling([run]).length) { return; @@ -771,14 +805,18 @@ export class Client { runCreate, ]); - const response = await this.caller.call(fetch, `${this.apiUrl}/runs`, { - method: "POST", - headers, - body: JSON.stringify(mergedRunCreateParams[0]), - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); - await raiseForStatus(response, "create run"); + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/runs`, + { + method: "POST", + headers, + body: stringifyForTracing(mergedRunCreateParams[0]), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "create run", true); } /** @@ -870,12 +908,12 @@ export class Client { const batchItems = rawBatch[key].reverse(); let batchItem = batchItems.pop(); while (batchItem !== undefined) { - const stringifiedBatchItem = JSON.stringify(batchItem); + const stringifiedBatchItem = stringifyForTracing(batchItem); if ( currentBatchSizeBytes > 0 && currentBatchSizeBytes + stringifiedBatchItem.length > sizeLimitBytes ) { - await this._postBatchIngestRuns(JSON.stringify(batchChunks)); + await this._postBatchIngestRuns(stringifyForTracing(batchChunks)); currentBatchSizeBytes = 0; batchChunks.post = []; batchChunks.patch = []; @@ -886,7 +924,7 @@ export class Client { } } if (batchChunks.post.length > 0 || batchChunks.patch.length > 0) 
{ - await this._postBatchIngestRuns(JSON.stringify(batchChunks)); + await this._postBatchIngestRuns(stringifyForTracing(batchChunks)); } } @@ -897,7 +935,7 @@ export class Client { Accept: "application/json", }; const response = await this.batchIngestCaller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/runs/batch`, { method: "POST", @@ -907,7 +945,7 @@ export class Client { ...this.fetchOptions, } ); - await raiseForStatus(response, "batch create run"); + await raiseForStatus(response, "batch create run", true); } public async updateRun(runId: string, run: RunUpdate): Promise { @@ -943,17 +981,17 @@ export class Client { } const headers = { ...this.headers, "Content-Type": "application/json" }; const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/runs/${runId}`, { method: "PATCH", headers, - body: JSON.stringify(run), + body: stringifyForTracing(run), signal: AbortSignal.timeout(this.timeout_ms), ...this.fetchOptions, } ); - await raiseForStatus(response, "update run"); + await raiseForStatus(response, "update run", true); } public async readRun( @@ -989,7 +1027,7 @@ export class Client { sessionId = projectOpts?.projectId; } else { const project = await this.readProject({ - projectName: getEnvironmentVariable("LANGCHAIN_PROJECT") || "default", + projectName: getLangSmithEnvironmentVariable("PROJECT") || "default", }); sessionId = project.id; } @@ -1226,6 +1264,94 @@ export class Client { } } + public async getRunStats({ + id, + trace, + parentRun, + runType, + projectNames, + projectIds, + referenceExampleIds, + startTime, + endTime, + error, + query, + filter, + traceFilter, + treeFilter, + isRoot, + dataSourceType, + }: { + id?: string[]; + trace?: string; + parentRun?: string; + runType?: string; + projectNames?: string[]; + projectIds?: string[]; + referenceExampleIds?: string[]; + startTime?: string; + endTime?: string; + error?: boolean; + query?: string; + filter?: string; + traceFilter?: string; + 
treeFilter?: string; + isRoot?: boolean; + dataSourceType?: string; + }): Promise { + let projectIds_ = projectIds || []; + if (projectNames) { + projectIds_ = [ + ...(projectIds || []), + ...(await Promise.all( + projectNames.map((name) => + this.readProject({ projectName: name }).then( + (project) => project.id + ) + ) + )), + ]; + } + + const payload = { + id, + trace, + parent_run: parentRun, + run_type: runType, + session: projectIds_, + reference_example: referenceExampleIds, + start_time: startTime, + end_time: endTime, + error, + query, + filter, + trace_filter: traceFilter, + tree_filter: treeFilter, + is_root: isRoot, + data_source_type: dataSourceType, + }; + + // Remove undefined values from the payload + const filteredPayload = Object.fromEntries( + Object.entries(payload).filter(([_, value]) => value !== undefined) + ); + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/runs/stats`, + { + method: "POST", + headers: this.headers, + body: JSON.stringify(filteredPayload), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const result = await response.json(); + return result; + } + public async shareRun( runId: string, { shareId }: { shareId?: string } = {} @@ -1236,7 +1362,7 @@ export class Client { }; assertUuid(runId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/runs/${runId}/share`, { method: "PUT", @@ -1256,7 +1382,7 @@ export class Client { public async unshareRun(runId: string): Promise { assertUuid(runId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/runs/${runId}/share`, { method: "DELETE", @@ -1265,13 +1391,13 @@ export class Client { ...this.fetchOptions, } ); - await raiseForStatus(response, "unshare run"); + await raiseForStatus(response, "unshare run", true); } public async readRunSharedLink(runId: string): Promise { assertUuid(runId); const response = await 
this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/runs/${runId}/share`, { method: "GET", @@ -1305,7 +1431,7 @@ export class Client { } assertUuid(shareToken); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/public/${shareToken}/runs${queryParams}`, { method: "GET", @@ -1331,7 +1457,7 @@ export class Client { } assertUuid(datasetId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/datasets/${datasetId}/share`, { method: "GET", @@ -1363,7 +1489,7 @@ export class Client { }; assertUuid(datasetId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/datasets/${datasetId}/share`, { method: "PUT", @@ -1383,7 +1509,7 @@ export class Client { public async unshareDataset(datasetId: string): Promise { assertUuid(datasetId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/datasets/${datasetId}/share`, { method: "DELETE", @@ -1392,13 +1518,13 @@ export class Client { ...this.fetchOptions, } ); - await raiseForStatus(response, "unshare dataset"); + await raiseForStatus(response, "unshare dataset", true); } public async readSharedDataset(shareToken: string): Promise { assertUuid(shareToken); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/public/${shareToken}/datasets`, { method: "GET", @@ -1411,6 +1537,61 @@ export class Client { return dataset as Dataset; } + /** + * Get shared examples. + * + * @param {string} shareToken The share token to get examples for. A share token is the UUID (or LangSmith URL, including UUID) generated when explicitly marking an example as public. + * @param {Object} [options] Additional options for listing the examples. + * @param {string[] | undefined} [options.exampleIds] A list of example IDs to filter by. + * @returns {Promise} The shared examples. 
+ */ + public async listSharedExamples( + shareToken: string, + options?: { exampleIds?: string[] } + ): Promise { + const params: Record = {}; + if (options?.exampleIds) { + params.id = options.exampleIds; + } + + const urlParams = new URLSearchParams(); + Object.entries(params).forEach(([key, value]) => { + if (Array.isArray(value)) { + value.forEach((v) => urlParams.append(key, v)); + } else { + urlParams.append(key, value); + } + }); + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/public/${shareToken}/examples?${urlParams.toString()}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + const result = await response.json(); + if (!response.ok) { + if ("detail" in result) { + throw new Error( + `Failed to list shared examples.\nStatus: ${ + response.status + }\nMessage: ${result.detail.join("\n")}` + ); + } + throw new Error( + `Failed to list shared examples: ${response.status} ${response.statusText}` + ); + } + return result.map((example: any) => ({ + ...example, + _hostUrl: this.getHostUrl(), + })); + } + public async createProject({ projectName, description = null, @@ -1440,19 +1621,19 @@ export class Client { if (referenceDatasetId !== null) { body["reference_dataset_id"] = referenceDatasetId; } - const response = await this.caller.call(fetch, endpoint, { - method: "POST", - headers: { ...this.headers, "Content-Type": "application/json" }, - body: JSON.stringify(body), - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); + const response = await this.caller.call( + _getFetchImplementation(), + endpoint, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(body), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "create project"); const result = await response.json(); - if (!response.ok) { - 
throw new Error( - `Failed to create session ${projectName}: ${response.status} ${response.statusText}` - ); - } return result as TracerSession; } @@ -1483,19 +1664,19 @@ export class Client { description, end_time: endTime ? new Date(endTime).toISOString() : null, }; - const response = await this.caller.call(fetch, endpoint, { - method: "PATCH", - headers: { ...this.headers, "Content-Type": "application/json" }, - body: JSON.stringify(body), - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); + const response = await this.caller.call( + _getFetchImplementation(), + endpoint, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(body), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "update project"); const result = await response.json(); - if (!response.ok) { - throw new Error( - `Failed to update project ${projectId}: ${response.status} ${response.statusText}` - ); - } return result as TracerSession; } @@ -1520,7 +1701,7 @@ export class Client { throw new Error("Must provide projectName or projectId"); } const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}${path}?${params}`, { method: "GET", @@ -1642,6 +1823,7 @@ export class Client { referenceDatasetId, referenceDatasetName, referenceFree, + metadata, }: { projectIds?: string[]; name?: string; @@ -1649,6 +1831,7 @@ export class Client { referenceDatasetId?: string; referenceDatasetName?: string; referenceFree?: boolean; + metadata?: RecordStringAny; } = {}): AsyncIterable { const params = new URLSearchParams(); if (projectIds !== undefined) { @@ -1673,6 +1856,9 @@ export class Client { if (referenceFree !== undefined) { params.append("reference_free", referenceFree.toString()); } + if (metadata !== undefined) { + params.append("metadata", JSON.stringify(metadata)); + } for await (const projects of this._getPaginated( 
"/sessions", params @@ -1700,7 +1886,7 @@ export class Client { } assertUuid(projectId_); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/sessions/${projectId_}`, { method: "DELETE", @@ -1711,7 +1897,8 @@ export class Client { ); await raiseForStatus( response, - `delete session ${projectId_} (${projectName})` + `delete session ${projectId_} (${projectName})`, + true ); } @@ -1744,23 +1931,14 @@ export class Client { formData.append("name", name); } - const response = await this.caller.call(fetch, url, { + const response = await this.caller.call(_getFetchImplementation(), url, { method: "POST", headers: this.headers, body: formData, signal: AbortSignal.timeout(this.timeout_ms), ...this.fetchOptions, }); - - if (!response.ok) { - const result = await response.json(); - if (result.detail && result.detail.includes("already exists")) { - throw new Error(`Dataset ${fileName} already exists`); - } - throw new Error( - `Failed to upload CSV: ${response.status} ${response.statusText}` - ); - } + await raiseForStatus(response, "upload CSV"); const result = await response.json(); return result as Dataset; @@ -1771,33 +1949,43 @@ export class Client { { description, dataType, - }: { description?: string; dataType?: DataType } = {} + inputsSchema, + outputsSchema, + metadata, + }: { + description?: string; + dataType?: DataType; + inputsSchema?: KVMap; + outputsSchema?: KVMap; + metadata?: RecordStringAny; + } = {} ): Promise { const body: KVMap = { name, description, + extra: metadata ? 
{ metadata } : undefined, }; if (dataType) { body.data_type = dataType; } - const response = await this.caller.call(fetch, `${this.apiUrl}/datasets`, { - method: "POST", - headers: { ...this.headers, "Content-Type": "application/json" }, - body: JSON.stringify(body), - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); - - if (!response.ok) { - const result = await response.json(); - if (result.detail && result.detail.includes("already exists")) { - throw new Error(`Dataset ${name} already exists`); - } - throw new Error( - `Failed to create dataset ${response.status} ${response.statusText}` - ); + if (inputsSchema) { + body.inputs_schema_definition = inputsSchema; } - + if (outputsSchema) { + body.outputs_schema_definition = outputsSchema; + } + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/datasets`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(body), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "create dataset"); const result = await response.json(); return result as Dataset; } @@ -1925,12 +2113,14 @@ export class Client { datasetIds, datasetName, datasetNameContains, + metadata, }: { limit?: number; offset?: number; datasetIds?: string[]; datasetName?: string; datasetNameContains?: string; + metadata?: RecordStringAny; } = {}): AsyncIterable { const path = "/datasets"; const params = new URLSearchParams({ @@ -1948,6 +2138,9 @@ export class Client { if (datasetNameContains !== undefined) { params.append("name_contains", datasetNameContains); } + if (metadata !== undefined) { + params.append("metadata", JSON.stringify(metadata)); + } for await (const datasets of this._getPaginated(path, params)) { yield* datasets; } @@ -1974,7 +2167,7 @@ export class Client { assertUuid(_datasetId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), 
`${this.apiUrl}/datasets/${_datasetId}`, { method: "PATCH", @@ -1984,11 +2177,7 @@ export class Client { ...this.fetchOptions, } ); - if (!response.ok) { - throw new Error( - `Failed to update dataset ${_datasetId}: ${response.status} ${response.statusText}` - ); - } + await raiseForStatus(response, "update dataset"); return (await response.json()) as Dataset; } @@ -2134,67 +2323,173 @@ export class Client { } else { throw new Error("Must provide datasetName or datasetId"); } - const response = await this.caller.call(fetch, this.apiUrl + path, { - method: "DELETE", - headers: this.headers, - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); - if (!response.ok) { - throw new Error( - `Failed to delete ${path}: ${response.status} ${response.statusText}` - ); - } + const response = await this.caller.call( + _getFetchImplementation(), + this.apiUrl + path, + { + method: "DELETE", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, `delete ${path}`); + await response.json(); } - public async createExample( - inputs: KVMap, - outputs: KVMap, - { - datasetId, - datasetName, - createdAt, - exampleId, - metadata, - split, - }: CreateExampleOptions - ): Promise { + public async indexDataset({ + datasetId, + datasetName, + tag, + }: { + datasetId?: string; + datasetName?: string; + tag?: string; + }): Promise { let datasetId_ = datasetId; - if (datasetId_ === undefined && datasetName === undefined) { + if (!datasetId_ && !datasetName) { throw new Error("Must provide either datasetName or datasetId"); - } else if (datasetId_ !== undefined && datasetName !== undefined) { + } else if (datasetId_ && datasetName) { throw new Error("Must provide either datasetName or datasetId, not both"); - } else if (datasetId_ === undefined) { + } else if (!datasetId_) { const dataset = await this.readDataset({ datasetName }); datasetId_ = dataset.id; } + assertUuid(datasetId_); - 
const createdAt_ = createdAt || new Date(); - const data: ExampleCreate = { - dataset_id: datasetId_, - inputs, - outputs, - created_at: createdAt_?.toISOString(), - id: exampleId, - metadata, - split, + const data = { + tag: tag, }; + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/datasets/${datasetId_}/index`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "index dataset"); + await response.json(); + } - const response = await this.caller.call(fetch, `${this.apiUrl}/examples`, { - method: "POST", - headers: { ...this.headers, "Content-Type": "application/json" }, - body: JSON.stringify(data), - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); + /** + * Lets you run a similarity search query on a dataset. + * + * Requires the dataset to be indexed. Please see the `indexDataset` method to set up indexing. + * + * @param inputs The input on which to run the similarity search. Must have the + * same schema as the dataset. + * + * @param datasetId The dataset to search for similar examples. + * + * @param limit The maximum number of examples to return. Will return the top `limit` most + * similar examples in order of most similar to least similar. If no similar + * examples are found, random examples will be returned. + * + * @param filter A filter string to apply to the search. Only examples will be returned that + * match the filter string. Some examples of filters + * + * - eq(metadata.mykey, "value") + * - and(neq(metadata.my.nested.key, "value"), neq(metadata.mykey, "value")) + * - or(eq(metadata.mykey, "value"), eq(metadata.mykey, "othervalue")) + * + * @returns A list of similar examples. 
+ * + * + * @example + * dataset_id = "123e4567-e89b-12d3-a456-426614174000" + * inputs = {"text": "How many people live in Berlin?"} + * limit = 5 + * examples = await client.similarExamples(inputs, dataset_id, limit) + */ + public async similarExamples( + inputs: KVMap, + datasetId: string, + limit: number, + { + filter, + }: { + filter?: string; + } = {} + ): Promise { + const data: KVMap = { + limit: limit, + inputs: inputs, + }; - if (!response.ok) { - throw new Error( - `Failed to create example: ${response.status} ${response.statusText}` - ); + if (filter !== undefined) { + data["filter"] = filter; + } + + assertUuid(datasetId); + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/datasets/${datasetId}/search`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "fetch similar examples"); + const result = await response.json(); + return result["examples"] as ExampleSearch[]; + } + + public async createExample( + inputs: KVMap, + outputs: KVMap, + { + datasetId, + datasetName, + createdAt, + exampleId, + metadata, + split, + sourceRunId, + }: CreateExampleOptions + ): Promise { + let datasetId_ = datasetId; + if (datasetId_ === undefined && datasetName === undefined) { + throw new Error("Must provide either datasetName or datasetId"); + } else if (datasetId_ !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId_ === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; } + const createdAt_ = createdAt || new Date(); + const data: ExampleCreate = { + dataset_id: datasetId_, + inputs, + outputs, + created_at: createdAt_?.toISOString(), + id: exampleId, + metadata, + split, + source_run_id: sourceRunId, + }; + + 
const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/examples`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "create example"); const result = await response.json(); return result as Example; } @@ -2241,7 +2536,7 @@ export class Client { }); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/examples/bulk`, { method: "POST", @@ -2251,13 +2546,7 @@ export class Client { ...this.fetchOptions, } ); - - if (!response.ok) { - throw new Error( - `Failed to create examples: ${response.status} ${response.statusText}` - ); - } - + await raiseForStatus(response, "create examples"); const result = await response.json(); return result as Example[]; } @@ -2305,6 +2594,9 @@ export class Client { splits, inlineS3Urls, metadata, + limit, + offset, + filter, }: { datasetId?: string; datasetName?: string; @@ -2313,6 +2605,9 @@ export class Client { splits?: string[]; inlineS3Urls?: boolean; metadata?: KVMap; + limit?: number; + offset?: number; + filter?: string; } = {}): AsyncIterable { let datasetId_; if (datasetId !== undefined && datasetName !== undefined) { @@ -2350,28 +2645,44 @@ export class Client { const serializedMetadata = JSON.stringify(metadata); params.append("metadata", serializedMetadata); } + if (limit !== undefined) { + params.append("limit", limit.toString()); + } + if (offset !== undefined) { + params.append("offset", offset.toString()); + } + if (filter !== undefined) { + params.append("filter", filter); + } + let i = 0; for await (const examples of this._getPaginated( "/examples", params )) { - yield* examples; + for (const example of examples) { + yield example; + i++; + } + if (limit !== undefined && i >= limit) { + break; + } } } public async deleteExample(exampleId: string): Promise { 
assertUuid(exampleId); const path = `/examples/${exampleId}`; - const response = await this.caller.call(fetch, this.apiUrl + path, { - method: "DELETE", - headers: this.headers, - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); - if (!response.ok) { - throw new Error( - `Failed to delete ${path}: ${response.status} ${response.statusText}` - ); - } + const response = await this.caller.call( + _getFetchImplementation(), + this.apiUrl + path, + { + method: "DELETE", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, `delete ${path}`); await response.json(); } @@ -2381,7 +2692,7 @@ export class Client { ): Promise { assertUuid(exampleId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/examples/${exampleId}`, { method: "PATCH", @@ -2391,15 +2702,119 @@ export class Client { ...this.fetchOptions, } ); - if (!response.ok) { - throw new Error( - `Failed to update example ${exampleId}: ${response.status} ${response.statusText}` - ); - } + await raiseForStatus(response, "update example"); + const result = await response.json(); + return result; + } + + public async updateExamples(update: ExampleUpdateWithId[]): Promise { + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/examples/bulk`, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(update), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "update examples"); const result = await response.json(); return result; } + public async listDatasetSplits({ + datasetId, + datasetName, + asOf, + }: { + datasetId?: string; + datasetName?: string; + asOf?: string | Date; + }): Promise { + let datasetId_: string; + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide 
dataset name or ID"); + } else if (datasetId !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; + } else { + datasetId_ = datasetId; + } + + assertUuid(datasetId_); + + const params = new URLSearchParams(); + const dataset_version = asOf + ? typeof asOf === "string" + ? asOf + : asOf?.toISOString() + : undefined; + if (dataset_version) { + params.append("as_of", dataset_version); + } + + const response = await this._get( + `/datasets/${datasetId_}/splits`, + params + ); + return response; + } + + public async updateDatasetSplits({ + datasetId, + datasetName, + splitName, + exampleIds, + remove = false, + }: { + datasetId?: string; + datasetName?: string; + splitName: string; + exampleIds: string[]; + remove?: boolean; + }): Promise { + let datasetId_: string; + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide dataset name or ID"); + } else if (datasetId !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; + } else { + datasetId_ = datasetId; + } + + assertUuid(datasetId_); + + const data = { + split_name: splitName, + examples: exampleIds.map((id) => { + assertUuid(id); + return id; + }), + remove, + }; + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/datasets/${datasetId_}/splits`, + { + method: "PUT", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "update dataset splits", true); + } + /** * @deprecated This method is deprecated 
and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead. */ @@ -2511,14 +2926,14 @@ export class Client { session_id: projectId, }; const url = `${this.apiUrl}/feedback`; - const response = await this.caller.call(fetch, url, { + const response = await this.caller.call(_getFetchImplementation(), url, { method: "POST", headers: { ...this.headers, "Content-Type": "application/json" }, body: JSON.stringify(feedback), signal: AbortSignal.timeout(this.timeout_ms), ...this.fetchOptions, }); - await raiseForStatus(response, "create feedback"); + await raiseForStatus(response, "create feedback", true); return feedback as Feedback; } @@ -2551,7 +2966,7 @@ export class Client { } assertUuid(feedbackId); const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/feedback/${feedbackId}`, { method: "PATCH", @@ -2561,7 +2976,7 @@ export class Client { ...this.fetchOptions, } ); - await raiseForStatus(response, "update feedback"); + await raiseForStatus(response, "update feedback", true); } public async readFeedback(feedbackId: string): Promise { @@ -2574,17 +2989,17 @@ export class Client { public async deleteFeedback(feedbackId: string): Promise { assertUuid(feedbackId); const path = `/feedback/${feedbackId}`; - const response = await this.caller.call(fetch, this.apiUrl + path, { - method: "DELETE", - headers: this.headers, - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - }); - if (!response.ok) { - throw new Error( - `Failed to delete ${path}: ${response.status} ${response.statusText}` - ); - } + const response = await this.caller.call( + _getFetchImplementation(), + this.apiUrl + path, + { + method: "DELETE", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, `delete ${path}`); await response.json(); } @@ -2663,7 +3078,7 @@ export class Client { } const response = await 
this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/feedback/tokens`, { method: "POST", @@ -2723,7 +3138,7 @@ export class Client { if (metadata) body.extra["metadata"] = metadata; const response = await this.caller.call( - fetch, + _getFetchImplementation(), `${this.apiUrl}/datasets/comparative`, { method: "POST", @@ -2817,4 +3232,779 @@ export class Client { ); return results; } + + /** + * API for managing annotation queues + */ + + /** + * List the annotation queues on the LangSmith API. + * @param options - The options for listing annotation queues + * @param options.queueIds - The IDs of the queues to filter by + * @param options.name - The name of the queue to filter by + * @param options.nameContains - The substring that the queue name should contain + * @param options.limit - The maximum number of queues to return + * @returns An iterator of AnnotationQueue objects + */ + public async *listAnnotationQueues( + options: { + queueIds?: string[]; + name?: string; + nameContains?: string; + limit?: number; + } = {} + ): AsyncIterableIterator { + const { queueIds, name, nameContains, limit } = options; + const params = new URLSearchParams(); + if (queueIds) { + queueIds.forEach((id, i) => { + assertUuid(id, `queueIds[${i}]`); + params.append("ids", id); + }); + } + if (name) params.append("name", name); + if (nameContains) params.append("name_contains", nameContains); + params.append( + "limit", + (limit !== undefined ? Math.min(limit, 100) : 100).toString() + ); + + let count = 0; + for await (const queues of this._getPaginated( + "/annotation-queues", + params + )) { + yield* queues; + count++; + if (limit !== undefined && count >= limit) break; + } + } + + /** + * Create an annotation queue on the LangSmith API. 
+ * @param options - The options for creating an annotation queue + * @param options.name - The name of the annotation queue + * @param options.description - The description of the annotation queue + * @param options.queueId - The ID of the annotation queue + * @returns The created AnnotationQueue object + */ + public async createAnnotationQueue(options: { + name: string; + description?: string; + queueId?: string; + }): Promise { + const { name, description, queueId } = options; + const body = { + name, + description, + id: queueId || uuid.v4(), + }; + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/annotation-queues`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify( + Object.fromEntries( + Object.entries(body).filter(([_, v]) => v !== undefined) + ) + ), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "create annotation queue"); + const data = await response.json(); + return data as AnnotationQueue; + } + + /** + * Read an annotation queue with the specified queue ID. + * @param queueId - The ID of the annotation queue to read + * @returns The AnnotationQueue object + */ + public async readAnnotationQueue(queueId: string): Promise { + // TODO: Replace when actual endpoint is added + const queueIteratorResult = await this.listAnnotationQueues({ + queueIds: [queueId], + }).next(); + if (queueIteratorResult.done) { + throw new Error(`Annotation queue with ID ${queueId} not found`); + } + return queueIteratorResult.value; + } + + /** + * Update an annotation queue with the specified queue ID. 
+ * @param queueId - The ID of the annotation queue to update + * @param options - The options for updating the annotation queue + * @param options.name - The new name for the annotation queue + * @param options.description - The new description for the annotation queue + */ + public async updateAnnotationQueue( + queueId: string, + options: { + name: string; + description?: string; + } + ): Promise { + const { name, description } = options; + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/annotation-queues/${assertUuid(queueId, "queueId")}`, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify({ name, description }), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "update annotation queue"); + } + + /** + * Delete an annotation queue with the specified queue ID. + * @param queueId - The ID of the annotation queue to delete + */ + public async deleteAnnotationQueue(queueId: string): Promise { + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/annotation-queues/${assertUuid(queueId, "queueId")}`, + { + method: "DELETE", + headers: { ...this.headers, Accept: "application/json" }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "delete annotation queue"); + } + + /** + * Add runs to an annotation queue with the specified queue ID. 
+ * @param queueId - The ID of the annotation queue + * @param runIds - The IDs of the runs to be added to the annotation queue + */ + public async addRunsToAnnotationQueue( + queueId: string, + runIds: string[] + ): Promise { + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/annotation-queues/${assertUuid(queueId, "queueId")}/runs`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify( + runIds.map((id, i) => assertUuid(id, `runIds[${i}]`).toString()) + ), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, "add runs to annotation queue"); + } + + /** + * Get a run from an annotation queue at the specified index. + * @param queueId - The ID of the annotation queue + * @param index - The index of the run to retrieve + * @returns A Promise that resolves to a RunWithAnnotationQueueInfo object + * @throws {Error} If the run is not found at the given index or for other API-related errors + */ + public async getRunFromAnnotationQueue( + queueId: string, + index: number + ): Promise { + const baseUrl = `/annotation-queues/${assertUuid(queueId, "queueId")}/run`; + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}${baseUrl}/${index}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "get run from annotation queue"); + return await response.json(); + } + + protected async _currentTenantIsOwner(owner: string): Promise { + const settings = await this._getSettings(); + return owner == "-" || settings.tenant_handle === owner; + } + + protected async _ownerConflictError( + action: string, + owner: string + ): Promise { + const settings = await this._getSettings(); + return new Error( + `Cannot ${action} for another tenant.\n + Current tenant: ${settings.tenant_handle}\n + 
Requested tenant: ${owner}` + ); + } + + protected async _getLatestCommitHash( + promptOwnerAndName: string + ): Promise { + const res = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${1}&offset=${0}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const json = await res.json(); + if (!res.ok) { + const detail = + typeof json.detail === "string" + ? json.detail + : JSON.stringify(json.detail); + const error = new Error( + `Error ${res.status}: ${res.statusText}\n${detail}` + ); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (error as any).statusCode = res.status; + throw error; + } + + if (json.commits.length === 0) { + return undefined; + } + + return json.commits[0].commit_hash; + } + + protected async _likeOrUnlikePrompt( + promptIdentifier: string, + like: boolean + ): Promise { + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/likes/${owner}/${promptName}`, + { + method: "POST", + body: JSON.stringify({ like: like }), + headers: { ...this.headers, "Content-Type": "application/json" }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + await raiseForStatus(response, `${like ? 
"like" : "unlike"} prompt`); + + return await response.json(); + } + + protected async _getPromptUrl(promptIdentifier: string): Promise { + const [owner, promptName, commitHash] = + parsePromptIdentifier(promptIdentifier); + if (!(await this._currentTenantIsOwner(owner))) { + if (commitHash !== "latest") { + return `${this.getHostUrl()}/hub/${owner}/${promptName}/${commitHash.substring( + 0, + 8 + )}`; + } else { + return `${this.getHostUrl()}/hub/${owner}/${promptName}`; + } + } else { + const settings = await this._getSettings(); + if (commitHash !== "latest") { + return `${this.getHostUrl()}/prompts/${promptName}/${commitHash.substring( + 0, + 8 + )}?organizationId=${settings.id}`; + } else { + return `${this.getHostUrl()}/prompts/${promptName}?organizationId=${ + settings.id + }`; + } + } + } + + public async promptExists(promptIdentifier: string): Promise { + const prompt = await this.getPrompt(promptIdentifier); + return !!prompt; + } + + public async likePrompt( + promptIdentifier: string + ): Promise { + return this._likeOrUnlikePrompt(promptIdentifier, true); + } + + public async unlikePrompt( + promptIdentifier: string + ): Promise { + return this._likeOrUnlikePrompt(promptIdentifier, false); + } + + public async *listCommits( + promptOwnerAndName: string + ): AsyncIterableIterator { + for await (const commits of this._getPaginated< + PromptCommit, + ListCommitsResponse + >( + `/commits/${promptOwnerAndName}/`, + new URLSearchParams(), + (res) => res.commits + )) { + yield* commits; + } + } + + public async *listPrompts(options?: { + isPublic?: boolean; + isArchived?: boolean; + sortField?: PromptSortField; + query?: string; + }): AsyncIterableIterator { + const params = new URLSearchParams(); + params.append("sort_field", options?.sortField ?? 
"updated_at"); + params.append("sort_direction", "desc"); + params.append("is_archived", (!!options?.isArchived).toString()); + + if (options?.isPublic !== undefined) { + params.append("is_public", options.isPublic.toString()); + } + + if (options?.query) { + params.append("query", options.query); + } + + for await (const prompts of this._getPaginated( + "/repos", + params, + (res) => res.repos + )) { + yield* prompts; + } + } + + public async getPrompt(promptIdentifier: string): Promise { + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (response.status === 404) { + return null; + } + await raiseForStatus(response, "get prompt"); + + const result = await response.json(); + if (result.repo) { + return result.repo as Prompt; + } else { + return null; + } + } + + public async createPrompt( + promptIdentifier: string, + options?: { + description?: string; + readme?: string; + tags?: string[]; + isPublic?: boolean; + } + ): Promise { + const settings = await this._getSettings(); + if (options?.isPublic && !settings.tenant_handle) { + throw new Error( + `Cannot create a public prompt without first\n + creating a LangChain Hub handle. 
+ You can add a handle by creating a public prompt at:\n + https://smith.langchain.com/prompts` + ); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + if (!(await this._currentTenantIsOwner(owner))) { + throw await this._ownerConflictError("create a prompt", owner); + } + + const data = { + repo_handle: promptName, + ...(options?.description && { description: options.description }), + ...(options?.readme && { readme: options.readme }), + ...(options?.tags && { tags: options.tags }), + is_public: !!options?.isPublic, + }; + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/repos/`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "create prompt"); + + const { repo } = await response.json(); + return repo as Prompt; + } + + public async createCommit( + promptIdentifier: string, + object: any, + options?: { + parentCommitHash?: string; + } + ): Promise { + if (!(await this.promptExists(promptIdentifier))) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const resolvedParentCommitHash = + options?.parentCommitHash === "latest" || !options?.parentCommitHash + ? 
await this._getLatestCommitHash(`${owner}/${promptName}`) + : options?.parentCommitHash; + + const payload = { + manifest: JSON.parse(JSON.stringify(object)), + parent_commit: resolvedParentCommitHash, + }; + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/commits/${owner}/${promptName}`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(payload), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "create commit"); + + const result = await response.json(); + return this._getPromptUrl( + `${owner}/${promptName}${ + result.commit_hash ? `:${result.commit_hash}` : "" + }` + ); + } + + public async updatePrompt( + promptIdentifier: string, + options?: { + description?: string; + readme?: string; + tags?: string[]; + isPublic?: boolean; + isArchived?: boolean; + } + ): Promise> { + if (!(await this.promptExists(promptIdentifier))) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName] = parsePromptIdentifier(promptIdentifier); + + if (!(await this._currentTenantIsOwner(owner))) { + throw await this._ownerConflictError("update a prompt", owner); + } + + const payload: Record = {}; + + if (options?.description !== undefined) + payload.description = options.description; + if (options?.readme !== undefined) payload.readme = options.readme; + if (options?.tags !== undefined) payload.tags = options.tags; + if (options?.isPublic !== undefined) payload.is_public = options.isPublic; + if (options?.isArchived !== undefined) + payload.is_archived = options.isArchived; + + // Check if payload is empty + if (Object.keys(payload).length === 0) { + throw new Error("No valid update options provided"); + } + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "PATCH", + body: 
JSON.stringify(payload), + headers: { + ...this.headers, + "Content-Type": "application/json", + }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "update prompt"); + + return response.json(); + } + + public async deletePrompt(promptIdentifier: string): Promise { + if (!(await this.promptExists(promptIdentifier))) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + + if (!(await this._currentTenantIsOwner(owner))) { + throw await this._ownerConflictError("delete a prompt", owner); + } + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "DELETE", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + return await response.json(); + } + + public async pullPromptCommit( + promptIdentifier: string, + options?: { + includeModel?: boolean; + } + ): Promise { + const [owner, promptName, commitHash] = + parsePromptIdentifier(promptIdentifier); + const serverInfo = await this._getServerInfo(); + const useOptimization = isVersionGreaterOrEqual( + serverInfo.version, + "0.5.23" + ); + + let passedCommitHash = commitHash; + + if (!useOptimization && commitHash === "latest") { + const latestCommitHash = await this._getLatestCommitHash( + `${owner}/${promptName}` + ); + if (!latestCommitHash) { + throw new Error("No commits found"); + } else { + passedCommitHash = latestCommitHash; + } + } + + const response = await this.caller.call( + _getFetchImplementation(), + `${this.apiUrl}/commits/${owner}/${promptName}/${passedCommitHash}${ + options?.includeModel ? 
"?include_model=true" : "" + }`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "pull prompt commit"); + + const result = await response.json(); + + return { + owner, + repo: promptName, + commit_hash: result.commit_hash, + manifest: result.manifest, + examples: result.examples, + }; + } + + /** + * This method should not be used directly, use `import { pull } from "langchain/hub"` instead. + * Using this method directly returns the JSON string of the prompt rather than a LangChain object. + * @private + */ + public async _pullPrompt( + promptIdentifier: string, + options?: { + includeModel?: boolean; + } + ): Promise { + const promptObject = await this.pullPromptCommit(promptIdentifier, { + includeModel: options?.includeModel, + }); + const prompt = JSON.stringify(promptObject.manifest); + return prompt; + } + + public async pushPrompt( + promptIdentifier: string, + options?: { + object?: any; + parentCommitHash?: string; + isPublic?: boolean; + description?: string; + readme?: string; + tags?: string[]; + } + ): Promise { + // Create or update prompt metadata + if (await this.promptExists(promptIdentifier)) { + if (options && Object.keys(options).some((key) => key !== "object")) { + await this.updatePrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); + } + } else { + await this.createPrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); + } + + if (!options?.object) { + return await this._getPromptUrl(promptIdentifier); + } + + // Create a commit with the new manifest + const url = await this.createCommit(promptIdentifier, options?.object, { + parentCommitHash: options?.parentCommitHash, + }); + return url; + } + + /** + * Clone a public dataset to your own 
langsmith tenant. + * This operation is idempotent. If you already have a dataset with the given name, + * this function will do nothing. + + * @param {string} tokenOrUrl The token of the public dataset to clone. + * @param {Object} [options] Additional options for cloning the dataset. + * @param {string} [options.sourceApiUrl] The URL of the langsmith server where the data is hosted. Defaults to the API URL of your current client. + * @param {string} [options.datasetName] The name of the dataset to create in your tenant. Defaults to the name of the public dataset. + * @returns {Promise} + */ + async clonePublicDataset( + tokenOrUrl: string, + options: { + sourceApiUrl?: string; + datasetName?: string; + } = {} + ): Promise { + const { sourceApiUrl = this.apiUrl, datasetName } = options; + const [parsedApiUrl, tokenUuid] = this.parseTokenOrUrl( + tokenOrUrl, + sourceApiUrl + ); + const sourceClient = new Client({ + apiUrl: parsedApiUrl, + // Placeholder API key not needed anymore in most cases, but + // some private deployments may have API key-based rate limiting + // that would cause this to fail if we provide no value. + apiKey: "placeholder", + }); + + const ds = await sourceClient.readSharedDataset(tokenUuid); + const finalDatasetName = datasetName || ds.name; + + try { + if (await this.hasDataset({ datasetId: finalDatasetName })) { + console.log( + `Dataset ${finalDatasetName} already exists in your tenant. Skipping.` + ); + return; + } + } catch (_) { + // `.hasDataset` will throw an error if the dataset does not exist. + // no-op in that case + } + + // Fetch examples first, then create the dataset + const examples = await sourceClient.listSharedExamples(tokenUuid); + const dataset = await this.createDataset(finalDatasetName, { + description: ds.description, + dataType: ds.data_type || "kv", + inputsSchema: ds.inputs_schema_definition ?? undefined, + outputsSchema: ds.outputs_schema_definition ?? 
undefined, + }); + try { + await this.createExamples({ + inputs: examples.map((e) => e.inputs), + outputs: examples.flatMap((e) => (e.outputs ? [e.outputs] : [])), + datasetId: dataset.id, + }); + } catch (e) { + console.error( + `An error occurred while creating dataset ${finalDatasetName}. ` + + "You should delete it manually." + ); + throw e; + } + } + + private parseTokenOrUrl( + urlOrToken: string, + apiUrl: string, + numParts = 2, + kind = "dataset" + ): [string, string] { + // Try parsing as UUID + try { + assertUuid(urlOrToken); // Will throw if it's not a UUID. + return [apiUrl, urlOrToken]; + } catch (_) { + // no-op if it's not a uuid + } + + // Parse as URL + try { + const parsedUrl = new URL(urlOrToken); + const pathParts = parsedUrl.pathname + .split("/") + .filter((part) => part !== ""); + + if (pathParts.length >= numParts) { + const tokenUuid = pathParts[pathParts.length - numParts]; + return [apiUrl, tokenUuid]; + } else { + throw new Error(`Invalid public ${kind} URL: ${urlOrToken}`); + } + } catch (error) { + throw new Error(`Invalid public ${kind} URL or token: ${urlOrToken}`); + } + } } diff --git a/js/src/env.ts b/js/src/env.ts index 2847b6e73..9d04037a5 100644 --- a/js/src/env.ts +++ b/js/src/env.ts @@ -1,14 +1,11 @@ -import { getEnvironmentVariable } from "./utils/env.js"; +import { getLangSmithEnvironmentVariable } from "./utils/env.js"; export const isTracingEnabled = (tracingEnabled?: boolean): boolean => { if (tracingEnabled !== undefined) { return tracingEnabled; } - const envVars = [ - "LANGSMITH_TRACING_V2", - "LANGCHAIN_TRACING_V2", - "LANGSMITH_TRACING", - "LANGCHAIN_TRACING", - ]; - return !!envVars.find((envVar) => getEnvironmentVariable(envVar) === "true"); + const envVars = ["TRACING_V2", "TRACING"]; + return !!envVars.find( + (envVar) => getLangSmithEnvironmentVariable(envVar) === "true" + ); }; diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 69d71ebf7..cdd5b3ccf 100644 --- 
a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -1,5 +1,4 @@ import { Client, RunTree, RunTreeConfig } from "../index.js"; -import { getLangchainCallbacks } from "../langchain.js"; import { BaseRun, Example, KVMap, Run, TracerSession } from "../schemas.js"; import { traceable } from "../traceable.js"; import { getDefaultRevisionId, getGitInfo } from "../utils/_git.js"; @@ -15,6 +14,7 @@ import { RunEvaluator, runEvaluator, } from "./evaluator.js"; +import { LangSmithConflictError } from "../utils/error.js"; import { v4 as uuidv4 } from "uuid"; // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -140,7 +140,7 @@ interface ExperimentResultRow { * Supports lazily running predictions and evaluations in parallel to facilitate * result streaming and early debugging. */ -class _ExperimentManager { +export class _ExperimentManager { _data?: DataT; _runs?: AsyncGenerator; @@ -312,30 +312,43 @@ class _ExperimentManager { return projectMetadata; } - async _getProject(firstExample: Example): Promise { + async _createProject(firstExample: Example, projectMetadata: KVMap) { + // Create the project, updating the experimentName until we find a unique one. 
let project: TracerSession; - if (!this._experiment) { + const originalExperimentName = this._experimentName; + for (let i = 0; i < 10; i++) { try { - const projectMetadata = await this._getExperimentMetadata(); project = await this.client.createProject({ - projectName: this.experimentName, + projectName: this._experimentName, referenceDatasetId: firstExample.dataset_id, metadata: projectMetadata, description: this._description, }); - this._experiment = project; + return project; } catch (e) { - if (String(e).includes("already exists")) { + // Naming collision + if ((e as LangSmithConflictError)?.name === "LangSmithConflictError") { + const ent = uuidv4().slice(0, 6); + this._experimentName = `${originalExperimentName}-${ent}`; + } else { throw e; } - throw new Error( - `Experiment ${this._experimentName} already exists. Please use a different name.` - ); } - } else { - project = this._experiment; } - return project; + throw new Error( + "Could not generate a unique experiment name within 10 attempts." + + " Please try again with a different name." 
+ ); + } + + async _getProject(firstExample: Example): Promise { + let project: TracerSession; + if (!this._experiment) { + const projectMetadata = await this._getExperimentMetadata(); + project = await this._createProject(firstExample, projectMetadata); + this._experiment = project; + } + return this._experiment; } protected async _printExperimentStart(): Promise { @@ -553,6 +566,7 @@ class _ExperimentManager { : new Date(example.created_at).toISOString(), }, client: fields.client, + tracingEnabled: true, }; const evaluatorResponse = await evaluator.evaluateRun( run, @@ -648,11 +662,12 @@ class _ExperimentManager { this.client._selectEvalResults(summaryEvalResult); aggregateFeedback.push(...flattenedResults); for (const result of flattenedResults) { - const { targetRunId, ...feedback } = result; + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { targetRunId, key, ...feedback } = result; const evaluatorInfo = feedback.evaluatorInfo; delete feedback.evaluatorInfo; - await this.client.createFeedback(null, "key", { + await this.client.createFeedback(null, key, { ...feedback, projectId: projectId, sourceInfo: evaluatorInfo, @@ -867,8 +882,20 @@ async function _forward( const wrappedFn = "invoke" in fn ? traceable(async (inputs) => { - const callbacks = await getLangchainCallbacks(); - return fn.invoke(inputs, { callbacks }); + let langChainCallbacks; + try { + // TODO: Deprecate this and rely on interop on 0.2 minor bump. + const { getLangchainCallbacks } = await import("../langchain.js"); + langChainCallbacks = await getLangchainCallbacks(); + } catch { + // no-op + } + // Issue with retrieving LangChain callbacks, rely on interop + if (langChainCallbacks === undefined) { + return await fn.invoke(inputs); + } else { + return await fn.invoke(inputs, { callbacks: langChainCallbacks }); + } }, options) : traceable(fn, options); @@ -882,7 +909,7 @@ async function _forward( if (!run) { throw new Error(`Run not created by target function. 
This is most likely due to tracing not being enabled.\n -Try setting "LANGCHAIN_TRACING_V2=true" in your environment.`); +Try setting "LANGSMITH_TRACING=true" in your environment.`); } return { diff --git a/js/src/evaluation/langchain.ts b/js/src/evaluation/langchain.ts index 87010c7ec..bbd2a5149 100644 --- a/js/src/evaluation/langchain.ts +++ b/js/src/evaluation/langchain.ts @@ -1,5 +1,6 @@ -import type { Run, Example } from "../schemas.js"; +// eslint-disable-next-line import/no-extraneous-dependencies import { type LoadEvaluatorOptions, loadEvaluator } from "langchain/evaluation"; +import type { Run, Example } from "../schemas.js"; import { getLangchainCallbacks } from "../langchain.js"; function isStringifiable( @@ -27,6 +28,8 @@ function getPrimitiveValue(value: unknown) { } /** + * @deprecated Use `evaluate` instead. + * * This utility function loads a LangChain string evaluator and returns a function * which can be used by newer `evaluate` function. * diff --git a/js/src/index.ts b/js/src/index.ts index 1ecd0341b..96d782360 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -1,4 +1,4 @@ -export { Client } from "./client.js"; +export { Client, type ClientConfig } from "./client.js"; export type { Dataset, @@ -11,5 +11,7 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; +export { overrideFetchImplementation } from "./singletons/fetch.js"; + // Update using yarn bump-version -export const __version__ = "0.1.33"; +export const __version__ = "0.1.61"; diff --git a/js/src/langchain.ts b/js/src/langchain.ts index a3a4de845..ce815de9c 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -1,5 +1,10 @@ +// These `@langchain/core` imports are intentionally not peer dependencies +// to avoid package manager issues around circular dependencies. 
+// eslint-disable-next-line import/no-extraneous-dependencies import { CallbackManager } from "@langchain/core/callbacks/manager"; +// eslint-disable-next-line import/no-extraneous-dependencies import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; +// eslint-disable-next-line import/no-extraneous-dependencies import { Runnable, RunnableConfig, @@ -77,12 +82,21 @@ export async function getLangchainCallbacks( } if (langChainTracer != null) { - Object.assign(langChainTracer, { - runMap, - client: runTree.client, - projectName: runTree.project_name || langChainTracer.projectName, - exampleId: runTree.reference_example_id || langChainTracer.exampleId, - }); + if ( + "updateFromRunTree" in langChainTracer && + typeof langChainTracer === "function" + ) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore @langchain/core can use a different version of LangSmith + langChainTracer.updateFromRunTree(runTree); + } else { + Object.assign(langChainTracer, { + runMap, + client: runTree.client, + projectName: runTree.project_name || langChainTracer.projectName, + exampleId: runTree.reference_example_id || langChainTracer.exampleId, + }); + } } return callbacks; diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index b1219d819..5cb2aea97 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -80,16 +80,19 @@ export interface RunnableConfigLike { interface CallbackManagerLike { handlers: TracerLike[]; getParentRunId?: () => string | undefined; + copy?: () => CallbackManagerLike; } interface TracerLike { name: string; } + interface LangChainTracerLike extends TracerLike { name: "langchain_tracer"; projectName: string; getRun?: (id: string) => RunTree | undefined; client: Client; + updateFromRunTree?: (runTree: RunTree) => void; } interface HeadersLike { @@ -143,6 +146,8 @@ class Baggage { } export class RunTree implements BaseRun { + private static sharedClient: Client | null = null; + id: string; name: 
RunTreeConfig["name"]; run_type: string; @@ -170,7 +175,7 @@ export class RunTree implements BaseRun { constructor(originalConfig: RunTreeConfig) { const defaultConfig = RunTree.getDefaultConfig(); const { metadata, ...config } = originalConfig; - const client = config.client ?? new Client(); + const client = config.client ?? RunTree.getSharedClient(); const dedupedMetadata = { ...metadata, ...config?.extra?.metadata, @@ -223,6 +228,13 @@ export class RunTree implements BaseRun { }; } + private static getSharedClient(): Client { + if (!RunTree.sharedClient) { + RunTree.sharedClient = new Client(); + } + return RunTree.sharedClient; + } + public createChild(config: RunTreeConfig): RunTree { const child_execution_order = this.child_execution_order + 1; @@ -236,6 +248,36 @@ export class RunTree implements BaseRun { child_execution_order: child_execution_order, }); + type ExtraWithSymbol = Record; + const LC_CHILD = Symbol.for("lc:child_config"); + + const presentConfig = + (config.extra as ExtraWithSymbol | undefined)?.[LC_CHILD] ?? + (this.extra as ExtraWithSymbol)[LC_CHILD]; + + // tracing for LangChain is defined by the _parentRunId and runMap of the tracer + if (isRunnableConfigLike(presentConfig)) { + const newConfig: RunnableConfigLike = { ...presentConfig }; + const callbacks: CallbackManagerLike | unknown[] | undefined = + isCallbackManagerLike(newConfig.callbacks) + ? 
newConfig.callbacks.copy?.() + : undefined; + + if (callbacks) { + // update the parent run id + Object.assign(callbacks, { _parentRunId: child.id }); + + // only populate if we're in a newer LC.JS version + callbacks.handlers + ?.find(isLangChainTracerLike) + ?.updateFromRunTree?.(child); + + newConfig.callbacks = callbacks; + } + + (child.extra as ExtraWithSymbol)[LC_CHILD] = newConfig; + } + // propagate child_execution_order upwards const visited = new Set(); let current: RunTree | undefined = this as RunTree; @@ -314,36 +356,44 @@ export class RunTree implements BaseRun { } async postRun(excludeChildRuns = true): Promise { - const runtimeEnv = await getRuntimeEnvironment(); - const runCreate = await this._convertToCreate(this, runtimeEnv, true); - await this.client.createRun(runCreate); - - if (!excludeChildRuns) { - warnOnce( - "Posting with excludeChildRuns=false is deprecated and will be removed in a future version." - ); - for (const childRun of this.child_runs) { - await childRun.postRun(false); + try { + const runtimeEnv = await getRuntimeEnvironment(); + const runCreate = await this._convertToCreate(this, runtimeEnv, true); + await this.client.createRun(runCreate); + + if (!excludeChildRuns) { + warnOnce( + "Posting with excludeChildRuns=false is deprecated and will be removed in a future version." 
+ ); + for (const childRun of this.child_runs) { + await childRun.postRun(false); + } } + } catch (error) { + console.error(`Error in postRun for run ${this.id}:`, error); } } async patchRun(): Promise { - const runUpdate: RunUpdate = { - end_time: this.end_time, - error: this.error, - inputs: this.inputs, - outputs: this.outputs, - parent_run_id: this.parent_run?.id, - reference_example_id: this.reference_example_id, - extra: this.extra, - events: this.events, - dotted_order: this.dotted_order, - trace_id: this.trace_id, - tags: this.tags, - }; - - await this.client.updateRun(this.id, runUpdate); + try { + const runUpdate: RunUpdate = { + end_time: this.end_time, + error: this.error, + inputs: this.inputs, + outputs: this.outputs, + parent_run_id: this.parent_run?.id, + reference_example_id: this.reference_example_id, + extra: this.extra, + events: this.events, + dotted_order: this.dotted_order, + trace_id: this.trace_id, + tags: this.tags, + }; + + await this.client.updateRun(this.id, runUpdate); + } catch (error) { + console.error(`Error in patchRun for run ${this.id}`, error); + } } toJSON() { @@ -388,6 +438,8 @@ export class RunTree implements BaseRun { const parentRunTree = new RunTree({ name: parentRun.name, id: parentRun.id, + trace_id: parentRun.trace_id, + dotted_order: parentRun.dotted_order, client, tracingEnabled, project_name: projectName, @@ -475,15 +527,26 @@ export function isRunTree(x?: unknown): x is RunTree { ); } -function containsLangChainTracerLike(x?: unknown): x is LangChainTracerLike[] { +function isLangChainTracerLike(x: unknown): x is LangChainTracerLike { return ( - Array.isArray(x) && - x.some((callback: unknown) => { - return ( - typeof (callback as LangChainTracerLike).name === "string" && - (callback as LangChainTracerLike).name === "langchain_tracer" - ); - }) + typeof x === "object" && + x != null && + typeof (x as LangChainTracerLike).name === "string" && + (x as LangChainTracerLike).name === "langchain_tracer" + ); +} + 
+function containsLangChainTracerLike(x: unknown): x is LangChainTracerLike[] { + return ( + Array.isArray(x) && x.some((callback) => isLangChainTracerLike(callback)) + ); +} + +function isCallbackManagerLike(x: unknown): x is CallbackManagerLike { + return ( + typeof x === "object" && + x != null && + Array.isArray((x as CallbackManagerLike).handlers) ); } diff --git a/js/src/schemas.ts b/js/src/schemas.ts index 874c49a84..0af56312b 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -60,6 +60,7 @@ export interface BaseExample { inputs: KVMap; outputs?: KVMap; metadata?: KVMap; + source_run_id?: string; } /** @@ -247,11 +248,22 @@ export interface ExampleUpdate { metadata?: KVMap; split?: string | string[]; } + +export interface ExampleUpdateWithId extends ExampleUpdate { + id: string; +} + +export interface ExampleSearch extends BaseExample { + id: string; +} + export interface BaseDataset { name: string; description: string; tenant_id: string; data_type?: DataType; + inputs_schema_definition?: KVMap; + outputs_schema_definition?: KVMap; } export interface Dataset extends BaseDataset { @@ -404,8 +416,96 @@ export type RetrieverOutput = Array<{ export interface InvocationParamsSchema { ls_provider?: string; ls_model_name?: string; - ls_model_type: "chat" | "text"; + ls_model_type: "chat" | "llm"; ls_temperature?: number; ls_max_tokens?: number; ls_stop?: string[]; } + +export interface PromptCommit { + owner: string; + repo: string; + commit_hash: string; + manifest: Record; + examples: Array>; +} + +export interface Prompt { + repo_handle: string; + description?: string; + readme?: string; + id: string; + tenant_id: string; + created_at: string; + updated_at: string; + is_public: boolean; + is_archived: boolean; + tags: string[]; + original_repo_id?: string; + upstream_repo_id?: string; + owner?: string; + full_name: string; + num_likes: number; + num_downloads: number; + num_views: number; + liked_by_auth_user: boolean; + last_commit_hash?: string; + 
num_commits: number; + original_repo_full_name?: string; + upstream_repo_full_name?: string; +} + +export interface ListPromptsResponse { + repos: Prompt[]; + total: number; +} + +export interface ListCommitsResponse { + commits: PromptCommit[]; + total: number; +} + +export type PromptSortField = + | "num_downloads" + | "num_views" + | "updated_at" + | "num_likes"; + +export interface LikePromptResponse { + likes: number; +} + +export interface LangSmithSettings { + id: string; + display_name: string; + created_at: string; + tenant_handle?: string; +} + +export interface AnnotationQueue { + /** The unique identifier of the annotation queue. */ + id: string; + + /** The name of the annotation queue. */ + name: string; + + /** An optional description of the annotation queue. */ + description?: string; + + /** The timestamp when the annotation queue was created. */ + created_at: string; + + /** The timestamp when the annotation queue was last updated. */ + updated_at: string; + + /** The ID of the tenant associated with the annotation queue. */ + tenant_id: string; +} + +export interface RunWithAnnotationQueueInfo extends BaseRun { + /** The last time this run was reviewed. */ + last_reviewed_time?: string; + + /** The time this run was added to the queue. 
 */ + added_at?: string; +} diff --git a/js/src/singletons/fetch.ts b/js/src/singletons/fetch.ts new file mode 100644 index 000000000..a7db0473d --- /dev/null +++ b/js/src/singletons/fetch.ts @@ -0,0 +1,29 @@ +// Wrap the default fetch call due to issues with illegal invocations +// in some environments: +// https://stackoverflow.com/questions/69876859/why-does-bind-fix-failed-to-execute-fetch-on-window-illegal-invocation-err +// @ts-expect-error Broad typing to support a range of fetch implementations +const DEFAULT_FETCH_IMPLEMENTATION = (...args: any[]) => fetch(...args); + +const LANGSMITH_FETCH_IMPLEMENTATION_KEY = Symbol.for( + "ls:fetch_implementation" +); + +/** + * Overrides the fetch implementation used for LangSmith calls. + * You should use this if you need to use an implementation of fetch + * other than the default global (e.g. for dealing with proxies). + * @param fetch The new fetch function to use. + */ +export const overrideFetchImplementation = (fetch: (...args: any[]) => any) => { + (globalThis as any)[LANGSMITH_FETCH_IMPLEMENTATION_KEY] = fetch; +}; + +/** + * @internal + */ +export const _getFetchImplementation: () => (...args: any[]) => any = () => { + return ( + (globalThis as any)[LANGSMITH_FETCH_IMPLEMENTATION_KEY] ?? 
+ DEFAULT_FETCH_IMPLEMENTATION + ); +}; diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index c750bc8ac..0cdd1f936 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -17,20 +17,21 @@ class MockAsyncLocalStorage implements AsyncLocalStorageInterface { } } -class AsyncLocalStorageProvider { - private asyncLocalStorage: AsyncLocalStorageInterface = - new MockAsyncLocalStorage(); +const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage"); - private hasBeenInitialized = false; +const mockAsyncLocalStorage = new MockAsyncLocalStorage(); +class AsyncLocalStorageProvider { getInstance(): AsyncLocalStorageInterface { - return this.asyncLocalStorage; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return (globalThis as any)[TRACING_ALS_KEY] ?? mockAsyncLocalStorage; } initializeGlobalInstance(instance: AsyncLocalStorageInterface) { - if (!this.hasBeenInitialized) { - this.hasBeenInitialized = true; - this.asyncLocalStorage = instance; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + if ((globalThis as any)[TRACING_ALS_KEY] === undefined) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (globalThis as any)[TRACING_ALS_KEY] = instance; } } } diff --git a/js/src/singletons/types.ts b/js/src/singletons/types.ts index 1ebe0eb19..dd7efabf3 100644 --- a/js/src/singletons/types.ts +++ b/js/src/singletons/types.ts @@ -32,11 +32,10 @@ type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( ? 
I : never; // eslint-disable-next-line @typescript-eslint/no-explicit-any - export type TraceableFunction any> = // function overloads are represented as intersections rather than unions // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 - Func extends { + (Func extends { (...args: infer A1): infer R1; (...args: infer A2): infer R2; (...args: infer A3): infer R3; @@ -70,6 +69,9 @@ export type TraceableFunction any> = (...args: infer A1): infer R1; } ? UnionToIntersection> - : never; + : never) & { + // Other properties of Func + [K in keyof Func]: Func[K]; + }; export type RunTreeLike = RunTree; diff --git a/js/src/tests/batch_client.test.ts b/js/src/tests/batch_client.test.ts index 0db1db1dc..fd73237cc 100644 --- a/js/src/tests/batch_client.test.ts +++ b/js/src/tests/batch_client.test.ts @@ -3,6 +3,7 @@ import { jest } from "@jest/globals"; import { v4 as uuidv4 } from "uuid"; import { Client } from "../client.js"; import { convertToDottedOrderFormat } from "../run_trees.js"; +import { _getFetchImplementation } from "../singletons/fetch.js"; describe("Batch client tracing", () => { it("should create a batched run with the given input", async () => { @@ -55,7 +56,7 @@ describe("Batch client tracing", () => { }); expect(callSpy).toHaveBeenCalledWith( - fetch, + _getFetchImplementation(), "https://api.smith.langchain.com/runs/batch", expect.objectContaining({ body: expect.any(String) }) ); @@ -161,7 +162,7 @@ describe("Batch client tracing", () => { }); expect(callSpy).toHaveBeenCalledWith( - fetch, + _getFetchImplementation(), "https://api.smith.langchain.com/runs/batch", expect.objectContaining({ body: expect.any(String) }) ); @@ -505,9 +506,88 @@ describe("Batch client tracing", () => { }); expect(callSpy).toHaveBeenCalledWith( - fetch, + _getFetchImplementation(), "https://api.smith.langchain.com/runs", expect.objectContaining({ body: expect.any(String) }) ); }); + + it("Should handle circular values", async () => { + const 
client = new Client({ + apiKey: "test-api-key", + autoBatchTracing: true, + }); + const callSpy = jest + .spyOn((client as any).batchIngestCaller, "call") + .mockResolvedValue({ + ok: true, + text: () => "", + }); + jest + .spyOn(client as any, "batchEndpointIsSupported") + .mockResolvedValue(true); + const projectName = "__test_batch"; + const a: Record = {}; + const b: Record = {}; + a.b = b; + b.a = a; + + const runId = uuidv4(); + const dottedOrder = convertToDottedOrderFormat( + new Date().getTime() / 1000, + runId + ); + await client.createRun({ + id: runId, + project_name: projectName, + name: "test_run", + run_type: "llm", + inputs: a, + trace_id: runId, + dotted_order: dottedOrder, + }); + + const endTime = Math.floor(new Date().getTime() / 1000); + + await client.updateRun(runId, { + outputs: b, + dotted_order: dottedOrder, + trace_id: runId, + end_time: endTime, + }); + + await new Promise((resolve) => setTimeout(resolve, 100)); + + const calledRequestParam: any = callSpy.mock.calls[0][2]; + expect(JSON.parse(calledRequestParam?.body)).toEqual({ + post: [ + expect.objectContaining({ + id: runId, + run_type: "llm", + inputs: { + b: { + a: { + result: "[Circular]", + }, + }, + }, + outputs: { + a: { + result: "[Circular]", + }, + }, + end_time: endTime, + trace_id: runId, + dotted_order: dottedOrder, + }), + ], + patch: [], + }); + + expect(callSpy).toHaveBeenCalledWith( + _getFetchImplementation(), + "https://api.smith.langchain.com/runs/batch", + expect.objectContaining({ body: expect.any(String) }) + ); + }); }); diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 0b87522e9..ddf160fa3 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -1,5 +1,9 @@ -import { Dataset, Run } from "../schemas.js"; -import { FunctionMessage, HumanMessage } from "@langchain/core/messages"; +import { Dataset, Example, Run, TracerSession } from "../schemas.js"; +import { + FunctionMessage, + HumanMessage, + 
SystemMessage, +} from "@langchain/core/messages"; import { Client } from "../client.js"; import { v4 as uuidv4 } from "uuid"; @@ -10,6 +14,11 @@ import { toArray, waitUntil, } from "./utils.js"; +import { ChatPromptTemplate, PromptTemplate } from "@langchain/core/prompts"; +import { ChatOpenAI } from "@langchain/openai"; +import { RunnableSequence } from "@langchain/core/runnables"; +import { load } from "langchain/load"; +import { _getFetchImplementation } from "../singletons/fetch.js"; type CheckOutputsType = boolean | ((run: Run) => boolean); async function waitUntilRunFound( @@ -113,11 +122,45 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { const newExampleValue2 = await client.readExample(example.id); expect(newExampleValue2.inputs.col1).toBe("updatedExampleCol3"); expect(newExampleValue2.metadata?.dataset_split).toStrictEqual(["my_split3"]); + + const newExample = await client.createExample( + { col1: "newAddedExampleCol1" }, + { col2: "newAddedExampleCol2" }, + { datasetId: newDataset.id } + ); + const newExampleValue_ = await client.readExample(newExample.id); + expect(newExampleValue_.inputs.col1).toBe("newAddedExampleCol1"); + expect(newExampleValue_.outputs?.col2).toBe("newAddedExampleCol2"); + + await client.updateExamples([ + { + id: newExample.id, + inputs: { col1: "newUpdatedExampleCol1" }, + outputs: { col2: "newUpdatedExampleCol2" }, + metadata: { foo: "baz" }, + }, + { + id: example.id, + inputs: { col1: "newNewUpdatedExampleCol" }, + outputs: { col2: "newNewUpdatedExampleCol2" }, + metadata: { foo: "qux" }, + }, + ]); + const updatedExample = await client.readExample(newExample.id); + expect(updatedExample.inputs.col1).toBe("newUpdatedExampleCol1"); + expect(updatedExample.outputs?.col2).toBe("newUpdatedExampleCol2"); + expect(updatedExample.metadata?.foo).toBe("baz"); + + const updatedExample2 = await client.readExample(example.id); + expect(updatedExample2.inputs.col1).toBe("newNewUpdatedExampleCol"); + 
expect(updatedExample2.outputs?.col2).toBe("newNewUpdatedExampleCol2"); + expect(updatedExample2.metadata?.foo).toBe("qux"); + await client.deleteExample(example.id); const examples2 = await toArray( client.listExamples({ datasetId: newDataset.id }) ); - expect(examples2.length).toBe(1); + expect(examples2.length).toBe(2); await client.deleteDataset({ datasetId }); const rawDataset = await client.createDataset(fileName, { @@ -141,6 +184,7 @@ test.concurrent( }); const dataset = await langchainClient.createDataset(datasetName, { dataType: "llm", + metadata: { key: "valuefoo" }, }); await langchainClient.createExample( { input: "hello world" }, @@ -151,6 +195,12 @@ test.concurrent( ); const loadedDataset = await langchainClient.readDataset({ datasetName }); expect(loadedDataset.data_type).toEqual("llm"); + + const datasetsByMetadata = await toArray( + langchainClient.listDatasets({ metadata: { key: "valuefoo" } }) + ); + expect(datasetsByMetadata.length).toEqual(1); + expect(datasetsByMetadata.map((d) => d.id)).toContain(dataset.id); await langchainClient.deleteDataset({ datasetName }); }, 180_000 @@ -172,7 +222,7 @@ test.concurrent( await waitUntilRunFound(langchainClient, runId); const sharedUrl = await langchainClient.shareRun(runId); - const response = await fetch(sharedUrl); + const response = await _getFetchImplementation()(sharedUrl); expect(response.status).toEqual(200); expect(await langchainClient.readRunSharedLink(runId)).toEqual(sharedUrl); @@ -510,6 +560,22 @@ test.concurrent( client.listExamples({ datasetId: dataset.id }) ); expect(examplesList.length).toEqual(4); + + const examplesListLimited = await toArray( + client.listExamples({ datasetId: dataset.id, limit: 2 }) + ); + expect(examplesListLimited.length).toEqual(2); + + const examplesListOffset = await toArray( + client.listExamples({ datasetId: dataset.id, offset: 2 }) + ); + expect(examplesListOffset.length).toEqual(2); + + const examplesListLimitedOffset = await toArray( + client.listExamples({ 
datasetId: dataset.id, limit: 1, offset: 2 }) + ); + expect(examplesListLimitedOffset.length).toEqual(1); + await client.deleteExample(example.id); const examplesList2 = await toArray( client.listExamples({ datasetId: dataset.id }) @@ -583,6 +649,34 @@ test.concurrent( expect(examplesList3[0].metadata?.foo).toEqual("bar"); expect(examplesList3[0].metadata?.baz).toEqual("qux"); + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'exists(metadata, "baz")', + }) + ); + expect(examplesList3.length).toEqual(1); + expect(examplesList3[0].metadata?.foo).toEqual("bar"); + expect(examplesList3[0].metadata?.baz).toEqual("qux"); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'has("metadata", \'{"foo": "bar"}\')', + }) + ); + expect(examplesList3.length).toEqual(1); + expect(examplesList3[0].metadata?.foo).toEqual("bar"); + expect(examplesList3[0].metadata?.baz).toEqual("qux"); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'exists(metadata, "bazzz")', + }) + ); + expect(examplesList3.length).toEqual(0); + examplesList3 = await toArray( client.listExamples({ datasetId: dataset.id, @@ -661,3 +755,488 @@ test.concurrent("list runs limit arg works", async () => { } } }); + +test.concurrent("Test run stats", async () => { + const client = new Client(); + const stats = await client.getRunStats({ + projectNames: ["default"], + runType: "llm", + }); + expect(stats).toBeDefined(); +}); + +test("Test createProject raises LangSmithConflictError on duplicate name", async () => { + const client = new Client(); + const projectName = `test_project_${uuidv4()}`; + + try { + // Create the project for the first time + await client.createProject({ projectName }); + + // Attempt to create the project with the same name again + await expect(client.createProject({ projectName })).rejects.toThrow( + expect.objectContaining({ + name: "LangSmithConflictError", + }) + 
); + } finally { + try { + // Clean up: delete the project + if (await client.hasProject({ projectName })) { + await client.deleteProject({ projectName }); + } + } catch (e) { + // Everyone has those days. + } + } +}); + +test("Test list prompts", async () => { + const client = new Client(); + const uid = uuidv4(); + // push 3 prompts + const promptName1 = `test_prompt_${uid}__0`; + const promptName2 = `test_prompt_${uid}__1`; + const promptName3 = `test_prompt_${uid}__2`; + + await client.pushPrompt(promptName1, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + isPublic: true, + }); + await client.pushPrompt(promptName2, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + await client.pushPrompt(promptName3, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + // expect at least one of the prompts to have promptName1 + const response = client.listPrompts({ isPublic: true, query: uid }); + let found = false; + expect(response).toBeDefined(); + for await (const prompt of response) { + expect(prompt).toBeDefined(); + if (prompt.repo_handle === promptName1) { + found = true; + } + } + expect(found).toBe(true); + + // expect the prompts to be sorted by updated_at + const response2 = client.listPrompts({ sortField: "updated_at", query: uid }); + expect(response2).toBeDefined(); + let lastUpdatedAt: number | undefined; + for await (const prompt of response2) { + expect(prompt.updated_at).toBeDefined(); + const currentUpdatedAt = new Date(prompt.updated_at).getTime(); + if (lastUpdatedAt !== undefined) { + 
expect(currentUpdatedAt).toBeLessThanOrEqual(lastUpdatedAt); + } + lastUpdatedAt = currentUpdatedAt; + } + expect(lastUpdatedAt).toBeDefined(); +}); + +test("Test get prompt", async () => { + const client = new Client(); + const promptName = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + + const url = await client.pushPrompt(promptName, { object: promptTemplate }); + expect(url).toBeDefined(); + + const prompt = await client.getPrompt(promptName); + expect(prompt).toBeDefined(); + expect(prompt?.repo_handle).toBe(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test prompt exists", async () => { + const client = new Client(); + const nonExistentPrompt = `non_existent_${uuidv4().slice(0, 8)}`; + expect(await client.promptExists(nonExistentPrompt)).toBe(false); + + const existentPrompt = `existent_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(existentPrompt, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + expect(await client.promptExists(existentPrompt)).toBe(true); + + await client.deletePrompt(existentPrompt); +}); + +test("Test update prompt", async () => { + const client = new Client(); + + const promptName = `test_update_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + const updatedData = await client.updatePrompt(promptName, { + description: "Updated description", + isPublic: true, + tags: ["test", "update"], + }); + + expect(updatedData).toBeDefined(); + + const 
updatedPrompt = await client.getPrompt(promptName); + expect(updatedPrompt?.description).toBe("Updated description"); + expect(updatedPrompt?.is_public).toBe(true); + expect(updatedPrompt?.tags).toEqual( + expect.arrayContaining(["test", "update"]) + ); + + await client.deletePrompt(promptName); +}); + +test("Test delete prompt", async () => { + const client = new Client(); + + const promptName = `test_delete_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + expect(await client.promptExists(promptName)).toBe(true); + await client.deletePrompt(promptName); + expect(await client.promptExists(promptName)).toBe(false); +}); + +test("test listing projects by metadata", async () => { + const client = new Client(); + const uid = uuidv4(); + const projectName = `my_metadata_project_${uid}`; + + await client.createProject({ + projectName: projectName, + metadata: { + foobar: uid, + baz: "barfooqux", + }, + }); + + const projects = await client.listProjects({ metadata: { foobar: uid } }); + + let myProject: TracerSession | null = null; + for await (const project of projects) { + myProject = project; + } + expect(myProject?.name).toEqual(projectName); + + await client.deleteProject({ projectName: projectName }); +}); + +test("Test create commit", async () => { + const client = new Client(); + + const promptName = `test_create_commit_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + const newTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: 
{{question}}" }), + ], + { templateFormat: "mustache" } + ); + const commitUrl = await client.createCommit(promptName, newTemplate); + + expect(commitUrl).toBeDefined(); + expect(commitUrl).toContain(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test like and unlike prompt", async () => { + const client = new Client(); + + const promptName = `test_like_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + await client.likePrompt(promptName); + let prompt = await client.getPrompt(promptName); + expect(prompt?.num_likes).toBe(1); + + await client.unlikePrompt(promptName); + prompt = await client.getPrompt(promptName); + expect(prompt?.num_likes).toBe(0); + + await client.deletePrompt(promptName); +}); + +test("Test pull prompt commit", async () => { + const client = new Client(); + + const promptName = `test_pull_commit_${uuidv4().slice(0, 8)}`; + const initialTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + await client.pushPrompt(promptName, { object: initialTemplate }); + + const promptCommit = await client.pullPromptCommit(promptName); + expect(promptCommit).toBeDefined(); + expect(promptCommit.repo).toBe(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test push and pull prompt", async () => { + const client = new Client(); + + const promptName = `test_push_pull_${uuidv4().slice(0, 8)}`; + const template = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + const template2 = ChatPromptTemplate.fromMessages( + [ + new 
SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: {{question}}" }), + ], + { templateFormat: "mustache" } + ); + + await client.pushPrompt(promptName, { + object: template, + description: "Test description", + readme: "Test readme", + tags: ["test", "tag"], + }); + + // test you can push an updated manifest without any other options + await client.pushPrompt(promptName, { + object: template2, + }); + + const pulledPrompt = await client._pullPrompt(promptName); + expect(pulledPrompt).toBeDefined(); + + const promptInfo = await client.getPrompt(promptName); + expect(promptInfo?.description).toBe("Test description"); + expect(promptInfo?.readme).toBe("Test readme"); + expect(promptInfo?.tags).toEqual(expect.arrayContaining(["test", "tag"])); + expect(promptInfo?.is_public).toBe(false); + + await client.deletePrompt(promptName); +}); + +test("Test pull prompt include model", async () => { + const client = new Client(); + const model = new ChatOpenAI({}); + const promptTemplate = PromptTemplate.fromTemplate( + "Tell me a joke about {topic}" + ); + const promptWithModel = promptTemplate.pipe(model); + + const promptName = `test_prompt_with_model_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: promptWithModel }); + + const pulledPrompt = await client._pullPrompt(promptName, { + includeModel: true, + }); + const rs: RunnableSequence = await load(pulledPrompt); + expect(rs).toBeDefined(); + expect(rs).toBeInstanceOf(RunnableSequence); + + await client.deletePrompt(promptName); +}); + +test("list shared examples can list shared examples", async () => { + const client = new Client(); + const multiverseMathPublicDatasetShareToken = + "620596ee-570b-4d2b-8c8f-f828adbe5242"; + const sharedExamples = await client.listSharedExamples( + multiverseMathPublicDatasetShareToken + ); + expect(sharedExamples.length).toBeGreaterThan(0); +}); + +test("clonePublicDataset method can clone a dataset", async () => { + const 
client = new Client(); + const datasetName = "multiverse_math_public_testing"; + const multiverseMathPublicDatasetURL = + "https://smith.langchain.com/public/620596ee-570b-4d2b-8c8f-f828adbe5242/d"; + + try { + await client.clonePublicDataset(multiverseMathPublicDatasetURL, { + datasetName, + }); + + const clonedDataset = await client.hasDataset({ datasetName }); + expect(clonedDataset).toBe(true); + + const examples: Example[] = []; + for await (const ex of client.listExamples({ datasetName })) { + examples.push(ex); + } + expect(examples.length).toBeGreaterThan(0); + } finally { + try { + // Attempt to remove the newly created dataset if successful. + await client.deleteDataset({ datasetName }); + } catch (_) { + // no-op if failure + } + } +}); + +test("annotationqueue crud", async () => { + const client = new Client(); + const queueName = `test-queue-${uuidv4().substring(0, 8)}`; + const projectName = `test-project-${uuidv4().substring(0, 8)}`; + const queueId = uuidv4(); + + try { + // 1. Create an annotation queue + const queue = await client.createAnnotationQueue({ + name: queueName, + description: "Initial description", + queueId, + }); + expect(queue).toBeDefined(); + expect(queue.name).toBe(queueName); + + // 1a. Get the annotation queue + const fetchedQueue = await client.readAnnotationQueue(queue.id); + expect(fetchedQueue).toBeDefined(); + expect(fetchedQueue.name).toBe(queueName); + + // 1b. List annotation queues and check nameContains + const listedQueues = await toArray( + client.listAnnotationQueues({ nameContains: queueName }) + ); + expect(listedQueues.length).toBeGreaterThan(0); + expect(listedQueues.some((q) => q.id === queue.id)).toBe(true); + + // 2. 
Create a run in a random project + await client.createProject({ projectName }); + const runId = uuidv4(); + await client.createRun({ + id: runId, + name: "Test Run", + run_type: "chain", + inputs: { foo: "bar" }, + outputs: { baz: "qux" }, + project_name: projectName, + }); + + // Wait for run to be found in the db + const maxWaitTime = 30000; // 30 seconds + const startTime = Date.now(); + let foundRun = null; + + while (Date.now() - startTime < maxWaitTime) { + try { + foundRun = await client.readRun(runId); + if (foundRun) break; + } catch (error) { + // If run is not found, getRun might throw an error + // We'll ignore it and keep trying + } + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before trying again + } + + if (!foundRun) { + throw new Error( + `Run with ID ${runId} not found after ${maxWaitTime / 1000} seconds` + ); + } + + // 3. Add the run to the annotation queue + await client.addRunsToAnnotationQueue(fetchedQueue.id, [runId]); + + // 4. Update the annotation queue description and check that it is updated + const newDescription = "Updated description"; + await client.updateAnnotationQueue(queue.id, { + name: queueName, + description: newDescription, + }); + const updatedQueue = await client.readAnnotationQueue(queue.id); + expect(updatedQueue.description).toBe(newDescription); + + // Get the run from the annotation queue + const run = await client.getRunFromAnnotationQueue(queueId, 0); + expect(run).toBeDefined(); + expect(run.id).toBe(runId); + expect(run.name).toBe("Test Run"); + expect(run.run_type).toBe("chain"); + expect(run.inputs).toEqual({ foo: "bar" }); + expect(run.outputs).toEqual({ baz: "qux" }); + } finally { + // 6. 
Delete the annotation queue + await client.deleteAnnotationQueue(queueId); + + // Clean up the project + if (await client.hasProject({ projectName })) { + await client.deleteProject({ projectName }); + } + } +}); diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index 245c9487e..d86c0dc24 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -6,6 +6,10 @@ import { getLangChainEnvVars, getLangChainEnvVarsMetadata, } from "../utils/env.js"; +import { + isVersionGreaterOrEqual, + parsePromptIdentifier, +} from "../utils/prompts.js"; describe("Client", () => { describe("createLLMExample", () => { @@ -81,6 +85,12 @@ describe("Client", () => { }); }); + it("should trim trailing slash on a passed apiUrl", () => { + const client = new Client({ apiUrl: "https://example.com/" }); + const result = (client as any).apiUrl; + expect(result).toBe("https://example.com"); + }); + describe("getHostUrl", () => { it("should return the webUrl if it exists", () => { const client = new Client({ @@ -106,6 +116,12 @@ describe("Client", () => { expect(result).toBe("https://example.com"); }); + it("should trim trailing slash on a passed webUrl", () => { + const client = new Client({ webUrl: "https://example.com/" }); + const result = (client as any).getHostUrl(); + expect(result).toBe("https://example.com"); + }); + it("should return 'https://dev.smith.langchain.com' if apiUrl contains 'dev'", () => { const client = new Client({ apiUrl: "https://dev.smith.langchain.com/api", @@ -115,6 +131,15 @@ describe("Client", () => { expect(result).toBe("https://dev.smith.langchain.com"); }); + it("should return 'https://eu.smith.langchain.com' if apiUrl contains 'eu'", () => { + const client = new Client({ + apiUrl: "https://eu.smith.langchain.com/api", + apiKey: "test-api-key", + }); + const result = (client as any).getHostUrl(); + expect(result).toBe("https://eu.smith.langchain.com"); + }); + it("should return 'https://smith.langchain.com' for any other 
apiUrl", () => { const client = new Client({ apiUrl: "https://smith.langchain.com/api", @@ -166,4 +191,62 @@ describe("Client", () => { }); }); }); + + describe("isVersionGreaterOrEqual", () => { + it("should return true if the version is greater or equal", () => { + // Test versions equal to 0.5.23 + expect(isVersionGreaterOrEqual("0.5.23", "0.5.23")).toBe(true); + + // Test versions greater than 0.5.23 + expect(isVersionGreaterOrEqual("0.5.24", "0.5.23")); + expect(isVersionGreaterOrEqual("0.6.0", "0.5.23")); + expect(isVersionGreaterOrEqual("1.0.0", "0.5.23")); + + // Test versions less than 0.5.23 + expect(isVersionGreaterOrEqual("0.5.22", "0.5.23")).toBe(false); + expect(isVersionGreaterOrEqual("0.5.0", "0.5.23")).toBe(false); + expect(isVersionGreaterOrEqual("0.4.99", "0.5.23")).toBe(false); + }); + }); + + describe("parsePromptIdentifier", () => { + it("should parse valid identifiers correctly", () => { + expect(parsePromptIdentifier("name")).toEqual(["-", "name", "latest"]); + expect(parsePromptIdentifier("owner/name")).toEqual([ + "owner", + "name", + "latest", + ]); + expect(parsePromptIdentifier("owner/name:commit")).toEqual([ + "owner", + "name", + "commit", + ]); + expect(parsePromptIdentifier("name:commit")).toEqual([ + "-", + "name", + "commit", + ]); + }); + + it("should throw an error for invalid identifiers", () => { + const invalidIdentifiers = [ + "", + "/", + ":", + "owner/", + "/name", + "owner//name", + "owner/name/", + "owner/name/extra", + ":commit", + ]; + + invalidIdentifiers.forEach((identifier) => { + expect(() => parsePromptIdentifier(identifier)).toThrowError( + `Invalid identifier format: ${identifier}` + ); + }); + }); + }); }); diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 733b68a6d..4d68b920c 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -7,8 +7,9 @@ import { Example, Run, TracerSession } from "../schemas.js"; import { Client } from 
"../index.js"; import { afterAll, beforeAll } from "@jest/globals"; import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables"; - -const TESTING_DATASET_NAME = "test_dataset_js_evaluate_123"; +import { v4 as uuidv4 } from "uuid"; +const TESTING_DATASET_NAME = `test_dataset_js_evaluate_${uuidv4()}`; +const TESTING_DATASET_NAME2 = `my_splits_ds_${uuidv4()}`; beforeAll(async () => { const client = new Client(); @@ -46,7 +47,6 @@ afterAll(async () => { test("evaluate can evaluate", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -84,7 +84,6 @@ test("evaluate can evaluate", async () => { test("evaluate can repeat", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -184,7 +183,6 @@ test("evaluate can evaluate with RunEvaluator evaluators", async () => { test("evaluate can evaluate with custom evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -256,7 +254,6 @@ test("evaluate can evaluate with custom evaluators", async () => { test("evaluate can evaluate with summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -269,7 +266,7 @@ test("evaluate can evaluate with summary evaluators", async () => { const runIds = runs.map(({ id }) => id).join(", "); const exampleIds = examples?.map(({ id }) => id).join(", "); return Promise.resolve({ - key: "key", + key: "MyCustomScore", score: 1, comment: `Runs: ${runIds} Examples: ${exampleIds}`, }); @@ -282,7 +279,7 @@ test("evaluate can evaluate with summary evaluators", async () => { }); expect(evalRes.summaryResults.results).toHaveLength(1); - expect(evalRes.summaryResults.results[0].key).toBe("key"); + expect(evalRes.summaryResults.results[0].key).toBe("MyCustomScore"); 
expect(evalRes.summaryResults.results[0].score).toBe(1); const allRuns = evalRes.results.map(({ run }) => run); const allExamples = evalRes.results.map(({ example }) => example); @@ -314,7 +311,6 @@ test("evaluate can evaluate with summary evaluators", async () => { test.skip("can iterate over evaluate results", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -343,7 +339,6 @@ test.skip("can iterate over evaluate results", async () => { test("can pass multiple evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -391,7 +386,7 @@ test("can pass multiple evaluators", async () => { test("split info saved correctly", async () => { const client = new Client(); // create a new dataset - await client.createDataset("my_splits_ds2", { + await client.createDataset(TESTING_DATASET_NAME2, { description: "For testing purposed. Is created & deleted for each test run.", }); @@ -400,21 +395,22 @@ test("split info saved correctly", async () => { inputs: [{ input: 1 }, { input: 2 }, { input: 3 }], outputs: [{ output: 2 }, { output: 3 }, { output: 4 }], splits: [["test"], ["train"], ["validation", "test"]], - datasetName: "my_splits_ds2", + datasetName: TESTING_DATASET_NAME2, }); const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; }; await evaluate(targetFunc, { - data: client.listExamples({ datasetName: "my_splits_ds2" }), + data: client.listExamples({ datasetName: TESTING_DATASET_NAME2 }), description: "splits info saved correctly", }); - const exp = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + const exp = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); let myExp: TracerSession | null = null; for await (const session of exp) { myExp = session; @@ -425,13 +421,15 @@ test("split info saved correctly", async () => { await 
evaluate(targetFunc, { data: client.listExamples({ - datasetName: "my_splits_ds2", + datasetName: TESTING_DATASET_NAME2, splits: ["test"], }), description: "splits info saved correctly", }); - const exp2 = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + const exp2 = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); let myExp2: TracerSession | null = null; for await (const session of exp2) { if (myExp2 === null || session.start_time > myExp2.start_time) { @@ -445,13 +443,15 @@ test("split info saved correctly", async () => { await evaluate(targetFunc, { data: client.listExamples({ - datasetName: "my_splits_ds2", + datasetName: TESTING_DATASET_NAME2, splits: ["train"], }), description: "splits info saved correctly", }); - const exp3 = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + const exp3 = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); let myExp3: TracerSession | null = null; for await (const session of exp3) { if (myExp3 === null || session.start_time > myExp3.start_time) { @@ -466,7 +466,6 @@ test("split info saved correctly", async () => { test("can pass multiple summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -517,7 +516,6 @@ test("can pass AsyncIterable of Example's to evaluator instead of dataset name", }); const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -551,7 +549,6 @@ test("can pass AsyncIterable of Example's to evaluator instead of dataset name", test("max concurrency works with custom evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -587,7 +584,6 @@ test("max concurrency works with custom evaluators", async () => { test("max concurrency works with summary evaluators", async () => { const targetFunc = (input: Record) 
=> { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -704,7 +700,6 @@ test("evaluate can accept array of examples", async () => { } const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 5b18884bb..81c14d653 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -2,8 +2,9 @@ import { evaluate } from "../evaluation/_runner.js"; import { evaluateComparative } from "../evaluation/evaluate_comparative.js"; import { Client } from "../index.js"; import { waitUntilRunFound } from "./utils.js"; +import { v4 as uuidv4 } from "uuid"; -const TESTING_DATASET_NAME = "test_evaluate_comparative_js"; +const TESTING_DATASET_NAME = `test_evaluate_comparative_js_${uuidv4()}`; beforeAll(async () => { const client = new Client(); diff --git a/js/src/tests/experiment_manager.int.test.ts b/js/src/tests/experiment_manager.int.test.ts new file mode 100644 index 000000000..ab15bc69c --- /dev/null +++ b/js/src/tests/experiment_manager.int.test.ts @@ -0,0 +1,45 @@ +import { _ExperimentManager } from "../evaluation/_runner.js"; +import { Client } from "../index.js"; +import { v4 as uuidv4 } from "uuid"; + +const TESTING_DATASET_NAME = `test_experiment_manager_${uuidv4()}`; + +beforeAll(async () => { + const client = new Client(); + + if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) { + await client.createDataset(TESTING_DATASET_NAME, { + description: "For testing pruposes", + }); + + await client.createExamples({ + inputs: [{ input: 1 }, { input: 2 }], + outputs: [{ output: 2 }, { output: 3 }], + datasetName: TESTING_DATASET_NAME, + }); + } +}); + +afterAll(async () => { + const client = new Client(); + await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); +}); + +describe("experiment manager", () => { + test("can 
recover from collisions", async () => { + const client = new Client(); + const ds = await client.readDataset({ datasetName: TESTING_DATASET_NAME }); + const manager = await new _ExperimentManager({ + data: TESTING_DATASET_NAME, + client, + numRepetitions: 1, + }); + const experimentName = manager._experimentName; + await client.createProject({ + projectName: experimentName, + referenceDatasetId: ds.id, + }); + await manager.start(); + expect(manager._experimentName).not.toEqual(experimentName); + }); +}); diff --git a/js/src/tests/fetch.test.ts b/js/src/tests/fetch.test.ts new file mode 100644 index 000000000..9210aca77 --- /dev/null +++ b/js/src/tests/fetch.test.ts @@ -0,0 +1,98 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { jest } from "@jest/globals"; +import { Client } from "../client.js"; +import { overrideFetchImplementation } from "../singletons/fetch.js"; +import { traceable } from "../traceable.js"; + +describe.each([[""], ["mocked"]])("Client uses %s fetch", (description) => { + let globalFetchMock: jest.Mock; + let overriddenFetch: jest.Mock; + let expectedFetchMock: jest.Mock; + let unexpectedFetchMock: jest.Mock; + + beforeEach(() => { + globalFetchMock = jest.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve({}), + text: () => Promise.resolve(""), + }) + ); + overriddenFetch = jest.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve({}), + text: () => Promise.resolve(""), + }) + ); + expectedFetchMock = + description === "mocked" ? overriddenFetch : globalFetchMock; + unexpectedFetchMock = + description === "mocked" ? 
globalFetchMock : overriddenFetch; + + if (description === "mocked") { + overrideFetchImplementation(overriddenFetch); + } else { + overrideFetchImplementation(globalFetchMock); + } + // Mock global fetch + (globalThis as any).fetch = globalFetchMock; + }); + + afterEach(() => { + jest.restoreAllMocks(); + }); + + describe("createLLMExample", () => { + it("should create an example with the given input and generation", async () => { + const client = new Client({ apiKey: "test-api-key" }); + + const input = "Hello, world!"; + const generation = "Bonjour, monde!"; + const options = { datasetName: "test-dataset" }; + + await client.createLLMExample(input, generation, options); + expect(expectedFetchMock).toHaveBeenCalled(); + expect(unexpectedFetchMock).not.toHaveBeenCalled(); + }); + }); + + describe("createChatExample", () => { + it("should convert LangChainBaseMessage objects to examples", async () => { + const client = new Client({ apiKey: "test-api-key" }); + const input = [ + { text: "Hello", sender: "user" }, + { text: "Hi there", sender: "bot" }, + ]; + const generations = { + type: "langchain", + data: { text: "Bonjour", sender: "bot" }, + }; + const options = { datasetName: "test-dataset" }; + + await client.createChatExample(input, generations, options); + + expect(expectedFetchMock).toHaveBeenCalled(); + expect(unexpectedFetchMock).not.toHaveBeenCalled(); + }); + }); + + test("basic traceable implementation", async () => { + const llm = traceable( + async function* llm(input: string) { + const response = input.repeat(2).split(""); + for (const char of response) { + yield char; + } + }, + { tracingEnabled: true } + ); + + // eslint-disable-next-line @typescript-eslint/no-unused-vars + for await (const _ of llm("Hello world")) { + // pass + } + expect(expectedFetchMock).toHaveBeenCalled(); + expect(unexpectedFetchMock).not.toHaveBeenCalled(); + }); +}); diff --git a/js/src/tests/few_shot.int.test.ts b/js/src/tests/few_shot.int.test.ts new file mode 100644 
index 000000000..484e5a391 --- /dev/null +++ b/js/src/tests/few_shot.int.test.ts @@ -0,0 +1,78 @@ +import { KVMap, ExampleSearch } from "../schemas.js"; +import { Client } from "../index.js"; +import { v4 as uuidv4 } from "uuid"; + +const TESTING_DATASET_NAME = `test_dataset_few_shot_js_${uuidv4()}`; + +test("few shot search", async () => { + const client = new Client(); + + const schema: KVMap = { + type: "object", + properties: { + name: { type: "string" }, + }, + required: ["name"], + additionalProperties: false, + }; + + const has_dataset = await client.hasDataset({ + datasetName: TESTING_DATASET_NAME, + }); + if (has_dataset === true) { + await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); + } + + const dataset = await client.createDataset(TESTING_DATASET_NAME, { + description: + "For testing purposed. Is created & deleted for each test run.", + inputsSchema: schema, + }); + + // create examples + const res = await client.createExamples({ + inputs: [{ name: "foo" }, { name: "bar" }], + outputs: [{ output: 2 }, { output: 3 }], + metadata: [{ somekey: "somevalue" }, { somekey: "someothervalue" }], + datasetName: TESTING_DATASET_NAME, + }); + if (res.length !== 2) { + throw new Error("Failed to create examples"); + } + + await client.indexDataset({ datasetId: dataset.id }); + + let i = 0; + let examples: ExampleSearch[] = []; + while (i < 10) { + examples = await client.similarExamples( + { name: "foo" }, + dataset.id, + // specify limit of 5 so you return all examples + 5 + ); + if (examples.length === 2) { + break; + } + + // sleep for one second + await new Promise((r) => setTimeout(r, 1000)); + i++; + } + + expect(examples.length).toBe(2); + expect(examples[0].inputs).toEqual({ name: "foo" }); + expect(examples[1].inputs).toEqual({ name: "bar" }); + + const filtered_examples = await client.similarExamples( + { name: "foo" }, + dataset.id, + 1, + { + filter: "eq(metadata.somekey, 'somevalue')", + } + ); + + 
expect(filtered_examples.length).toBe(1); + expect(filtered_examples[0].inputs).toEqual({ name: "foo" }); +}); diff --git a/js/src/tests/lcls_handoff.int.test.ts b/js/src/tests/lcls_handoff.int.test.ts index 3a064a07f..e0ec379be 100644 --- a/js/src/tests/lcls_handoff.int.test.ts +++ b/js/src/tests/lcls_handoff.int.test.ts @@ -54,10 +54,12 @@ test.concurrent( timeout_ms: 30_000, }); try { + const runId = uuidv4(); const result = await app.invoke( [new HumanMessage({ content: "Hello!" })], { callbacks: [tracer], + runId, } ); expect(result[result.length - 1].content).toEqual("Hello! world"); @@ -84,6 +86,7 @@ test.concurrent( const trace = traces[0]; expect(trace.name).toEqual("add_negligible_value"); expect(trace.parent_run_id).not.toBeNull(); + expect(trace.trace_id).toEqual(runId); } catch (e) { console.error(e); throw e; diff --git a/js/src/tests/run_trees.int.test.ts b/js/src/tests/run_trees.int.test.ts index 79efba3fa..15199efda 100644 --- a/js/src/tests/run_trees.int.test.ts +++ b/js/src/tests/run_trees.int.test.ts @@ -1,4 +1,5 @@ import { Client } from "../client.js"; +import * as uuid from "uuid"; import { RunTree, RunTreeConfig, @@ -14,14 +15,8 @@ import { test.concurrent( "Test post and patch run", async () => { - const projectName = `__test_run_tree`; - const langchainClient = new Client({ timeout_ms: 30000 }); - try { - await langchainClient.readProject({ projectName }); - await langchainClient.deleteProject({ projectName }); - } catch (e) { - // Pass - } + const projectName = `__test_run_tree_js ${uuid.v4()}`; + const langchainClient = new Client({ timeout_ms: 30_000 }); const parentRunConfig: RunTreeConfig = { name: "parent_run", run_type: "chain", @@ -38,7 +33,7 @@ test.concurrent( ); await parent_run.postRun(); - const child_llm_run = await parent_run.createChild({ + const child_llm_run = parent_run.createChild({ name: "child_run", run_type: "llm", inputs: { text: "hello world" }, @@ -113,6 +108,20 @@ test.concurrent( runMap.get("parent_run")?.id ); 
expect(runMap.get("parent_run")?.parent_run_id).toBeNull(); + await waitUntil( + async () => { + try { + const runs_ = await toArray( + langchainClient.listRuns({ traceId: runs[0].trace_id }) + ); + return runs_.length === 5; + } catch (e) { + return false; + } + }, + 30_000, // Wait up to 30 seconds + 3000 // every 3 second + ); const traceRunsIter = langchainClient.listRuns({ traceId: runs[0].trace_id, diff --git a/js/src/tests/run_trees.test.ts b/js/src/tests/run_trees.test.ts index c9d7ea49e..253c32f45 100644 --- a/js/src/tests/run_trees.test.ts +++ b/js/src/tests/run_trees.test.ts @@ -31,7 +31,7 @@ test("Should work with manually set API key", async () => { project_name: projectName, }); await runTree.postRun(); - await new Promise((resolve) => setTimeout(resolve, 1000)); + await new Promise((resolve) => setTimeout(resolve, 1500)); expect(callSpy).toHaveBeenCalled(); } finally { process.env.LANGCHAIN_API_KEY = key; @@ -112,3 +112,12 @@ test("distributed", () => { "20210503T000000000001Z00000000-0000-0000-0000-00000000000.20210503T000001000002Z00000000-0000-0000-0000-00000000001", }); }); + +test("shared client between run trees", () => { + const runTree1 = new RunTree({ name: "tree_1" }); + const runTree2 = new RunTree({ name: "tree_2" }); + + expect(runTree1.client).toBeDefined(); + expect(runTree2.client).toBeDefined(); + expect(runTree1.client).toBe(runTree2.client); +}); diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 523a194b3..ea8c009e3 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -1,7 +1,9 @@ +import { jest } from "@jest/globals"; import { RunTree, RunTreeConfig } from "../run_trees.js"; import { ROOT, traceable, withRunTree } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; +import { Client, overrideFetchImplementation } from "../index.js"; test("basic traceable implementation", async () => { 
const { client, callSpy } = mockClient(); @@ -26,6 +28,37 @@ test("basic traceable implementation", async () => { }); }); +test("404s should only log, not throw an error", async () => { + const overriddenFetch = jest.fn(() => + Promise.resolve({ + ok: false, + status: 404, + statusText: "Expected test error", + json: () => Promise.resolve({}), + text: () => Promise.resolve("Expected test error."), + }) + ); + overrideFetchImplementation(overriddenFetch); + const client = new Client({ + apiUrl: "https://foobar.notreal", + }); + const llm = traceable( + async function* llm(input: string) { + const response = input.repeat(2).split(""); + for (const char of response) { + yield char; + } + }, + { client, tracingEnabled: true } + ); + + // eslint-disable-next-line @typescript-eslint/no-unused-vars + for await (const _ of llm("Hello world")) { + // pass + } + expect(overriddenFetch).toHaveBeenCalled(); +}); + test("nested traceable implementation", async () => { const { client, callSpy } = mockClient(); @@ -70,6 +103,64 @@ test("nested traceable implementation", async () => { }); }); +test("trace circular input and output objects", async () => { + const { client, callSpy } = mockClient(); + const a: Record = {}; + const b: Record = {}; + a.b = b; + b.a = a; + const llm = traceable( + async function foo(_: Record) { + return a; + }, + { client, tracingEnabled: true } + ); + + const input = { + a, + a2: a, + normalParam: { + test: true, + }, + }; + await llm(input); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["foo:0"], + edges: [], + data: { + "foo:0": { + inputs: { + a: { + b: { + a: { + result: "[Circular]", + }, + }, + }, + a2: { + b: { + a: { + result: "[Circular]", + }, + }, + }, + normalParam: { + test: true, + }, + }, + outputs: { + b: { + a: { + result: "[Circular]", + }, + }, + }, + }, + }, + }); +}); + test("passing run tree manually", async () => { const { client, callSpy } = mockClient(); const child = traceable( @@ -513,6 
+604,44 @@ describe("async generators", () => { }, }); }); + + test("iterable with props", async () => { + const { client, callSpy } = mockClient(); + + const iterableTraceable = traceable( + function iterableWithProps() { + return { + *[Symbol.asyncIterator]() { + yield 0; + }, + prop: "value", + }; + }, + { + client, + tracingEnabled: true, + } + ); + + const numbers: number[] = []; + const iterableWithProps = await iterableTraceable(); + for await (const num of iterableWithProps) { + numbers.push(num); + } + + expect(numbers).toEqual([0]); + + expect(iterableWithProps.prop).toBe("value"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["iterableWithProps:0"], + edges: [], + data: { + "iterableWithProps:0": { + outputs: { outputs: [0] }, + }, + }, + }); + }); }); describe("deferred input", () => { @@ -902,3 +1031,25 @@ test("argsConfigPath", async () => { }, }); }); + +test("traceable continues execution when client throws error", async () => { + const errorClient = { + createRun: jest.fn().mockRejectedValue(new Error("Client error") as never), + updateRun: jest.fn().mockRejectedValue(new Error("Client error") as never), + }; + + const tracedFunction = traceable( + async (value: number): Promise => value * 2, + { + client: errorClient as unknown as Client, + name: "errorTest", + tracingEnabled: true, + } + ); + + const result = await tracedFunction(5); + + expect(result).toBe(10); + expect(errorClient.createRun).toHaveBeenCalled(); + expect(errorClient.updateRun).toHaveBeenCalled(); +}); diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index c5d03e027..47bb928ce 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -8,6 +8,7 @@ import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; import { BaseMessage, HumanMessage } from "@langchain/core/messages"; import { awaitAllCallbacks } from 
"@langchain/core/callbacks/promises"; import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; +import { RunnableLambda } from "@langchain/core/runnables"; describe("to langchain", () => { const llm = new FakeChatModel({}); @@ -115,6 +116,7 @@ describe("to langchain", () => { const result = await main({ texts: ["Hello world", "Who are you?"] }); + await awaitAllCallbacks(); expect(result).toEqual(["Hello world", "Who are you?"]); expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ nodes: [ @@ -393,3 +395,124 @@ test("explicit nested", async () => { ], }); }); + +// skip until the @langchain/core 0.2.17 is out +describe.skip("automatic tracing", () => { + it("root langchain", async () => { + const { callSpy, langChainTracer } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); + + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); + + const rootLC = RunnableLambda.from(async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }); + + expect( + await rootLC.invoke( + {}, + { callbacks: [langChainTracer], runName: "rootLC" } + ) + ).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLC:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLC:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLC:0", "childB:4"], + ["childB:4", 
"RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + }); + + it("root traceable", async () => { + const { client, callSpy } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); + + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); + + const rootLS = traceable( + async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }, + { name: "rootLS", client, tracingEnabled: true } + ); + + expect(await rootLS()).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLS:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLS:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLS:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + }); +}); diff --git a/js/src/tests/utils/mock_client.ts b/js/src/tests/utils/mock_client.ts index 7b985dc86..2c9195ae6 100644 --- a/js/src/tests/utils/mock_client.ts +++ b/js/src/tests/utils/mock_client.ts @@ -1,13 +1,25 @@ // eslint-disable-next-line import/no-extraneous-dependencies import { jest } from "@jest/globals"; +// eslint-disable-next-line import/no-extraneous-dependencies +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; import { Client } from "../../index.js"; type ClientParams = Exclude[0], undefined>; export const mockClient = (config?: Omit) => { - const 
client = new Client({ ...config, autoBatchTracing: false }); + const client = new Client({ + ...config, + apiKey: "MOCK", + autoBatchTracing: false, + }); const callSpy = jest .spyOn((client as any).caller, "call") .mockResolvedValue({ ok: true, text: () => "" }); - return { client, callSpy }; + const langChainTracer = new LangChainTracer({ + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore Overriden client + client, + }); + + return { client, callSpy, langChainTracer }; }; diff --git a/js/src/tests/wrapped_ai_sdk.int.test.ts b/js/src/tests/wrapped_ai_sdk.int.test.ts new file mode 100644 index 000000000..fc97a44b7 --- /dev/null +++ b/js/src/tests/wrapped_ai_sdk.int.test.ts @@ -0,0 +1,78 @@ +import { openai } from "@ai-sdk/openai"; +import { + generateObject, + generateText, + streamObject, + streamText, + tool, +} from "ai"; +import { z } from "zod"; +import { wrapAISDKModel } from "../wrappers/vercel.js"; + +const DEBUG = false; +test("AI SDK generateText", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { text } = await generateText({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + }); + DEBUG && console.log(text); +}); + +test("AI SDK generateText with a tool", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { text } = await generateText({ + model: modelWithTracing, + prompt: + "Write a vegetarian lasagna recipe for 4 people. 
Get ingredients first.", + tools: { + getIngredients: tool({ + description: "get a list of ingredients", + parameters: z.object({ + ingredients: z.array(z.string()), + }), + execute: async () => + JSON.stringify(["pasta", "tomato", "cheese", "onions"]), + }), + }, + maxToolRoundtrips: 2, + }); + DEBUG && console.log(text); +}); + +test("AI SDK generateObject", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { object } = await generateObject({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + schema: z.object({ + ingredients: z.array(z.string()), + }), + }); + DEBUG && console.log(object); +}); + +test("AI SDK streamText", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { textStream } = await streamText({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + }); + for await (const chunk of textStream) { + DEBUG && console.log(chunk); + } +}); + +test("AI SDK streamObject", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { partialObjectStream } = await streamObject({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + schema: z.object({ + ingredients: z.array(z.string()), + }), + }); + for await (const chunk of partialObjectStream) { + DEBUG && console.log(chunk); + } +}); diff --git a/js/src/traceable.ts b/js/src/traceable.ts index ee977e58f..aa8137c1a 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -279,6 +279,7 @@ export function traceable any>( // eslint-disable-next-line @typescript-eslint/no-explicit-any aggregator?: (args: any[]) => any; argsConfigPath?: [number] | [number, string]; + __finalTracedIteratorKey?: string; /** * Extract invocation parameters from the arguments of the traced function. 
@@ -294,7 +295,12 @@ export function traceable any>( } ) { type Inputs = Parameters; - const { aggregator, argsConfigPath, ...runTreeConfig } = config ?? {}; + const { + aggregator, + argsConfigPath, + __finalTracedIteratorKey, + ...runTreeConfig + } = config ?? {}; const traceableFunc = ( ...args: Inputs | [RunTree, ...Inputs] | [RunnableConfigLike, ...Inputs] @@ -434,14 +440,54 @@ export function traceable any>( return chunks; } - async function* wrapAsyncGeneratorForTracing( - iterable: AsyncIterable, + function tapReadableStreamForTracing( + stream: ReadableStream, + snapshot: ReturnType | undefined + ) { + const reader = stream.getReader(); + let finished = false; + const chunks: unknown[] = []; + + const tappedStream = new ReadableStream({ + async start(controller) { + // eslint-disable-next-line no-constant-condition + while (true) { + const result = await (snapshot + ? snapshot(() => reader.read()) + : reader.read()); + if (result.done) { + finished = true; + await currentRunTree?.end( + handleRunOutputs(await handleChunks(chunks)) + ); + await handleEnd(); + controller.close(); + break; + } + chunks.push(result.value); + controller.enqueue(result.value); + } + }, + async cancel(reason) { + if (!finished) await currentRunTree?.end(undefined, "Cancelled"); + await currentRunTree?.end( + handleRunOutputs(await handleChunks(chunks)) + ); + await handleEnd(); + return reader.cancel(reason); + }, + }); + + return tappedStream; + } + + async function* wrapAsyncIteratorForTracing( + iterator: AsyncIterator, snapshot: ReturnType | undefined ) { let finished = false; const chunks: unknown[] = []; try { - const iterator = iterable[Symbol.asyncIterator](); while (true) { const { value, done } = await (snapshot ? 
snapshot(() => iterator.next()) @@ -465,6 +511,19 @@ export function traceable any>( } } + function wrapAsyncGeneratorForTracing( + iterable: AsyncIterable, + snapshot: ReturnType | undefined + ) { + if (isReadableStream(iterable)) { + return tapReadableStreamForTracing(iterable, snapshot); + } + const iterator = iterable[Symbol.asyncIterator](); + const wrappedIterator = wrapAsyncIteratorForTracing(iterator, snapshot); + iterable[Symbol.asyncIterator] = () => wrappedIterator; + return iterable; + } + async function handleEnd() { const onEnd = config?.on_end; if (onEnd) { @@ -504,6 +563,25 @@ export function traceable any>( return wrapAsyncGeneratorForTracing(returnValue, snapshot); } + if ( + !Array.isArray(returnValue) && + typeof returnValue === "object" && + returnValue != null && + __finalTracedIteratorKey !== undefined && + isAsyncIterable( + (returnValue as Record)[__finalTracedIteratorKey] + ) + ) { + const snapshot = AsyncLocalStorage.snapshot(); + return { + ...returnValue, + [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing( + (returnValue as Record)[__finalTracedIteratorKey], + snapshot + ), + }; + } + const tracedPromise = new Promise((resolve, reject) => { Promise.resolve(returnValue) .then( @@ -515,23 +593,48 @@ export function traceable any>( ); } + if ( + !Array.isArray(rawOutput) && + typeof rawOutput === "object" && + rawOutput != null && + __finalTracedIteratorKey !== undefined && + isAsyncIterable( + (rawOutput as Record)[__finalTracedIteratorKey] + ) + ) { + const snapshot = AsyncLocalStorage.snapshot(); + return { + ...rawOutput, + [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing( + (rawOutput as Record)[ + __finalTracedIteratorKey + ], + snapshot + ), + }; + } + if (isGenerator(wrappedFunc) && isIteratorLike(rawOutput)) { const chunks = gatherAll(rawOutput); - await currentRunTree?.end( - handleRunOutputs( - await handleChunks( - chunks.reduce((memo, { value, done }) => { - if (!done || typeof value !== "undefined") { - 
memo.push(value); - } - - return memo; - }, []) + try { + await currentRunTree?.end( + handleRunOutputs( + await handleChunks( + chunks.reduce((memo, { value, done }) => { + if (!done || typeof value !== "undefined") { + memo.push(value); + } + + return memo; + }, []) + ) ) - ) - ); - await handleEnd(); + ); + await handleEnd(); + } catch (e) { + console.error("Error occurred during handleEnd:", e); + } return (function* () { for (const ret of chunks) { diff --git a/js/src/utils/_uuid.ts b/js/src/utils/_uuid.ts index 714235131..51d71f020 100644 --- a/js/src/utils/_uuid.ts +++ b/js/src/utils/_uuid.ts @@ -1,7 +1,12 @@ import * as uuid from "uuid"; -export function assertUuid(str: string): void { +export function assertUuid(str: string, which?: string): string { if (!uuid.validate(str)) { - throw new Error(`Invalid UUID: ${str}`); + const msg = + which !== undefined + ? `Invalid UUID for ${which}: ${str}` + : `Invalid UUID: ${str}`; + throw new Error(msg); } + return str; } diff --git a/js/src/utils/async_caller.ts b/js/src/utils/async_caller.ts index 4f2989785..00eabf651 100644 --- a/js/src/utils/async_caller.ts +++ b/js/src/utils/async_caller.ts @@ -1,5 +1,6 @@ import pRetry from "p-retry"; import PQueueMod from "p-queue"; +import { _getFetchImplementation } from "../singletons/fetch.js"; const STATUS_NO_RETRY = [ 400, // Bad Request @@ -152,7 +153,10 @@ export class AsyncCaller { fetch(...args: Parameters): ReturnType { return this.call(() => - fetch(...args).then((res) => (res.ok ? res : Promise.reject(res))) + _getFetchImplementation()(...args).then( + (res: Awaited>) => + res.ok ? 
res : Promise.reject(res) + ) ); } } diff --git a/js/src/utils/env.ts b/js/src/utils/env.ts index 4c073a796..535ef2772 100644 --- a/js/src/utils/env.ts +++ b/js/src/utils/env.ts @@ -200,6 +200,15 @@ export function getEnvironmentVariable(name: string): string | undefined { } } +export function getLangSmithEnvironmentVariable( + name: string +): string | undefined { + return ( + getEnvironmentVariable(`LANGSMITH_${name}`) || + getEnvironmentVariable(`LANGCHAIN_${name}`) + ); +} + export function setEnvironmentVariable(name: string, value: string): void { if (typeof process !== "undefined") { // eslint-disable-next-line no-process-env diff --git a/js/src/utils/error.ts b/js/src/utils/error.ts index 8739cb091..7c2d7b52a 100644 --- a/js/src/utils/error.ts +++ b/js/src/utils/error.ts @@ -21,3 +21,73 @@ export function printErrorStackTrace(e: unknown) { if (stack == null) return; console.error(stack); } + +/** + * LangSmithConflictError + * + * Represents an error that occurs when there's a conflict during an operation, + * typically corresponding to HTTP 409 status code responses. + * + * This error is thrown when an attempt to create or modify a resource conflicts + * with the current state of the resource on the server. 
Common scenarios include: + * - Attempting to create a resource that already exists + * - Trying to update a resource that has been modified by another process + * - Violating a uniqueness constraint in the data + * + * @extends Error + * + * @example + * try { + * await createProject("existingProject"); + * } catch (error) { + * if (error instanceof ConflictError) { + * console.log("A conflict occurred:", error.message); + * // Handle the conflict, e.g., by suggesting a different project name + * } else { + * // Handle other types of errors + * } + * } + * + * @property {string} name - Always set to 'ConflictError' for easy identification + * @property {string} message - Detailed error message including server response + * + * @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/409 + */ +export class LangSmithConflictError extends Error { + constructor(message: string) { + super(message); + this.name = "LangSmithConflictError"; + } +} + +/** + * Throws an appropriate error based on the response status and body. + * + * @param response - The fetch Response object + * @param context - Additional context to include in the error message (e.g., operation being performed) + * @throws {LangSmithConflictError} When the response status is 409 + * @throws {Error} For all other non-ok responses + */ +export async function raiseForStatus( + response: Response, + context: string, + consume?: boolean +): Promise { + // consume the response body to release the connection + // https://undici.nodejs.org/#/?id=garbage-collection + let errorBody; + if (response.ok) { + if (consume) { + errorBody = await response.text(); + } + return; + } + errorBody = await response.text(); + const fullMessage = `Failed to ${context}. Received status [${response.status}]: ${response.statusText}. 
Server response: ${errorBody}`; + + if (response.status === 409) { + throw new LangSmithConflictError(fullMessage); + } + + throw new Error(fullMessage); +} diff --git a/js/src/utils/fast-safe-stringify/LICENSE b/js/src/utils/fast-safe-stringify/LICENSE new file mode 100644 index 000000000..bec900d11 --- /dev/null +++ b/js/src/utils/fast-safe-stringify/LICENSE @@ -0,0 +1,23 @@ +The MIT License (MIT) + +Copyright (c) 2016 David Mark Clements +Copyright (c) 2017 David Mark Clements & Matteo Collina +Copyright (c) 2018 David Mark Clements, Matteo Collina & Ruben Bridgewater + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/js/src/utils/fast-safe-stringify/index.ts b/js/src/utils/fast-safe-stringify/index.ts new file mode 100644 index 000000000..7ae29d887 --- /dev/null +++ b/js/src/utils/fast-safe-stringify/index.ts @@ -0,0 +1,230 @@ +/* eslint-disable */ +// @ts-nocheck +var LIMIT_REPLACE_NODE = "[...]"; +var CIRCULAR_REPLACE_NODE = { result: "[Circular]" }; + +var arr = []; +var replacerStack = []; + +function defaultOptions() { + return { + depthLimit: Number.MAX_SAFE_INTEGER, + edgesLimit: Number.MAX_SAFE_INTEGER, + }; +} + +// Regular stringify +export function stringify(obj, replacer?, spacer?, options?) { + if (typeof options === "undefined") { + options = defaultOptions(); + } + + decirc(obj, "", 0, [], undefined, 0, options); + var res; + try { + if (replacerStack.length === 0) { + res = JSON.stringify(obj, replacer, spacer); + } else { + res = JSON.stringify(obj, replaceGetterValues(replacer), spacer); + } + } catch (_) { + return JSON.stringify( + "[unable to serialize, circular reference is too complex to analyze]" + ); + } finally { + while (arr.length !== 0) { + var part = arr.pop(); + if (part.length === 4) { + Object.defineProperty(part[0], part[1], part[3]); + } else { + part[0][part[1]] = part[2]; + } + } + } + return res; +} + +function setReplace(replace, val, k, parent) { + var propertyDescriptor = Object.getOwnPropertyDescriptor(parent, k); + if (propertyDescriptor.get !== undefined) { + if (propertyDescriptor.configurable) { + Object.defineProperty(parent, k, { value: replace }); + arr.push([parent, k, val, propertyDescriptor]); + } else { + replacerStack.push([val, k, replace]); + } + } else { + parent[k] = replace; + arr.push([parent, k, val]); + } +} + +function decirc(val, k, edgeIndex, stack, parent, depth, options) { + depth += 1; + var i; + if (typeof val === "object" && val !== null) { + for (i = 0; i < stack.length; i++) { + if (stack[i] === val) { + setReplace(CIRCULAR_REPLACE_NODE, val, k, parent); + return; + 
} + } + + if ( + typeof options.depthLimit !== "undefined" && + depth > options.depthLimit + ) { + setReplace(LIMIT_REPLACE_NODE, val, k, parent); + return; + } + + if ( + typeof options.edgesLimit !== "undefined" && + edgeIndex + 1 > options.edgesLimit + ) { + setReplace(LIMIT_REPLACE_NODE, val, k, parent); + return; + } + + stack.push(val); + // Optimize for Arrays. Big arrays could kill the performance otherwise! + if (Array.isArray(val)) { + for (i = 0; i < val.length; i++) { + decirc(val[i], i, i, stack, val, depth, options); + } + } else { + var keys = Object.keys(val); + for (i = 0; i < keys.length; i++) { + var key = keys[i]; + decirc(val[key], key, i, stack, val, depth, options); + } + } + stack.pop(); + } +} + +// Stable-stringify +function compareFunction(a, b) { + if (a < b) { + return -1; + } + if (a > b) { + return 1; + } + return 0; +} + +function deterministicStringify(obj, replacer, spacer, options) { + if (typeof options === "undefined") { + options = defaultOptions(); + } + + var tmp = deterministicDecirc(obj, "", 0, [], undefined, 0, options) || obj; + var res; + try { + if (replacerStack.length === 0) { + res = JSON.stringify(tmp, replacer, spacer); + } else { + res = JSON.stringify(tmp, replaceGetterValues(replacer), spacer); + } + } catch (_) { + return JSON.stringify( + "[unable to serialize, circular reference is too complex to analyze]" + ); + } finally { + // Ensure that we restore the object as it was. 
+ while (arr.length !== 0) { + var part = arr.pop(); + if (part.length === 4) { + Object.defineProperty(part[0], part[1], part[3]); + } else { + part[0][part[1]] = part[2]; + } + } + } + return res; +} + +function deterministicDecirc(val, k, edgeIndex, stack, parent, depth, options) { + depth += 1; + var i; + if (typeof val === "object" && val !== null) { + for (i = 0; i < stack.length; i++) { + if (stack[i] === val) { + setReplace(CIRCULAR_REPLACE_NODE, val, k, parent); + return; + } + } + try { + if (typeof val.toJSON === "function") { + return; + } + } catch (_) { + return; + } + + if ( + typeof options.depthLimit !== "undefined" && + depth > options.depthLimit + ) { + setReplace(LIMIT_REPLACE_NODE, val, k, parent); + return; + } + + if ( + typeof options.edgesLimit !== "undefined" && + edgeIndex + 1 > options.edgesLimit + ) { + setReplace(LIMIT_REPLACE_NODE, val, k, parent); + return; + } + + stack.push(val); + // Optimize for Arrays. Big arrays could kill the performance otherwise! + if (Array.isArray(val)) { + for (i = 0; i < val.length; i++) { + deterministicDecirc(val[i], i, i, stack, val, depth, options); + } + } else { + // Create a temporary object in the required way + var tmp = {}; + var keys = Object.keys(val).sort(compareFunction); + for (i = 0; i < keys.length; i++) { + var key = keys[i]; + deterministicDecirc(val[key], key, i, stack, val, depth, options); + tmp[key] = val[key]; + } + if (typeof parent !== "undefined") { + arr.push([parent, k, val]); + parent[k] = tmp; + } else { + return tmp; + } + } + stack.pop(); + } +} + +// wraps replacer function to handle values we couldn't replace +// and mark them as replaced value +function replaceGetterValues(replacer) { + replacer = + typeof replacer !== "undefined" + ? 
replacer + : function (k, v) { + return v; + }; + return function (key, val) { + if (replacerStack.length > 0) { + for (var i = 0; i < replacerStack.length; i++) { + var part = replacerStack[i]; + if (part[1] === key && part[0] === val) { + val = part[2]; + replacerStack.splice(i, 1); + break; + } + } + } + return replacer.call(this, key, val); + }; +} diff --git a/js/src/utils/lodash/LICENSE b/js/src/utils/lodash/LICENSE new file mode 100644 index 000000000..5b807415b --- /dev/null +++ b/js/src/utils/lodash/LICENSE @@ -0,0 +1,49 @@ +The MIT License + +Copyright JS Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. \ No newline at end of file diff --git a/js/src/utils/lodash/assignValue.ts b/js/src/utils/lodash/assignValue.ts new file mode 100644 index 000000000..f02ed4991 --- /dev/null +++ b/js/src/utils/lodash/assignValue.ts @@ -0,0 +1,27 @@ +import baseAssignValue from "./baseAssignValue.js"; +import eq from "./eq.js"; + +/** Used to check objects for own properties. */ +const hasOwnProperty = Object.prototype.hasOwnProperty; + +/** + * Assigns `value` to `key` of `object` if the existing value is not equivalent. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. 
+ */ +function assignValue(object: Record, key: string, value: any) { + const objValue = object[key]; + + if (!(hasOwnProperty.call(object, key) && eq(objValue, value))) { + if (value !== 0 || 1 / value === 1 / objValue) { + baseAssignValue(object, key, value); + } + } else if (value === undefined && !(key in object)) { + baseAssignValue(object, key, value); + } +} + +export default assignValue; diff --git a/js/src/utils/lodash/baseAssignValue.ts b/js/src/utils/lodash/baseAssignValue.ts new file mode 100644 index 000000000..5d1d70d16 --- /dev/null +++ b/js/src/utils/lodash/baseAssignValue.ts @@ -0,0 +1,23 @@ +/** + * The base implementation of `assignValue` and `assignMergeValue` without + * value checks. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. + */ +function baseAssignValue(object: Record, key: string, value: any) { + if (key === "__proto__") { + Object.defineProperty(object, key, { + configurable: true, + enumerable: true, + value: value, + writable: true, + }); + } else { + object[key] = value; + } +} + +export default baseAssignValue; diff --git a/js/src/utils/lodash/baseSet.ts b/js/src/utils/lodash/baseSet.ts new file mode 100644 index 000000000..5db4ddf76 --- /dev/null +++ b/js/src/utils/lodash/baseSet.ts @@ -0,0 +1,52 @@ +// @ts-nocheck + +import assignValue from "./assignValue.js"; +import castPath from "./castPath.js"; +import isIndex from "./isIndex.js"; +import isObject from "./isObject.js"; +import toKey from "./toKey.js"; + +/** + * The base implementation of `set`. + * + * @private + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @param {Function} [customizer] The function to customize path creation. + * @returns {Object} Returns `object`. 
+ */ +function baseSet(object, path, value, customizer) { + if (!isObject(object)) { + return object; + } + path = castPath(path, object); + + const length = path.length; + const lastIndex = length - 1; + + let index = -1; + let nested = object; + + while (nested != null && ++index < length) { + const key = toKey(path[index]); + let newValue = value; + + if (index !== lastIndex) { + const objValue = nested[key]; + newValue = customizer ? customizer(objValue, key, nested) : undefined; + if (newValue === undefined) { + newValue = isObject(objValue) + ? objValue + : isIndex(path[index + 1]) + ? [] + : {}; + } + } + assignValue(nested, key, newValue); + nested = nested[key]; + } + return object; +} + +export default baseSet; diff --git a/js/src/utils/lodash/castPath.ts b/js/src/utils/lodash/castPath.ts new file mode 100644 index 000000000..4ae161c6f --- /dev/null +++ b/js/src/utils/lodash/castPath.ts @@ -0,0 +1,19 @@ +import isKey from "./isKey.js"; +import stringToPath from "./stringToPath.js"; + +/** + * Casts `value` to a path array if it's not one. + * + * @private + * @param {*} value The value to inspect. + * @param {Object} [object] The object to query keys on. + * @returns {Array} Returns the cast property path array. + */ +function castPath(value: any, object: Record) { + if (Array.isArray(value)) { + return value; + } + return isKey(value, object) ? [value] : stringToPath(value); +} + +export default castPath; diff --git a/js/src/utils/lodash/eq.ts b/js/src/utils/lodash/eq.ts new file mode 100644 index 000000000..11ece1229 --- /dev/null +++ b/js/src/utils/lodash/eq.ts @@ -0,0 +1,35 @@ +/** + * Performs a + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * comparison between two values to determine if they are equivalent. + * + * @since 4.0.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. 
+ * @returns {boolean} Returns `true` if the values are equivalent, else `false`. + * @example + * + * const object = { 'a': 1 } + * const other = { 'a': 1 } + * + * eq(object, object) + * // => true + * + * eq(object, other) + * // => false + * + * eq('a', 'a') + * // => true + * + * eq('a', Object('a')) + * // => false + * + * eq(NaN, NaN) + * // => true + */ +function eq(value: any, other: any) { + return value === other || (value !== value && other !== other); +} + +export default eq; diff --git a/js/src/utils/lodash/getTag.ts b/js/src/utils/lodash/getTag.ts new file mode 100644 index 000000000..c616a26e0 --- /dev/null +++ b/js/src/utils/lodash/getTag.ts @@ -0,0 +1,19 @@ +// @ts-nocheck + +const toString = Object.prototype.toString; + +/** + * Gets the `toStringTag` of `value`. + * + * @private + * @param {*} value The value to query. + * @returns {string} Returns the `toStringTag`. + */ +function getTag(value) { + if (value == null) { + return value === undefined ? "[object Undefined]" : "[object Null]"; + } + return toString.call(value); +} + +export default getTag; diff --git a/js/src/utils/lodash/isIndex.ts b/js/src/utils/lodash/isIndex.ts new file mode 100644 index 000000000..eb956ca70 --- /dev/null +++ b/js/src/utils/lodash/isIndex.ts @@ -0,0 +1,30 @@ +// @ts-nocheck + +/** Used as references for various `Number` constants. */ +const MAX_SAFE_INTEGER = 9007199254740991; + +/** Used to detect unsigned integer values. */ +const reIsUint = /^(?:0|[1-9]\d*)$/; + +/** + * Checks if `value` is a valid array-like index. + * + * @private + * @param {*} value The value to check. + * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index. + * @returns {boolean} Returns `true` if `value` is a valid index, else `false`. + */ +function isIndex(value, length) { + const type = typeof value; + length = length == null ? 
MAX_SAFE_INTEGER : length; + + return ( + !!length && + (type === "number" || (type !== "symbol" && reIsUint.test(value))) && + value > -1 && + value % 1 === 0 && + value < length + ); +} + +export default isIndex; diff --git a/js/src/utils/lodash/isKey.ts b/js/src/utils/lodash/isKey.ts new file mode 100644 index 000000000..5c46772b9 --- /dev/null +++ b/js/src/utils/lodash/isKey.ts @@ -0,0 +1,36 @@ +// @ts-nocheck +import isSymbol from "./isSymbol.js"; + +/** Used to match property names within property paths. */ +const reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/; +const reIsPlainProp = /^\w*$/; + +/** + * Checks if `value` is a property name and not a property path. + * + * @private + * @param {*} value The value to check. + * @param {Object} [object] The object to query keys on. + * @returns {boolean} Returns `true` if `value` is a property name, else `false`. + */ +function isKey(value, object) { + if (Array.isArray(value)) { + return false; + } + const type = typeof value; + if ( + type === "number" || + type === "boolean" || + value == null || + isSymbol(value) + ) { + return true; + } + return ( + reIsPlainProp.test(value) || + !reIsDeepProp.test(value) || + (object != null && value in Object(object)) + ); +} + +export default isKey; diff --git a/js/src/utils/lodash/isObject.ts b/js/src/utils/lodash/isObject.ts new file mode 100644 index 000000000..56c8930f8 --- /dev/null +++ b/js/src/utils/lodash/isObject.ts @@ -0,0 +1,31 @@ +// @ts-nocheck + +/** + * Checks if `value` is the + * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) + * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) + * + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an object, else `false`. 
+ * @example + * + * isObject({}) + * // => true + * + * isObject([1, 2, 3]) + * // => true + * + * isObject(Function) + * // => true + * + * isObject(null) + * // => false + */ +function isObject(value) { + const type = typeof value; + return value != null && (type === "object" || type === "function"); +} + +export default isObject; diff --git a/js/src/utils/lodash/isSymbol.ts b/js/src/utils/lodash/isSymbol.ts new file mode 100644 index 000000000..94e65a60f --- /dev/null +++ b/js/src/utils/lodash/isSymbol.ts @@ -0,0 +1,28 @@ +// @ts-nocheck + +import getTag from "./getTag.js"; + +/** + * Checks if `value` is classified as a `Symbol` primitive or object. + * + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. + * @example + * + * isSymbol(Symbol.iterator) + * // => true + * + * isSymbol('abc') + * // => false + */ +function isSymbol(value) { + const type = typeof value; + return ( + type === "symbol" || + (type === "object" && value != null && getTag(value) === "[object Symbol]") + ); +} + +export default isSymbol; diff --git a/js/src/utils/lodash/memoizeCapped.ts b/js/src/utils/lodash/memoizeCapped.ts new file mode 100644 index 000000000..c4696ddd3 --- /dev/null +++ b/js/src/utils/lodash/memoizeCapped.ts @@ -0,0 +1,69 @@ +// @ts-nocheck + +/** + * Creates a function that memoizes the result of `func`. If `resolver` is + * provided, it determines the cache key for storing the result based on the + * arguments provided to the memoized function. By default, the first argument + * provided to the memoized function is used as the map cache key. The `func` + * is invoked with the `this` binding of the memoized function. + * + * **Note:** The cache is exposed as the `cache` property on the memoized + * function. 
Its creation may be customized by replacing the `memoize.Cache` + * constructor with one whose instances implement the + * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) + * method interface of `clear`, `delete`, `get`, `has`, and `set`. + * + * @since 0.1.0 + * @category Function + * @param {Function} func The function to have its output memoized. + * @param {Function} [resolver] The function to resolve the cache key. + * @returns {Function} Returns the new memoized function. + * @example + * + * const object = { 'a': 1, 'b': 2 } + * const other = { 'c': 3, 'd': 4 } + * + * const values = memoize(values) + * values(object) + * // => [1, 2] + * + * values(other) + * // => [3, 4] + * + * object.a = 2 + * values(object) + * // => [1, 2] + * + * // Modify the result cache. + * values.cache.set(object, ['a', 'b']) + * values(object) + * // => ['a', 'b'] + * + * // Replace `memoize.Cache`. + * memoize.Cache = WeakMap + */ +function memoize(func, resolver) { + if ( + typeof func !== "function" || + (resolver != null && typeof resolver !== "function") + ) { + throw new TypeError("Expected a function"); + } + const memoized = function (...args) { + const key = resolver ? resolver.apply(this, args) : args[0]; + const cache = memoized.cache; + + if (cache.has(key)) { + return cache.get(key); + } + const result = func.apply(this, args); + memoized.cache = cache.set(key, result) || cache; + return result; + }; + memoized.cache = new (memoize.Cache || Map)(); + return memoized; +} + +memoize.Cache = Map; + +export default memoize; diff --git a/js/src/utils/lodash/set.ts b/js/src/utils/lodash/set.ts new file mode 100644 index 000000000..01f277ce4 --- /dev/null +++ b/js/src/utils/lodash/set.ts @@ -0,0 +1,39 @@ +// @ts-nocheck + +import baseSet from "./baseSet.js"; + +/** + * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, + * it's created. 
Arrays are created for missing index properties while objects + * are created for all other missing properties. Use `setWith` to customize + * `path` creation. + * + * **Note:** This method mutates `object`. + * + * Inlined to just use set functionality and patch vulnerabilities + * on existing isolated "lodash.set" package. + * + * @since 3.7.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @returns {Object} Returns `object`. + * @see has, hasIn, get, unset + * @example + * + * const object = { 'a': [{ 'b': { 'c': 3 } }] } + * + * set(object, 'a[0].b.c', 4) + * console.log(object.a[0].b.c) + * // => 4 + * + * set(object, ['x', '0', 'y', 'z'], 5) + * console.log(object.x[0].y.z) + * // => 5 + */ +function set(object, path, value) { + return object == null ? object : baseSet(object, path, value); +} + +export default set; diff --git a/js/src/utils/lodash/stringToPath.ts b/js/src/utils/lodash/stringToPath.ts new file mode 100644 index 000000000..d4e99ab9f --- /dev/null +++ b/js/src/utils/lodash/stringToPath.ts @@ -0,0 +1,49 @@ +// @ts-nocheck + +import memoizeCapped from "./memoizeCapped.js"; + +const charCodeOfDot = ".".charCodeAt(0); +const reEscapeChar = /\\(\\)?/g; +const rePropName = RegExp( + // Match anything that isn't a dot or bracket. + "[^.[\\]]+" + + "|" + + // Or match property names within brackets. + "\\[(?:" + + // Match a non-string expression. + "([^\"'][^[]*)" + + "|" + + // Or match strings (supports escaping characters). + "([\"'])((?:(?!\\2)[^\\\\]|\\\\.)*?)\\2" + + ")\\]" + + "|" + + // Or match "" as the space between consecutive dots or empty brackets. + "(?=(?:\\.|\\[\\])(?:\\.|\\[\\]|$))", + "g" +); + +/** + * Converts `string` to a property path array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the property path array. 
+ */ +const stringToPath = memoizeCapped((string: string) => { + const result = []; + if (string.charCodeAt(0) === charCodeOfDot) { + result.push(""); + } + string.replace(rePropName, (match, expression, quote, subString) => { + let key = match; + if (quote) { + key = subString.replace(reEscapeChar, "$1"); + } else if (expression) { + key = expression.trim(); + } + result.push(key); + }); + return result; +}); + +export default stringToPath; diff --git a/js/src/utils/lodash/toKey.ts b/js/src/utils/lodash/toKey.ts new file mode 100644 index 000000000..98b327455 --- /dev/null +++ b/js/src/utils/lodash/toKey.ts @@ -0,0 +1,23 @@ +// @ts-nocheck + +import isSymbol from "./isSymbol.js"; + +/** Used as references for various `Number` constants. */ +const INFINITY = 1 / 0; + +/** + * Converts `value` to a string key if it's not a string or symbol. + * + * @private + * @param {*} value The value to inspect. + * @returns {string|symbol} Returns the key. + */ +function toKey(value) { + if (typeof value === "string" || isSymbol(value)) { + return value; + } + const result = `${value}`; + return result === "0" && 1 / value === -INFINITY ? 
"-0" : result; +} + +export default toKey; diff --git a/js/src/utils/prompts.ts b/js/src/utils/prompts.ts new file mode 100644 index 000000000..53bbee3c4 --- /dev/null +++ b/js/src/utils/prompts.ts @@ -0,0 +1,45 @@ +import { parse as parseVersion } from "semver"; + +export function isVersionGreaterOrEqual( + current_version: string, + target_version: string +): boolean { + const current = parseVersion(current_version); + const target = parseVersion(target_version); + + if (!current || !target) { + throw new Error("Invalid version format."); + } + + return current.compare(target) >= 0; +} + +export function parsePromptIdentifier( + identifier: string +): [string, string, string] { + if ( + !identifier || + identifier.split("/").length > 2 || + identifier.startsWith("/") || + identifier.endsWith("/") || + identifier.split(":").length > 2 + ) { + throw new Error(`Invalid identifier format: ${identifier}`); + } + + const [ownerNamePart, commitPart] = identifier.split(":"); + const commit = commitPart || "latest"; + + if (ownerNamePart.includes("/")) { + const [owner, name] = ownerNamePart.split("/", 2); + if (!owner || !name) { + throw new Error(`Invalid identifier format: ${identifier}`); + } + return [owner, name, commit]; + } else { + if (!ownerNamePart) { + throw new Error(`Invalid identifier format: ${identifier}`); + } + return ["-", ownerNamePart, commit]; + } +} diff --git a/js/src/wrappers/generic.ts b/js/src/wrappers/generic.ts new file mode 100644 index 000000000..3b62bc0f8 --- /dev/null +++ b/js/src/wrappers/generic.ts @@ -0,0 +1,72 @@ +import type { RunTreeConfig } from "../index.js"; +import { traceable } from "../traceable.js"; + +export const _wrapClient = ( + sdk: T, + runName: string, + options?: Omit +): T => { + return new Proxy(sdk, { + get(target, propKey, receiver) { + const originalValue = target[propKey as keyof T]; + if (typeof originalValue === "function") { + return traceable(originalValue.bind(target), { + run_type: "llm", + ...options, + 
name: [runName, propKey.toString()].join("."), + }); + } else if ( + originalValue != null && + !Array.isArray(originalValue) && + // eslint-disable-next-line no-instanceof/no-instanceof + !(originalValue instanceof Date) && + typeof originalValue === "object" + ) { + return _wrapClient( + originalValue, + [runName, propKey.toString()].join("."), + options + ); + } else { + return Reflect.get(target, propKey, receiver); + } + }, + }); +}; + +type WrapSDKOptions = Partial< + RunTreeConfig & { + /** + * @deprecated Use `name` instead. + */ + runName: string; + } +>; + +/** + * Wrap an arbitrary SDK, enabling automatic LangSmith tracing. + * Method signatures are unchanged. + * + * Note that this will wrap and trace ALL SDK methods, not just + * LLM completion methods. If the passed SDK contains other methods, + * we recommend using the wrapped instance for LLM calls only. + * @param sdk An arbitrary SDK instance. + * @param options LangSmith options. + * @returns + */ +export const wrapSDK = ( + sdk: T, + options?: WrapSDKOptions +): T => { + const traceableOptions = options ? { ...options } : undefined; + if (traceableOptions != null) { + delete traceableOptions.runName; + delete traceableOptions.name; + } + + return _wrapClient( + sdk, + options?.name ?? options?.runName ?? 
sdk.constructor?.name, + traceableOptions + ); +}; diff --git a/js/src/wrappers/index.ts b/js/src/wrappers/index.ts index e8f265647..6ff1385b0 100644 --- a/js/src/wrappers/index.ts +++ b/js/src/wrappers/index.ts @@ -1 +1,2 @@ export * from "./openai.js"; +export { wrapSDK } from "./generic.js"; diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index 0ea56f882..fa5af83ce 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -1,6 +1,6 @@ import { OpenAI } from "openai"; import type { APIPromise } from "openai/core"; -import type { Client, RunTreeConfig } from "../index.js"; +import type { RunTreeConfig } from "../index.js"; import { isTraceableFunction, traceable } from "../traceable.js"; // Extra leniency around types in case multiple OpenAI SDK versions get installed @@ -263,7 +263,7 @@ export const wrapOpenAI = ( return { ls_provider: "openai", - ls_model_type: "text", + ls_model_type: "llm", ls_model_name: params.model, ls_max_tokens: params.max_tokens ?? undefined, ls_temperature: params.temperature ?? 
undefined, @@ -276,58 +276,3 @@ export const wrapOpenAI = ( return openai as PatchedOpenAIClient; }; - -const _wrapClient = ( - sdk: T, - runName: string, - options?: { client?: Client } -): T => { - return new Proxy(sdk, { - get(target, propKey, receiver) { - const originalValue = target[propKey as keyof T]; - if (typeof originalValue === "function") { - return traceable( - originalValue.bind(target), - Object.assign( - { name: [runName, propKey.toString()].join("."), run_type: "llm" }, - options - ) - ); - } else if ( - originalValue != null && - !Array.isArray(originalValue) && - // eslint-disable-next-line no-instanceof/no-instanceof - !(originalValue instanceof Date) && - typeof originalValue === "object" - ) { - return _wrapClient( - originalValue, - [runName, propKey.toString()].join("."), - options - ); - } else { - return Reflect.get(target, propKey, receiver); - } - }, - }); -}; - -/** - * Wrap an arbitrary SDK, enabling automatic LangSmith tracing. - * Method signatures are unchanged. - * - * Note that this will wrap and trace ALL SDK methods, not just - * LLM completion methods. If the passed SDK contains other methods, - * we recommend using the wrapped instance for LLM calls only. - * @param sdk An arbitrary SDK instance. - * @param options LangSmith options. - * @returns - */ -export const wrapSDK = ( - sdk: T, - options?: { client?: Client; runName?: string } -): T => { - return _wrapClient(sdk, options?.runName ?? sdk.constructor?.name, { - client: options?.client, - }); -}; diff --git a/js/src/wrappers/vercel.ts b/js/src/wrappers/vercel.ts new file mode 100644 index 000000000..dc022d7c8 --- /dev/null +++ b/js/src/wrappers/vercel.ts @@ -0,0 +1,109 @@ +import type { RunTreeConfig } from "../index.js"; +import { traceable } from "../traceable.js"; +import { _wrapClient } from "./generic.js"; + +/** + * Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing. 
+ * After wrapping a model, you can use it with the Vercel AI SDK Core + * methods as normal. + * + * @example + * ```ts + * import { anthropic } from "@ai-sdk/anthropic"; + * import { streamText } from "ai"; + * import { wrapAISDKModel } from "langsmith/wrappers/vercel"; + * + * const anthropicModel = anthropic("claude-3-haiku-20240307"); + * + * const modelWithTracing = wrapAISDKModel(anthropicModel); + * + * const { textStream } = await streamText({ + * model: modelWithTracing, + * prompt: "Write a vegetarian lasagna recipe for 4 people.", + * }); + * + * for await (const chunk of textStream) { + * console.log(chunk); + * } + * ``` + * @param model An AI SDK model instance. + * @param options LangSmith options. + * @returns + */ +export const wrapAISDKModel = ( + model: T, + options?: Partial +): T => { + if ( + !("doStream" in model) || + typeof model.doStream !== "function" || + !("doGenerate" in model) || + typeof model.doGenerate !== "function" + ) { + throw new Error( + `Received invalid input. This version of wrapAISDKModel only supports Vercel LanguageModelV1 instances.` + ); + } + const runName = options?.name ?? 
model.constructor?.name; + return new Proxy(model, { + get(target, propKey, receiver) { + const originalValue = target[propKey as keyof T]; + if (typeof originalValue === "function") { + let __finalTracedIteratorKey; + let aggregator; + if (propKey === "doStream") { + __finalTracedIteratorKey = "stream"; + aggregator = (chunks: any[]) => { + return chunks.reduce( + (aggregated, chunk) => { + if (chunk.type === "text-delta") { + return { + ...aggregated, + text: aggregated.text + chunk.textDelta, + }; + } else if (chunk.type === "tool-call") { + return { + ...aggregated, + ...chunk, + }; + } else if (chunk.type === "finish") { + return { + ...aggregated, + usage: chunk.usage, + finishReason: chunk.finishReason, + }; + } else { + return aggregated; + } + }, + { + text: "", + } + ); + }; + } + return traceable(originalValue.bind(target), { + run_type: "llm", + name: runName, + ...options, + __finalTracedIteratorKey, + aggregator, + }); + } else if ( + originalValue != null && + !Array.isArray(originalValue) && + // eslint-disable-next-line no-instanceof/no-instanceof + !(originalValue instanceof Date) && + typeof originalValue === "object" + ) { + return _wrapClient( + originalValue, + [runName, propKey.toString()].join("."), + options + ); + } else { + return Reflect.get(target, propKey, receiver); + } + }, + }); +}; diff --git a/js/tsconfig.json b/js/tsconfig.json index 92b1a3026..ab24d6247 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -42,6 +42,7 @@ "src/wrappers/index.ts", "src/anonymizer/index.ts", "src/wrappers/openai.ts", + "src/wrappers/vercel.ts", "src/singletons/traceable.ts" ] } diff --git a/js/yarn.lock b/js/yarn.lock index be071906d..2d3032272 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -2,6 +2,74 @@ # yarn lockfile v1 +"@ai-sdk/openai@^0.0.40": + version "0.0.40" + resolved "https://registry.yarnpkg.com/@ai-sdk/openai/-/openai-0.0.40.tgz#227df69c8edf8b26b17f78ae55daa03e58a58870" + integrity 
sha512-9Iq1UaBHA5ZzNv6j3govuKGXrbrjuWvZIgWNJv4xzXlDMHu9P9hnqlBr/Aiay54WwCuTVNhTzAUTfFgnTs2kbQ== + dependencies: + "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider-utils" "1.0.5" + +"@ai-sdk/provider-utils@1.0.5": + version "1.0.5" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider-utils/-/provider-utils-1.0.5.tgz#765c60871019ded104d79b4cea0805ba563bb5aa" + integrity sha512-XfOawxk95X3S43arn2iQIFyWGMi0DTxsf9ETc6t7bh91RPWOOPYN1tsmS5MTKD33OGJeaDQ/gnVRzXUCRBrckQ== + dependencies: + "@ai-sdk/provider" "0.0.14" + eventsource-parser "1.1.2" + nanoid "3.3.6" + secure-json-parse "2.7.0" + +"@ai-sdk/provider@0.0.14": + version "0.0.14" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider/-/provider-0.0.14.tgz#a07569c39a8828aa8312cf1ac6f35ce6ee1b2fce" + integrity sha512-gaQ5Y033nro9iX1YUjEDFDRhmMcEiCk56LJdIUbX5ozEiCNCfpiBpEqrjSp/Gp5RzBS2W0BVxfG7UGW6Ezcrzg== + dependencies: + json-schema "0.4.0" + +"@ai-sdk/react@0.0.30": + version "0.0.30" + resolved "https://registry.yarnpkg.com/@ai-sdk/react/-/react-0.0.30.tgz#51d586141a81d7f9b76798922b206e8c6faf04dc" + integrity sha512-VnHYRzwhiM4bZdL9DXwJltN8Qnz1MkFdRTa1y7KdmHSJ18ebCNWmPO5XJhnZiQdEXHYmrzZ3WiVt2X6pxK07FA== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + swr "2.2.5" + +"@ai-sdk/solid@0.0.23": + version "0.0.23" + resolved "https://registry.yarnpkg.com/@ai-sdk/solid/-/solid-0.0.23.tgz#712cf1a02bfc337806c5c1b486d16252bec57a15" + integrity sha512-GMojG2PsqwnOGfx7C1MyQPzPBIlC44qn3ykjp9OVnN2Fu47mcFp3QM6gwWoHwNqi7FQDjRy+s/p+8EqYIQcAwg== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + +"@ai-sdk/svelte@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/svelte/-/svelte-0.0.24.tgz#2519b84a0c104c82d5e48d3b8e9350e9dd4af6cf" + integrity sha512-ZjzzvfYLE01VTO0rOZf6z9sTGhJhe6IYZMxQiM3P+zemufRYe57NDcLYEb6h+2qhvU6Z+k/Q+Nh/spAt0JzGUg== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + sswr "2.1.0" + 
+"@ai-sdk/ui-utils@0.0.20": + version "0.0.20" + resolved "https://registry.yarnpkg.com/@ai-sdk/ui-utils/-/ui-utils-0.0.20.tgz#c68968185a7cc33f7d98d13999731e1c7b672cbb" + integrity sha512-6MRWigzXfuxUcAYEFMLP6cLbALJkg12Iz1Sl+wuPMpB6aw7di2ePiTuNakFUYjgP7TNsW4UxzpypBqqJ1KNB0A== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + secure-json-parse "2.7.0" + +"@ai-sdk/vue@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/vue/-/vue-0.0.24.tgz#2e72f7e755850ed51540f9a7b25dc6b228a8647a" + integrity sha512-0S+2dVSui6LFgaWoFx+3h5R7GIP9MxdJo63tFuLvgyKr2jmpo5S5kGcWl95vNdzKDqaesAXfOnky+tn5A2d49A== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + swrv "1.0.4" + "@ampproject/remapping@^2.2.0": version "2.2.1" resolved "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz" @@ -1300,49 +1368,55 @@ "@jridgewell/resolve-uri" "3.1.0" "@jridgewell/sourcemap-codec" "1.4.14" -"@langchain/core@0.2.0", "@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>0.1.56 <0.3.0", "@langchain/core@^0.1.61", "@langchain/core@^0.2.0", "@langchain/core@~0.2.0": - version "0.2.0" - resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.0.tgz#19c6374a5ad80daf8e14cb58582bc988109a1403" - integrity sha512-UbCJUp9eh2JXd9AW/vhPbTgtZoMgTqJgSan5Wf/EP27X8JM65lWdCOpJW+gHyBXvabbyrZz3/EGaptTUL5gutw== +"@langchain/core@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.3.1.tgz#f06206809575b2a95eaef609b3273842223c0786" + integrity sha512-xYdTAgS9hYPt+h0/OwpyRcMB5HKR40LXutbSr2jw3hMVIOwD1DnvhnUEnWgBK4lumulVW2jrosNPyBKMhRZAZg== dependencies: ansi-styles "^5.0.0" camelcase "6" decamelize "1.2.0" js-tiktoken "^1.0.12" - langsmith "~0.1.7" - ml-distance "^4.0.0" + langsmith "^0.1.56-rc.1" mustache "^4.2.0" p-queue "^6.6.2" p-retry "4" - uuid "^9.0.0" + uuid "^10.0.0" zod "^3.22.4" zod-to-json-schema "^3.22.3" -"@langchain/langgraph@^0.0.19": - version "0.0.19" - resolved 
"https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.19.tgz#c1cfeee7d0e2b91dd31cba7144f8a7283babc61d" - integrity sha512-V0t40qbwUyzEpL3Q0jHPVTVljdLc3YJCHIF9Q+sw9HRWwfBO1nWJHHbCxgVzeJ2NsX1X/dUyNkq8LbSEsTYpTQ== +"@langchain/langgraph-checkpoint@~0.0.6": + version "0.0.6" + resolved "https://registry.yarnpkg.com/@langchain/langgraph-checkpoint/-/langgraph-checkpoint-0.0.6.tgz#69f0c5c9aeefd48dcf0fa1ffa0744d8139a9f27d" + integrity sha512-hQsznlUMFKyOCaN9VtqNSSemfKATujNy5ePM6NX7lruk/Mmi2t7R9SsBnf9G2Yts+IaIwv3vJJaAFYEHfqbc5g== + dependencies: + uuid "^10.0.0" + +"@langchain/langgraph@^0.2.3": + version "0.2.3" + resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.2.3.tgz#34072f68536706a42c7fb978f1ab5373c058e2f5" + integrity sha512-agBa79dgKk08B3gNE9+SSLYLmlhBwMaCPsME5BlIFJjs2j2lDnSgKtUfQ9nE4e3Q51L9AA4DjIxmxJiQtS3GOw== dependencies: - "@langchain/core" "^0.1.61" - uuid "^9.0.1" + "@langchain/langgraph-checkpoint" "~0.0.6" + double-ended-queue "^2.1.0-0" + uuid "^10.0.0" + zod "^3.23.8" -"@langchain/openai@~0.0.28": - version "0.0.33" - resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.0.33.tgz#af88d815ff0095018c879d3a1a5a32b2795b5c69" - integrity sha512-hTBo9y9bHtFvMT5ySBW7TrmKhLSA91iNahigeqAFBVrLmBDz+6rzzLFc1mpq6JEAR3fZKdaUXqso3nB23jIpTw== +"@langchain/openai@>=0.1.0 <0.4.0", "@langchain/openai@^0.3.0": + version "0.3.0" + resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.3.0.tgz#89329ab9350187269a471dac2c2f4fca5f1fc5a3" + integrity sha512-yXrz5Qn3t9nq3NQAH2l4zZOI4ev2CFdLC5kvmi5SdW4bggRuM40SXTUAY3VRld4I5eocYfk82VbrlA+6dvN5EA== dependencies: - "@langchain/core" ">0.1.56 <0.3.0" js-tiktoken "^1.0.12" - openai "^4.41.1" + openai "^4.57.3" zod "^3.22.4" zod-to-json-schema "^3.22.3" -"@langchain/textsplitters@~0.0.0": - version "0.0.2" - resolved "https://registry.yarnpkg.com/@langchain/textsplitters/-/textsplitters-0.0.2.tgz#500baa8341fb7fc86fca531a4192665a319504a3" - integrity 
sha512-6bQOuYHTGYlkgPY/8M5WPq4nnXZpEysGzRopQCYjg2WLcEoIPUMMrXsAaNNdvU3BOeMrhin8izvpDPD165hX6Q== +"@langchain/textsplitters@>=0.0.0 <0.2.0": + version "0.1.0" + resolved "https://registry.yarnpkg.com/@langchain/textsplitters/-/textsplitters-0.1.0.tgz#f37620992192df09ecda3dfbd545b36a6bcbae46" + integrity sha512-djI4uw9rlkAb5iMhtLED+xJebDdAG935AdP4eRTB02R7OB/act55Bj9wsskhZsvuyQRpO4O1wQOp85s6T6GWmw== dependencies: - "@langchain/core" ">0.1.0 <0.3.0" js-tiktoken "^1.0.12" "@nodelib/fs.scandir@2.1.5": @@ -1366,6 +1440,11 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" +"@opentelemetry/api@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.9.0.tgz#d03eba68273dc0f7509e2a3d5cba21eae10379fe" + integrity sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg== + "@sinclair/typebox@^0.25.16": version "0.25.24" resolved "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz" @@ -1443,6 +1522,11 @@ dependencies: "@babel/types" "^7.20.7" +"@types/diff-match-patch@^1.0.36": + version "1.0.36" + resolved "https://registry.yarnpkg.com/@types/diff-match-patch/-/diff-match-patch-1.0.36.tgz#dcef10a69d357fe9d43ac4ff2eca6b85dbf466af" + integrity sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg== + "@types/graceful-fs@^4.1.3": version "4.1.6" resolved "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz" @@ -1487,18 +1571,6 @@ resolved "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz" integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== -"@types/lodash.set@^4.3.9": - version "4.3.9" - resolved "https://registry.yarnpkg.com/@types/lodash.set/-/lodash.set-4.3.9.tgz#55d95bce407b42c6655f29b2d0811fd428e698f0" - integrity sha512-KOxyNkZpbaggVmqbpr82N2tDVTx05/3/j0f50Es1prxrWB0XYf9p3QNxqcbWb7P1Q9wlvsUSlCFnwlPCIJ46PQ== - dependencies: - "@types/lodash" "*" - -"@types/lodash@*": - 
version "4.17.4" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.17.4.tgz#0303b64958ee070059e3a7184048a55159fe20b7" - integrity sha512-wYCP26ZLxaT3R39kiN2+HcJ4kTd3U1waI/cY7ivWYqFP6pW3ZNpvi6Wd6PHZx7T/t8z0vlkXMg3QYLa7DZ/IJQ== - "@types/node-fetch@^2.6.4": version "2.6.11" resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.11.tgz#9b39b78665dae0e82a08f02f4967d62c66f95d24" @@ -1524,6 +1596,11 @@ resolved "https://registry.npmjs.org/@types/prettier/-/prettier-2.7.2.tgz" integrity sha512-KufADq8uQqo1pYKVIYzfKbJfBAc0sOeXqGbFaSpv8MRmC/zXgowNZmFcbngndGk922QDmOASEXUZCaY48gs4cg== +"@types/qs@^6.9.15": + version "6.9.16" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.16.tgz#52bba125a07c0482d26747d5d4947a64daf8f794" + integrity sha512-7i+zxXdPD0T4cKDuxCUXJ4wHcsJLwENa6Z3dCu8cfCK743OGy5Nu1RmAGqDPsoTDINVEcdXKRvR/zre+P2Ku1A== + "@types/retry@0.12.0": version "0.12.0" resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.0.tgz#2b35eccfcee7d38cd72ad99232fbd58bffb3c84d" @@ -1539,10 +1616,10 @@ resolved "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz" integrity sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw== -"@types/uuid@^9.0.1": - version "9.0.1" - resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.1.tgz#98586dc36aee8dacc98cc396dbca8d0429647aa6" - integrity sha512-rFT3ak0/2trgvp4yYZo5iKFEPsET7vKydKF+VRCxlQ9bpheehyAJH89dAkaLEq/j/RZXJIqcgsmPJKUP1Z28HA== +"@types/uuid@^10.0.0": + version "10.0.0" + resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-10.0.0.tgz#e9c07fe50da0f53dc24970cca94d619ff03f6f6d" + integrity sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ== "@types/yargs-parser@*": version "21.0.0" @@ -1669,6 +1746,26 @@ agentkeepalive@^4.2.1: dependencies: humanize-ms "^1.2.1" +ai@^3.2.37: + version "3.2.37" + resolved 
"https://registry.yarnpkg.com/ai/-/ai-3.2.37.tgz#148ed3124e6b0a01c703597471718520ef1c498d" + integrity sha512-waqKYZOE1zJwKEHx69R4v/xNG0a1o0He8TDgX29hUu36Zk0yrBJoVSlXbC9KoFuxW4eRpt+gZv1kqd1nVc1CGg== + dependencies: + "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/react" "0.0.30" + "@ai-sdk/solid" "0.0.23" + "@ai-sdk/svelte" "0.0.24" + "@ai-sdk/ui-utils" "0.0.20" + "@ai-sdk/vue" "0.0.24" + "@opentelemetry/api" "1.9.0" + eventsource-parser "1.1.2" + json-schema "0.4.0" + jsondiffpatch "0.6.0" + nanoid "3.3.6" + secure-json-parse "2.7.0" + zod-to-json-schema "3.22.5" + ajv@^6.10.0, ajv@^6.12.4: version "6.12.6" resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz" @@ -1883,16 +1980,6 @@ base64-js@^1.5.1: resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== -binary-extensions@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" - integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== - -binary-search@^1.3.5: - version "1.3.6" - resolved "https://registry.yarnpkg.com/binary-search/-/binary-search-1.3.6.tgz#e32426016a0c5092f0f3598836a1c7da3560565c" - integrity sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA== - brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz" @@ -1901,7 +1988,7 @@ brace-expansion@^1.1.7: balanced-match "^1.0.0" concat-map "0.0.1" -braces@^3.0.2: +braces@^3.0.3: version "3.0.3" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== @@ 
-1945,6 +2032,17 @@ call-bind@^1.0.0, call-bind@^1.0.2: function-bind "^1.1.1" get-intrinsic "^1.0.2" +call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" + callsites@^3.0.0: version "3.1.0" resolved "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz" @@ -1982,6 +2080,11 @@ chalk@^4.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" + integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== + char-regex@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz" @@ -1997,6 +2100,11 @@ cjs-module-lexer@^1.0.0: resolved "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz" integrity sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA== +client-only@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" + integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA== + cliui@^8.0.1: version "8.0.1" resolved "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz" @@ -2129,6 +2237,15 @@ deepmerge@^4.2.2: resolved "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz" integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== +define-data-property@^1.1.4: + version "1.1.4" + resolved 
"https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + gopd "^1.0.1" + define-properties@^1.1.3, define-properties@^1.1.4, define-properties@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz" @@ -2147,6 +2264,11 @@ detect-newline@^3.0.0: resolved "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz" integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== +diff-match-patch@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/diff-match-patch/-/diff-match-patch-1.0.5.tgz#abb584d5f10cd1196dfc55aa03701592ae3f7b37" + integrity sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw== + diff-sequences@^29.4.3: version "29.4.3" resolved "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz" @@ -2183,6 +2305,11 @@ dotenv@^16.1.3: resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.1.3.tgz#0c67e90d0ddb48d08c570888f709b41844928210" integrity sha512-FYssxsmCTtKL72fGBSvb1K9dRz0/VZeWqFme/vSb7r7323x4CRaHu4LvQ5JG3+s6yt2YPbBrkpiEODktfyjI9A== +double-ended-queue@^2.1.0-0: + version "2.1.0-0" + resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c" + integrity sha512-+BNfZ+deCo8hMNpDqDnvT+c0XpJ5cUa6mqYq89bho2Ifze4URTqRkcwR399hWoTrTkbZ/XJYDgP6rc7pRgffEQ== + electron-to-chromium@^1.4.411: version "1.4.414" resolved "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.414.tgz" @@ -2245,6 +2372,18 @@ es-abstract@^1.19.0, es-abstract@^1.20.4: unbox-primitive "^1.0.2" which-typed-array "^1.1.9" +es-define-property@^1.0.0: + version "1.0.0" + 
resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" + integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== + dependencies: + get-intrinsic "^1.2.4" + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + es-set-tostringtag@^2.0.1: version "2.0.1" resolved "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz" @@ -2463,6 +2602,11 @@ eventemitter3@^4.0.4: resolved "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== +eventsource-parser@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/eventsource-parser/-/eventsource-parser-1.1.2.tgz#ed6154a4e3dbe7cda9278e5e35d2ffc58b309f89" + integrity sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA== + execa@^5.0.0: version "5.1.1" resolved "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz" @@ -2626,6 +2770,11 @@ function-bind@^1.1.1: resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz" integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== +function-bind@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== + function.prototype.name@^1.1.5: version "1.1.5" resolved "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz" @@ -2661,6 +2810,17 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, 
get-intrinsic@^1.1.3, get-intrinsic@ has-proto "^1.0.1" has-symbols "^1.0.3" +get-intrinsic@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" + integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + has-proto "^1.0.1" + has-symbols "^1.0.3" + hasown "^2.0.0" + get-package-type@^0.1.0: version "0.1.0" resolved "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz" @@ -2780,6 +2940,13 @@ has-property-descriptors@^1.0.0: dependencies: get-intrinsic "^1.1.1" +has-property-descriptors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== + dependencies: + es-define-property "^1.0.0" + has-proto@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz" @@ -2804,6 +2971,13 @@ has@^1.0.3: dependencies: function-bind "^1.1.1" +hasown@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + html-escaper@^2.0.0: version "2.0.2" resolved "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz" @@ -2869,11 +3043,6 @@ internal-slot@^1.0.5: has "^1.0.3" side-channel "^1.0.4" -is-any-array@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/is-any-array/-/is-any-array-2.0.1.tgz#9233242a9c098220290aa2ec28f82ca7fa79899e" - integrity sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ== - 
is-array-buffer@^3.0.1, is-array-buffer@^3.0.2: version "3.0.2" resolved "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz" @@ -3473,6 +3642,11 @@ json-schema-traverse@^0.4.1: resolved "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz" integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== +json-schema@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + json-stable-stringify-without-jsonify@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz" @@ -3490,6 +3664,15 @@ json5@^2.2.2, json5@^2.2.3: resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== +jsondiffpatch@0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/jsondiffpatch/-/jsondiffpatch-0.6.0.tgz#daa6a25bedf0830974c81545568d5f671c82551f" + integrity sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ== + dependencies: + "@types/diff-match-patch" "^1.0.36" + chalk "^5.3.0" + diff-match-patch "^1.0.5" + jsonpointer@^5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-5.0.1.tgz#2110e0af0900fd37467b5907ecd13a7884a1b559" @@ -3500,43 +3683,35 @@ kleur@^3.0.3: resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -langchain@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.0.tgz#555d84538962720cd7223f6c3ca4bd060978ebf3" - integrity 
sha512-8c7Dg9OIPk4lFIQGyfOytXbUGLLSsxs9MV53cLODspkOGzaUpwy5FGBie30SrOxIEFJo+FDaJgpDAFO3Xi4NMw== +langchain@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.3.2.tgz#aec3e679d3d6c36f469448380affa475c92fbd86" + integrity sha512-kd2kz1cS/PIVrLEDFlrZsAasQfPLbY1UqCZbRKa3/QcpB33/n6xPDvXSMfBuKhvNj0bjW6MXDR9HZTduXjJBgg== dependencies: - "@langchain/core" "~0.2.0" - "@langchain/openai" "~0.0.28" - "@langchain/textsplitters" "~0.0.0" - binary-extensions "^2.2.0" + "@langchain/openai" ">=0.1.0 <0.4.0" + "@langchain/textsplitters" ">=0.0.0 <0.2.0" js-tiktoken "^1.0.12" js-yaml "^4.1.0" jsonpointer "^5.0.1" - langchainhub "~0.0.8" - langsmith "~0.1.7" - ml-distance "^4.0.0" + langsmith "^0.1.56-rc.1" openapi-types "^12.1.3" p-retry "4" - uuid "^9.0.0" + uuid "^10.0.0" yaml "^2.2.1" zod "^3.22.4" zod-to-json-schema "^3.22.3" -langchainhub@~0.0.8: - version "0.0.10" - resolved "https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.10.tgz#7579440a3255d67571b7046f3910593c5664f064" - integrity sha512-mOVso7TGTMSlvTTUR1b4zUIMtu8zgie/pcwRm1SeooWwuHYMQovoNXjT6gEjvWEZ6cjt4gVH+1lu2tp1/phyIQ== - -langsmith@~0.1.7: - version "0.1.25" - resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.25.tgz#3d06b6fc62abb1a6fc16540d40ddb48bd795f128" - integrity sha512-Hft4Y1yoMgFgCUXVQklRZ7ndmLQ/6FmRZE9P3u5BRdMq5Fa0hpg8R7jd7bLLBXkAjqcFvWo0AGhpb8MMY5FAiA== +langsmith@^0.1.56-rc.1: + version "0.1.56-rc.1" + resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.56-rc.1.tgz#20900ff0dee51baea359c6f16a4acc260f07fbb7" + integrity sha512-XsOxlhBAlTCGR9hNEL2VSREmiz8v6czNuX3CIwec9fH9T0WbNPle8Q/7Jy/h9UCbS9vuzTjfgc4qO5Dc9cu5Ig== dependencies: - "@types/uuid" "^9.0.1" + "@types/uuid" "^10.0.0" commander "^10.0.1" p-queue "^6.6.2" p-retry "4" - uuid "^9.0.0" + semver "^7.6.3" + uuid "^10.0.0" leven@^3.1.0: version "3.1.0" @@ -3585,11 +3760,6 @@ lodash.merge@^4.6.2: resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz" 
integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== -lodash.set@^4.3.2: - version "4.3.2" - resolved "https://registry.yarnpkg.com/lodash.set/-/lodash.set-4.3.2.tgz#d8757b1da807dde24816b0d6a84bea1a76230b23" - integrity sha512-4hNPN5jlm/N/HLMCO43v8BXKq9Z7QdAGc/VGrRD61w8gN9g/6jF9A4L1pbUgBLCffi0w9VsXfTOij5x8iTyFvg== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz" @@ -3634,11 +3804,11 @@ merge2@^1.3.0, merge2@^1.4.1: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== micromatch@^4.0.4: - version "4.0.5" - resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: - braces "^3.0.2" + braces "^3.0.3" picomatch "^2.3.1" mime-db@1.52.0: @@ -3670,42 +3840,6 @@ minimist@^1.2.0, minimist@^1.2.6: resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -ml-array-mean@^1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/ml-array-mean/-/ml-array-mean-1.1.6.tgz#d951a700dc8e3a17b3e0a583c2c64abd0c619c56" - integrity sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ== - dependencies: - ml-array-sum "^1.1.6" - -ml-array-sum@^1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/ml-array-sum/-/ml-array-sum-1.1.6.tgz#d1d89c20793cd29c37b09d40e85681aa4515a955" - integrity sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw== - 
dependencies: - is-any-array "^2.0.0" - -ml-distance-euclidean@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz#3a668d236649d1b8fec96380b9435c6f42c9a817" - integrity sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q== - -ml-distance@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/ml-distance/-/ml-distance-4.0.1.tgz#4741d17a1735888c5388823762271dfe604bd019" - integrity sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw== - dependencies: - ml-array-mean "^1.1.6" - ml-distance-euclidean "^2.0.0" - ml-tree-similarity "^1.0.0" - -ml-tree-similarity@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz#24705a107e32829e24d945e87219e892159c53f0" - integrity sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg== - dependencies: - binary-search "^1.3.5" - num-sort "^2.0.0" - ms@2.1.2: version "2.1.2" resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz" @@ -3721,6 +3855,11 @@ mustache@^4.2.0: resolved "https://registry.yarnpkg.com/mustache/-/mustache-4.2.0.tgz#e5892324d60a12ec9c2a73359edca52972bf6f64" integrity sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ== +nanoid@3.3.6: + version "3.3.6" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c" + integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA== + natural-compare-lite@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz" @@ -3765,16 +3904,16 @@ npm-run-path@^4.0.1: dependencies: path-key "^3.0.0" -num-sort@^2.0.0: - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/num-sort/-/num-sort-2.1.0.tgz#1cbb37aed071329fdf41151258bc011898577a9b" - integrity sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg== - object-inspect@^1.12.3, object-inspect@^1.9.0: version "1.12.3" resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz" integrity sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g== +object-inspect@^1.13.1: + version "1.13.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" + integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== + object-keys@^1.1.1: version "1.1.1" resolved "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz" @@ -3813,10 +3952,10 @@ onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" -openai@^4.38.5, openai@^4.41.1: - version "4.47.1" - resolved "https://registry.yarnpkg.com/openai/-/openai-4.47.1.tgz#1d23c7a8eb3d7bcdc69709cd905f4c9af0181dba" - integrity sha512-WWSxhC/69ZhYWxH/OBsLEirIjUcfpQ5+ihkXKp06hmeYXgBBIUCa9IptMzYx6NdkiOCsSGYCnTIsxaic3AjRCQ== +openai@^4.38.5: + version "4.52.7" + resolved "https://registry.yarnpkg.com/openai/-/openai-4.52.7.tgz#e32b000142287a9e8eda8512ba28df33d11ec1f1" + integrity sha512-dgxA6UZHary6NXUHEDj5TWt8ogv0+ibH+b4pT5RrWMjiRZVylNwLcw/2ubDrX5n0oUmHX/ZgudMJeemxzOvz7A== dependencies: "@types/node" "^18.11.18" "@types/node-fetch" "^2.6.4" @@ -3827,6 +3966,21 @@ openai@^4.38.5, openai@^4.41.1: node-fetch "^2.6.7" web-streams-polyfill "^3.2.1" +openai@^4.57.3: + version "4.61.1" + resolved "https://registry.yarnpkg.com/openai/-/openai-4.61.1.tgz#1fe2fa231b6de54fad32785528d7628dbbf68ab4" + integrity sha512-jZ2WRn+f4QWZkYnrUS+xzEUIBllsGN75dUCaXmMIHcv2W9yn7O8amaReTbGHCNEYkL43vuDOcxPUWfNPUmoD3Q== + dependencies: + "@types/node" "^18.11.18" + "@types/node-fetch" "^2.6.4" + "@types/qs" "^6.9.15" + abort-controller 
"^3.0.0" + agentkeepalive "^4.2.1" + form-data-encoder "1.7.2" + formdata-node "^4.3.2" + node-fetch "^2.6.7" + qs "^6.10.3" + openapi-types@^12.1.3: version "12.1.3" resolved "https://registry.yarnpkg.com/openapi-types/-/openapi-types-12.1.3.tgz#471995eb26c4b97b7bd356aacf7b91b73e777dd3" @@ -4013,6 +4167,13 @@ pure-rand@^6.0.0: resolved "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.2.tgz" integrity sha512-6Yg0ekpKICSjPswYOuC5sku/TSWaRYlA0qsXqJgM/d/4pLPHPuTxK7Nbf7jFKzAeedUhR8C7K9Uv63FBsSo8xQ== +qs@^6.10.3: + version "6.13.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" + integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== + dependencies: + side-channel "^1.0.6" + queue-microtask@^1.2.2: version "1.2.3" resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz" @@ -4144,6 +4305,11 @@ safe-regex-test@^1.0.0: get-intrinsic "^1.1.3" is-regex "^1.1.4" +secure-json-parse@2.7.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/secure-json-parse/-/secure-json-parse-2.7.0.tgz#5a5f9cd6ae47df23dba3151edd06855d47e09862" + integrity sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw== + semver@7.x, semver@^7.3.5, semver@^7.3.7: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" @@ -4156,6 +4322,23 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== +semver@^7.6.3: + version "7.6.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + 
+set-function-length@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + has-property-descriptors "^1.0.2" + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz" @@ -4177,6 +4360,16 @@ side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" +side-channel@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + object-inspect "^1.13.1" + signal-exit@^3.0.3, signal-exit@^3.0.7: version "3.0.7" resolved "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz" @@ -4210,6 +4403,13 @@ sprintf-js@~1.0.2: resolved "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== +sswr@2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/sswr/-/sswr-2.1.0.tgz#1eb64cd647cc9e11f871e7f43554abd8c64e1103" + integrity sha512-Cqc355SYlTAaUt8iDPaC/4DPPXK925PePLMxyBKuWd5kKc5mwsG3nT9+Mq2tyguL5s7b4Jg+IRMpTRsNTAfpSQ== + dependencies: + swrev "^4.0.0" + stack-utils@^2.0.3: version "2.0.6" resolved "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz" @@ -4314,6 +4514,24 @@ supports-preserve-symlinks-flag@^1.0.0: resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz" integrity 
sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== +swr@2.2.5: + version "2.2.5" + resolved "https://registry.yarnpkg.com/swr/-/swr-2.2.5.tgz#063eea0e9939f947227d5ca760cc53696f46446b" + integrity sha512-QtxqyclFeAsxEUeZIYmsaQ0UjimSq1RZ9Un7I68/0ClKK/U3LoyQunwkQfJZr2fc22DfIXLNDc2wFyTEikCUpg== + dependencies: + client-only "^0.0.1" + use-sync-external-store "^1.2.0" + +swrev@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/swrev/-/swrev-4.0.0.tgz#83da6983c7ef9d71ac984a9b169fc197cbf18ff8" + integrity sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA== + +swrv@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/swrv/-/swrv-1.0.4.tgz#278b4811ed4acbb1ae46654972a482fd1847e480" + integrity sha512-zjEkcP8Ywmj+xOJW3lIT65ciY/4AL4e/Or7Gj0MzU3zBJNMdJiT8geVZhINavnlHRMMCcJLHhraLTAiDOTmQ9g== + test-exclude@^6.0.0: version "6.0.0" resolved "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz" @@ -4494,10 +4712,15 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" -uuid@^9.0.0, uuid@^9.0.1: - version "9.0.1" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" - integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== +use-sync-external-store@^1.2.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz#c3b6390f3a30eba13200d2302dcdf1e7b57b2ef9" + integrity sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw== + +uuid@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294" + integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ== v8-compile-cache-lib@^3.0.1: version "3.0.1" @@ -4648,6 +4871,11 @@ yocto-queue@^0.1.0: resolved 
"https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== +zod-to-json-schema@3.22.5: + version "3.22.5" + resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.5.tgz#3646e81cfc318dbad2a22519e5ce661615418673" + integrity sha512-+akaPo6a0zpVCCseDed504KBJUQpEW5QZw7RMneNmKw+fGaML1Z9tUNLnHHAC8x6dzVRO1eB2oEMyZRnuBZg7Q== + zod-to-json-schema@^3.22.3: version "3.22.4" resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.4.tgz#f8cc691f6043e9084375e85fb1f76ebafe253d70" @@ -4657,3 +4885,8 @@ zod@^3.22.4: version "3.22.4" resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff" integrity sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg== + +zod@^3.23.8: + version "3.23.8" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.23.8.tgz#e37b957b5d52079769fb8097099b592f0ef4067d" + integrity sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g== diff --git a/python/Makefile b/python/Makefile index c8646ed56..4ef65ad35 100644 --- a/python/Makefile +++ b/python/Makefile @@ -1,7 +1,20 @@ -.PHONY: tests lint format build publish doctest integration_tests integration_tests_fast evals +.PHONY: tests lint format build publish doctest integration_tests integration_tests_fast evals benchmark benchmark-fast + + +OUTPUT ?= out/benchmark.json + +benchmark: + mkdir -p out + rm -f $(OUTPUT) + poetry run python -m bench -o $(OUTPUT) --rigorous + +benchmark-fast: + mkdir -p out + rm -f $(OUTPUT) + poetry run python -m bench -o $(OUTPUT) --fast tests: - poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests + PYTHONDEVMODE=1 PYTHONASYNCIODEBUG=1 poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests 
tests_watch: poetry run ptw --now . -- -vv -x tests/unit_tests @@ -20,7 +33,7 @@ evals: lint: poetry run ruff check . - poetry run mypy . + poetry run mypy langsmith poetry run black . --check format: diff --git a/python/README.md b/python/README.md index 97fbfb296..85de1e11a 100644 --- a/python/README.md +++ b/python/README.md @@ -70,6 +70,7 @@ Tracing can be activated by setting the following environment variables or by ma import os os.environ["LANGSMITH_TRACING_V2"] = "true" os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com" +# os.environ["LANGSMITH_ENDPOINT"] = "https://eu.api.smith.langchain.com" # If signed up in the EU region os.environ["LANGSMITH_API_KEY"] = "" # os.environ["LANGSMITH_PROJECT"] = "My Project Name" # Optional: "default" is used if not set ``` diff --git a/python/bench/__init__.py b/python/bench/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/bench/__main__.py b/python/bench/__main__.py new file mode 100644 index 000000000..c3e206af9 --- /dev/null +++ b/python/bench/__main__.py @@ -0,0 +1,76 @@ +from pyperf._runner import Runner + +from bench.create_run_tree import create_run_trees +from bench.dumps_json import ( + DeeplyNestedModel, + DeeplyNestedModelV1, + create_nested_instance, +) +from langsmith.client import _dumps_json + + +class MyClass: + def __init__(self): + self.vals = {} + + +benchmarks = ( + ( + "create_5_000_run_trees", + create_run_trees, + 5_000, + ), + ( + "create_10_000_run_trees", + create_run_trees, + 10_000, + ), + ( + "create_20_000_run_trees", + create_run_trees, + 10_000, + ), + ( + "dumps_class_nested_py_branch_and_leaf_200x150", + lambda x: _dumps_json({"input": x}), + create_nested_instance( + 200, 150, branch_constructor=MyClass, leaf_constructor=MyClass + ), + ), + ( + "dumps_class_nested_py_leaf_50x100", + lambda x: _dumps_json({"input": x}), + create_nested_instance(50, 100, leaf_constructor=MyClass), + ), + ( + "dumps_class_nested_py_leaf_200x400", + lambda x: 
_dumps_json({"input": x}), + create_nested_instance(200, 400, leaf_constructor=MyClass), + ), + ( + "dumps_dataclass_nested_200x150", + lambda x: _dumps_json({"input": x}), + create_nested_instance(200, 150), + ), + ( + "dumps_pydantic_nested_200x400", + lambda x: _dumps_json({"input": x}), + create_nested_instance(200, 400, branch_constructor=DeeplyNestedModel), + ), + ( + "dumps_pydantic_nested_50x100", + lambda x: _dumps_json({"input": x}), + create_nested_instance(50, 100, branch_constructor=DeeplyNestedModel), + ), + ( + "dumps_pydanticv1_nested_200x150", + lambda x: _dumps_json({"input": x}), + create_nested_instance(200, 150, branch_constructor=DeeplyNestedModelV1), + ), +) + + +r = Runner() + +for name, fn, input_ in benchmarks: + r.bench_func(name, fn, input_) diff --git a/python/bench/create_run_tree.py b/python/bench/create_run_tree.py new file mode 100644 index 000000000..29cc84f44 --- /dev/null +++ b/python/bench/create_run_tree.py @@ -0,0 +1,9 @@ +from unittest.mock import patch + +from langsmith import RunTree + + +def create_run_trees(N: int): + with patch("langsmith.client.requests.Session", autospec=True): + for i in range(N): + RunTree(name=str(i)).post() diff --git a/python/bench/dumps_json.py b/python/bench/dumps_json.py new file mode 100644 index 000000000..9937fc062 --- /dev/null +++ b/python/bench/dumps_json.py @@ -0,0 +1,84 @@ +import uuid +from dataclasses import dataclass, field +from datetime import datetime +from decimal import Decimal +from typing import Any, Callable, Dict, Optional + +import numpy as np +from pydantic import BaseModel, Field +from pydantic.v1 import BaseModel as BaseModelV1 +from pydantic.v1 import Field as FieldV1 + + +def _default(): + return { + "some_val": "😈", + "uuid_val": uuid.uuid4(), + "datetime_val": datetime.now(), + "list_val": [238928376271863487] * 5, + "decimal_val": Decimal("3.14"), + "set_val": {1, 2, 3}, + "tuple_val": (4, 5, 6), + "bytes_val": b"hello world", + "arr": np.random.random(10), + } + + 
+@dataclass +class DeeplyNested: + """An object.""" + + vals: Dict[str, Any] = field(default_factory=_default) + + +class DeeplyNestedModel(BaseModel): + vals: Dict[str, Any] = Field(default_factory=_default) + + +class DeeplyNestedModelV1(BaseModelV1): + vals: Dict[str, Any] = FieldV1(default_factory=_default) + + +def create_nested_instance( + depth: int = 5, + width: int = 5, + branch_constructor: Optional[Callable] = DeeplyNested, + leaf_constructor: Optional[Callable] = None, +) -> DeeplyNested: + top_level = DeeplyNested() + current_level = top_level + root_constructor = leaf_constructor or DeeplyNested + for i in range(depth): + for j in range(width): + key = f"key_{i}_{j}" + if i < depth - 1: + value = branch_constructor() + current_level.vals[key] = value + if j == 0: + next_level = value + else: + current_level.vals[key] = root_constructor() + + if i < depth - 1: + current_level = next_level + return top_level + + +if __name__ == "__main__": + import time + + from langsmith.client import _dumps_json + + class MyClass: + def __init__(self): + self.vals = {} + + def run(): + res = create_nested_instance(200, 150, leaf_constructor=MyClass) + start_time = time.time() + res = _dumps_json({"input": res}) + end_time = time.time() + print(f"Size: {len(res) / 1024:.2f} KB") + print(f"Time taken: {end_time - start_time:.2f} seconds") + + run() diff --git a/python/docs/.gitignore b/python/docs/.gitignore new file mode 100644 index 000000000..ac2deeb04 --- /dev/null +++ b/python/docs/.gitignore @@ -0,0 +1,4 @@ +_build/ +langsmith/ +index.rst + diff --git a/python/docs/.python-version b/python/docs/.python-version new file mode 100644 index 000000000..2c0733315 --- /dev/null +++ b/python/docs/.python-version @@ -0,0 +1 @@ +3.11 diff --git a/python/docs/Makefile b/python/docs/Makefile new file mode 100644 index 000000000..7ac449a0d --- /dev/null +++ b/python/docs/Makefile @@ -0,0 +1,34 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables 
from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= -j auto +SPHINXBUILD ?= sphinx-build +SPHINXAUTOBUILD ?= sphinx-autobuild +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile +# Generate API reference RST files +generate-api-rst: + python ./create_api_rst.py + +# Combined target to generate API RST and build HTML +api-docs: generate-api-rst build-html + +.PHONY: generate-api-rst build-html api-docs + +clobber: clean + rm -rf langsmith + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @echo "SOURCEDIR: $(SOURCEDIR)" + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + diff --git a/python/docs/_extensions/gallery_directive.py b/python/docs/_extensions/gallery_directive.py new file mode 100644 index 000000000..80642c545 --- /dev/null +++ b/python/docs/_extensions/gallery_directive.py @@ -0,0 +1,144 @@ +"""A directive to generate a gallery of images from structured data. + +Generating a gallery of images that are all the same size is a common +pattern in documentation, and this can be cumbersome if the gallery is +generated programmatically. This directive wraps this particular use-case +in a helper-directive to generate it with a single YAML configuration file. + +It currently exists for maintainers of the pydata-sphinx-theme, +but might be abstracted into a standalone package if it proves useful. 
+""" + +from pathlib import Path +from typing import Any, ClassVar, Dict, List + +from docutils import nodes +from docutils.parsers.rst import directives +from sphinx.application import Sphinx +from sphinx.util import logging +from sphinx.util.docutils import SphinxDirective +from yaml import safe_load + +logger = logging.getLogger(__name__) + + +TEMPLATE_GRID = """ +`````{{grid}} {columns} +{options} + +{content} + +````` +""" + +GRID_CARD = """ +````{{grid-item-card}} {title} +{options} + +{content} +```` +""" + + +class GalleryGridDirective(SphinxDirective): + """A directive to show a gallery of images and links in a Bootstrap grid. + + The grid can be generated from a YAML file that contains a list of items, or + from the content of the directive (also formatted in YAML). Use the parameter + "class-card" to add an additional CSS class to all cards. When specifying the grid + items, you can use all parameters from "grid-item-card" directive to customize + individual cards + ["image", "header", "content", "title"]. + + Danger: + This directive can only be used in the context of a Myst documentation page as + the templates use Markdown flavored formatting. 
+ """ + + name = "gallery-grid" + has_content = True + required_arguments = 0 + optional_arguments = 1 + final_argument_whitespace = True + option_spec: ClassVar[dict[str, Any]] = { + # A class to be added to the resulting container + "grid-columns": directives.unchanged, + "class-container": directives.unchanged, + "class-card": directives.unchanged, + } + + def run(self) -> List[nodes.Node]: + """Create the gallery grid.""" + if self.arguments: + # If an argument is given, assume it's a path to a YAML file + # Parse it and load it into the directive content + path_data_rel = Path(self.arguments[0]) + path_doc, _ = self.get_source_info() + path_doc = Path(path_doc).parent + path_data = (path_doc / path_data_rel).resolve() + if not path_data.exists(): + logger.info(f"Could not find grid data at {path_data}.") + nodes.text("No grid data found at {path_data}.") + return + yaml_string = path_data.read_text() + else: + yaml_string = "\n".join(self.content) + + # Use all the element with an img-bottom key as sites to show + # and generate a card item for each of them + grid_items = [] + for item in safe_load(yaml_string): + # remove parameters that are not needed for the card options + title = item.pop("title", "") + + # build the content of the card using some extra parameters + header = f"{item.pop('header')} \n^^^ \n" if "header" in item else "" + image = f"![image]({item.pop('image')}) \n" if "image" in item else "" + content = f"{item.pop('content')} \n" if "content" in item else "" + + # optional parameter that influence all cards + if "class-card" in self.options: + item["class-card"] = self.options["class-card"] + + loc_options_str = "\n".join(f":{k}: {v}" for k, v in item.items()) + " \n" + + card = GRID_CARD.format( + options=loc_options_str, content=header + image + content, title=title + ) + grid_items.append(card) + + # Parse the template with Sphinx Design to create an output container + # Prep the options for the template grid + class_ = 
"gallery-directive" + f' {self.options.get("class-container", "")}' + options = {"gutter": 2, "class-container": class_} + options_str = "\n".join(f":{k}: {v}" for k, v in options.items()) + + # Create the directive string for the grid + grid_directive = TEMPLATE_GRID.format( + columns=self.options.get("grid-columns", "1 2 3 4"), + options=options_str, + content="\n".join(grid_items), + ) + + # Parse content as a directive so Sphinx Design processes it + container = nodes.container() + self.state.nested_parse([grid_directive], 0, container) + + # Sphinx Design outputs a container too, so just use that + return [container.children[0]] + + +def setup(app: Sphinx) -> Dict[str, Any]: + """Add custom configuration to sphinx app. + + Args: + app: the Sphinx application + + Returns: + the 2 parallel parameters set to ``True``. + """ + app.add_directive("gallery-grid", GalleryGridDirective) + + return { + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/python/docs/_static/css/custom.css b/python/docs/_static/css/custom.css new file mode 100644 index 000000000..87195de8f --- /dev/null +++ b/python/docs/_static/css/custom.css @@ -0,0 +1,411 @@ +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;700&display=swap'); + +/******************************************************************************* +* master color map. Only the colors that actually differ between light and dark +* themes are specified separately. 
+* +* To see the full list of colors see https://www.figma.com/file/rUrrHGhUBBIAAjQ82x6pz9/PyData-Design-system---proposal-for-implementation-(2)?node-id=1234%3A765&t=ifcFT1JtnrSshGfi-1 +*/ +/** +* Function to get items from nested maps +*/ +/* Assign base colors for the PyData theme */ +:root { + --pst-teal-50: #f4fbfc; + --pst-teal-100: #e9f6f8; + --pst-teal-200: #d0ecf1; + --pst-teal-300: #abdde6; + --pst-teal-400: #3fb1c5; + --pst-teal-500: #0a7d91; + --pst-teal-600: #085d6c; + --pst-teal-700: #064752; + --pst-teal-800: #042c33; + --pst-teal-900: #021b1f; + --pst-violet-50: #f4eefb; + --pst-violet-100: #e0c7ff; + --pst-violet-200: #d5b4fd; + --pst-violet-300: #b780ff; + --pst-violet-400: #9c5ffd; + --pst-violet-500: #8045e5; + --pst-violet-600: #6432bd; + --pst-violet-700: #4b258f; + --pst-violet-800: #341a61; + --pst-violet-900: #1e0e39; + --pst-gray-50: #f9f9fa; + --pst-gray-100: #f3f4f5; + --pst-gray-200: #e5e7ea; + --pst-gray-300: #d1d5da; + --pst-gray-400: #9ca4af; + --pst-gray-500: #677384; + --pst-gray-600: #48566b; + --pst-gray-700: #29313d; + --pst-gray-800: #222832; + --pst-gray-900: #14181e; + --pst-pink-50: #fcf8fd; + --pst-pink-100: #fcf0fa; + --pst-pink-200: #f8dff5; + --pst-pink-300: #f3c7ee; + --pst-pink-400: #e47fd7; + --pst-pink-500: #c132af; + --pst-pink-600: #912583; + --pst-pink-700: #6e1c64; + --pst-pink-800: #46123f; + --pst-pink-900: #2b0b27; + --pst-foundation-white: #ffffff; + --pst-foundation-black: #14181e; + --pst-green-10: #f1fdfd; + --pst-green-50: #E0F7F6; + --pst-green-100: #B3E8E6; + --pst-green-200: #80D6D3; + --pst-green-300: #4DC4C0; + --pst-green-400: #4FB2AD; + --pst-green-500: #287977; + --pst-green-600: #246161; + --pst-green-700: #204F4F; + --pst-green-800: #1C3C3C; + --pst-green-900: #0D2427; + --pst-lilac-50: #f4eefb; + --pst-lilac-100: #DAD6FE; + --pst-lilac-200: #BCB2FD; + --pst-lilac-300: #9F8BFA; + --pst-lilac-400: #7F5CF6; + --pst-lilac-500: #6F3AED; + --pst-lilac-600: #6028D9; + --pst-lilac-700: #5021B6; + 
--pst-lilac-800: #431D95; + --pst-lilac-900: #1e0e39; + --pst-header-height: 2.5rem; +} + +html { + --pst-font-family-base: 'Inter'; + --pst-font-family-heading: 'Inter Tight', sans-serif; +} + +/******************************************************************************* +* write the color rules for each theme (light/dark) +*/ +/* NOTE: + * Mixins enable us to reuse the same definitions for the different modes + * https://sass-lang.com/documentation/at-rules/mixin + * something inserts a variable into a CSS selector or property name + * https://sass-lang.com/documentation/interpolation + */ +/* Defaults to light mode if data-theme is not set */ +html:not([data-theme]) { + --pst-color-primary: #287977; + --pst-color-primary-bg: #80D6D3; + --pst-color-secondary: #6F3AED; + --pst-color-secondary-bg: #DAD6FE; + --pst-color-accent: #c132af; + --pst-color-accent-bg: #f8dff5; + --pst-color-info: #276be9; + --pst-color-info-bg: #dce7fc; + --pst-color-warning: #f66a0a; + --pst-color-warning-bg: #f8e3d0; + --pst-color-success: #00843f; + --pst-color-success-bg: #d6ece1; + --pst-color-attention: var(--pst-color-warning); + --pst-color-attention-bg: var(--pst-color-warning-bg); + --pst-color-danger: #d72d47; + --pst-color-danger-bg: #f9e1e4; + --pst-color-text-base: #222832; + --pst-color-text-muted: #48566b; + --pst-color-heading-color: #ffffff; + --pst-color-shadow: rgba(0, 0, 0, 0.1); + --pst-color-border: #d1d5da; + --pst-color-border-muted: rgba(23, 23, 26, 0.2); + --pst-color-inline-code: #912583; + --pst-color-inline-code-links: #246161; + --pst-color-target: #f3cf95; + --pst-color-background: #ffffff; + --pst-color-on-background: #F4F9F8; + --pst-color-surface: #F4F9F8; + --pst-color-on-surface: #222832; +} +html:not([data-theme]) { + --pst-color-link: var(--pst-color-primary); + --pst-color-link-hover: var(--pst-color-secondary); +} +html:not([data-theme]) .only-dark, +html:not([data-theme]) .only-dark ~ figcaption { + display: none !important; +} + +/* NOTE: 
@each {...} is like a for-loop + * https://sass-lang.com/documentation/at-rules/control/each + */ +html[data-theme=light] { + --pst-color-primary: #287977; + --pst-color-primary-bg: #80D6D3; + --pst-color-secondary: #6F3AED; + --pst-color-secondary-bg: #DAD6FE; + --pst-color-accent: #c132af; + --pst-color-accent-bg: #f8dff5; + --pst-color-info: #276be9; + --pst-color-info-bg: #dce7fc; + --pst-color-warning: #f66a0a; + --pst-color-warning-bg: #f8e3d0; + --pst-color-success: #00843f; + --pst-color-success-bg: #d6ece1; + --pst-color-attention: var(--pst-color-warning); + --pst-color-attention-bg: var(--pst-color-warning-bg); + --pst-color-danger: #d72d47; + --pst-color-danger-bg: #f9e1e4; + --pst-color-text-base: #222832; + --pst-color-text-muted: #48566b; + --pst-color-heading-color: #ffffff; + --pst-color-shadow: rgba(0, 0, 0, 0.1); + --pst-color-border: #d1d5da; + --pst-color-border-muted: rgba(23, 23, 26, 0.2); + --pst-color-inline-code: #912583; + --pst-color-inline-code-links: #246161; + --pst-color-target: #f3cf95; + --pst-color-background: #ffffff; + --pst-color-on-background: #F4F9F8; + --pst-color-surface: #F4F9F8; + --pst-color-on-surface: #222832; + color-scheme: light; +} +html[data-theme=light] { + --pst-color-link: var(--pst-color-primary); + --pst-color-link-hover: var(--pst-color-secondary); +} +html[data-theme=light] .only-dark, +html[data-theme=light] .only-dark ~ figcaption { + display: none !important; +} + +html[data-theme=dark] { + --pst-color-primary: #4FB2AD; + --pst-color-primary-bg: #1C3C3C; + --pst-color-secondary: #7F5CF6; + --pst-color-secondary-bg: #431D95; + --pst-color-accent: #e47fd7; + --pst-color-accent-bg: #46123f; + --pst-color-info: #79a3f2; + --pst-color-info-bg: #06245d; + --pst-color-warning: #ff9245; + --pst-color-warning-bg: #652a02; + --pst-color-success: #5fb488; + --pst-color-success-bg: #002f17; + --pst-color-attention: var(--pst-color-warning); + --pst-color-attention-bg: var(--pst-color-warning-bg); + 
--pst-color-danger: #e78894; + --pst-color-danger-bg: #4e111b; + --pst-color-text-base: #ced6dd; + --pst-color-text-muted: #9ca4af; + --pst-color-heading-color: #14181e; + --pst-color-shadow: rgba(0, 0, 0, 0.2); + --pst-color-border: #48566b; + --pst-color-border-muted: #29313d; + --pst-color-inline-code: #f3c7ee; + --pst-color-inline-code-links: #4FB2AD; + --pst-color-target: #675c04; + --pst-color-background: #14181e; + --pst-color-on-background: #222832; + --pst-color-surface: #29313d; + --pst-color-on-surface: #f3f4f5; + /* Adjust images in dark mode (unless they have class .only-dark or + * .dark-light, in which case assume they're already optimized for dark + * mode). + */ + /* Give images a light background in dark mode in case they have + * transparency and black text (unless they have class .only-dark or .dark-light, in + * which case assume they're already optimized for dark mode). + */ + color-scheme: dark; +} +html[data-theme=dark] { + --pst-color-link: var(--pst-color-primary); + --pst-color-link-hover: var(--pst-color-secondary); +} +html[data-theme=dark] .only-light, +html[data-theme=dark] .only-light ~ figcaption { + display: none !important; +} +html[data-theme=dark] img:not(.only-dark):not(.dark-light) { + filter: brightness(0.8) contrast(1.2); +} +html[data-theme=dark] .bd-content img:not(.only-dark):not(.dark-light) { + background: rgb(255, 255, 255); + border-radius: 0.25rem; +} +html[data-theme=dark] .MathJax_SVG * { + fill: var(--pst-color-text-base); +} + +.pst-color-primary { + color: var(--pst-color-primary); +} + +.pst-color-secondary { + color: var(--pst-color-secondary); +} + +.pst-color-accent { + color: var(--pst-color-accent); +} + +.pst-color-info { + color: var(--pst-color-info); +} + +.pst-color-warning { + color: var(--pst-color-warning); +} + +.pst-color-success { + color: var(--pst-color-success); +} + +.pst-color-attention { + color: var(--pst-color-attention); +} + +.pst-color-danger { + color: var(--pst-color-danger); +} + 
+.pst-color-text-base { + color: var(--pst-color-text-base); +} + +.pst-color-text-muted { + color: var(--pst-color-text-muted); +} + +.pst-color-heading-color { + color: var(--pst-color-heading-color); +} + +.pst-color-shadow { + color: var(--pst-color-shadow); +} + +.pst-color-border { + color: var(--pst-color-border); +} + +.pst-color-border-muted { + color: var(--pst-color-border-muted); +} + +.pst-color-inline-code { + color: var(--pst-color-inline-code); +} + +.pst-color-inline-code-links { + color: var(--pst-color-inline-code-links); +} + +.pst-color-target { + color: var(--pst-color-target); +} + +.pst-color-background { + color: var(--pst-color-background); +} + +.pst-color-on-background { + color: var(--pst-color-on-background); +} + +.pst-color-surface { + color: var(--pst-color-surface); +} + +.pst-color-on-surface { + color: var(--pst-color-on-surface); +} + + + +/* Adjust the height of the navbar */ +.bd-header .bd-header__inner{ + height: 52px; /* Adjust this value as needed */ +} + +.navbar-nav > li > a { + line-height: 52px; /* Vertically center the navbar links */ +} + +/* Make sure the navbar items align properly */ +.navbar-nav { + display: flex; +} + + +.bd-header .navbar-header-items__start{ + margin-left: 0rem +} + +.bd-header button.primary-toggle { + margin-right: 0rem; +} + +.bd-header ul.navbar-nav .dropdown .dropdown-menu { + overflow-y: auto; /* Enable vertical scrolling */ + max-height: 80vh +} + +.bd-sidebar-primary { + width: 22%; /* Adjust this value to your preference */ + line-height: 1.4; +} + +.bd-sidebar-secondary { + line-height: 1.4; +} + +.toc-entry a.nav-link, .toc-entry a>code { + background-color: transparent; + border-color: transparent; +} + +.bd-sidebar-primary code{ + background-color: transparent; + border-color: transparent; +} + + +.toctree-wrapper li[class^=toctree-l1]>a { + font-size: 1.3em +} + +.toctree-wrapper li[class^=toctree-l1] { + margin-bottom: 2em; +} + +.toctree-wrapper li[class^=toctree-l]>ul { + 
margin-top: 0.5em; + font-size: 0.9em; +} + +*, :after, :before { + font-style: normal; +} + +div.deprecated { + margin-top: 0.5em; + margin-bottom: 2em; +} + +.admonition-beta.admonition, div.admonition-beta.admonition { + border-color: var(--pst-color-warning); + margin-top:0.5em; + margin-bottom: 2em; +} + +.admonition-beta>.admonition-title, div.admonition-beta>.admonition-title { + background-color: var(--pst-color-warning-bg); +} + +dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd { + margin-left: 1rem; +} + +p { + font-size: 0.9rem; + margin-bottom: 0.5rem; +} \ No newline at end of file diff --git a/python/docs/_static/img/brand/favicon.png b/python/docs/_static/img/brand/favicon.png new file mode 100644 index 000000000..e0335bcb6 Binary files /dev/null and b/python/docs/_static/img/brand/favicon.png differ diff --git a/python/docs/_static/wordmark-api-dark.svg b/python/docs/_static/wordmark-api-dark.svg new file mode 100644 index 000000000..c33ec9253 --- /dev/null +++ b/python/docs/_static/wordmark-api-dark.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/python/docs/_static/wordmark-api.svg b/python/docs/_static/wordmark-api.svg new file mode 100644 index 000000000..a9f8f59db --- /dev/null +++ b/python/docs/_static/wordmark-api.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/python/docs/conf.py b/python/docs/conf.py new file mode 100644 index 000000000..dae18242c --- /dev/null +++ b/python/docs/conf.py @@ -0,0 +1,261 @@ +"""Configuration file for the Sphinx documentation builder.""" + +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +import os +import sys +from pathlib import Path + +import toml +from docutils import nodes +from docutils.parsers.rst.directives.admonitions import BaseAdmonition +from docutils.statemachine import StringList +from sphinx.util.docutils import SphinxDirective + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +_DIR = Path(__file__).parent.absolute() +sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath("../python")) + +with (_DIR.parent / "pyproject.toml").open("r") as f: + data = toml.load(f) + + +class ExampleLinksDirective(SphinxDirective): + """Directive to generate a list of links to examples. + + We have a script that extracts links to API reference docs + from our notebook examples. This directive uses that information + to backlink to the examples from the API reference docs. + """ + + has_content = False + required_arguments = 1 + + def run(self): + """Run the directive. + + Called any time :example_links:`ClassName` is used + in the template *.rst files. 
+ """ + class_or_func_name = self.arguments[0] + links = {} + list_node = nodes.bullet_list() + for doc_name, link in sorted(links.items()): + item_node = nodes.list_item() + para_node = nodes.paragraph() + link_node = nodes.reference() + link_node["refuri"] = link + link_node.append(nodes.Text(doc_name)) + para_node.append(link_node) + item_node.append(para_node) + list_node.append(item_node) + if list_node.children: + title_node = nodes.rubric() + title_node.append(nodes.Text(f"Examples using {class_or_func_name}")) + return [title_node, list_node] + return [list_node] + + +class Beta(BaseAdmonition): + required_arguments = 0 + node_class = nodes.admonition + + def run(self): + self.content = self.content or StringList( + [ + ( + "This feature is in beta. It is actively being worked on, so the " + "API may change." + ) + ] + ) + self.arguments = self.arguments or ["Beta"] + return super().run() + + +def setup(app): + app.add_directive("example_links", ExampleLinksDirective) + app.add_directive("beta", Beta) + + +# -- Project information ----------------------------------------------------- + +project = "🦜️🛠️ LangSmith" +copyright = "2024, LangChain Inc" +author = "LangChain, Inc" + +html_favicon = "_static/img/brand/favicon.png" +html_last_updated_fmt = "%b %d, %Y" + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autodoc.typehints", + "sphinx.ext.autosummary", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", + "sphinxcontrib.autodoc_pydantic", + "IPython.sphinxext.ipython_console_highlighting", + "myst_parser", + "_extensions.gallery_directive", + "sphinx_design", + "sphinx_copybutton", +] +source_suffix = [".rst", ".md"] + +# some autodoc pydantic options are repeated in the actual template. 
+# potentially user error, but there may be bugs in the sphinx extension +# with options not being passed through correctly (from either the location in the code) +autodoc_pydantic_model_show_json = False +autodoc_pydantic_field_list_validators = False +autodoc_pydantic_config_members = False +autodoc_pydantic_model_show_config_summary = False +autodoc_pydantic_model_show_validator_members = False +autodoc_pydantic_model_show_validator_summary = False +autodoc_pydantic_model_signature_prefix = "class" +autodoc_pydantic_field_signature_prefix = "param" +autodoc_member_order = "groupwise" +autoclass_content = "both" +autodoc_typehints_format = "short" +autodoc_typehints = "both" + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["templates"] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# The theme to use for HTML and HTML Help pages. +html_theme = "pydata_sphinx_theme" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + # # -- General configuration ------------------------------------------------ + "sidebar_includehidden": True, + "use_edit_page_button": False, + # # "analytics": { + # # "plausible_analytics_domain": "scikit-learn.org", + # # "plausible_analytics_url": "https://views.scientific-python.org/js/script.js", + # # }, + # # If "prev-next" is included in article_footer_items, then setting show_prev_next + # # to True would repeat prev and next links. 
See + # # https://github.com/pydata/pydata-sphinx-theme/blob/b731dc230bc26a3d1d1bb039c56c977a9b3d25d8/src/pydata_sphinx_theme/theme/pydata_sphinx_theme/layout.html#L118-L129 + "show_prev_next": False, + "search_bar_text": "Search", + "navigation_with_keys": True, + "collapse_navigation": True, + "navigation_depth": 3, + "show_nav_level": 1, + "show_toc_level": 3, + "navbar_align": "left", + "header_links_before_dropdown": 5, + "header_dropdown_text": "Modules", + "logo": { + "image_light": "_static/wordmark-api.svg", + "image_dark": "_static/wordmark-api-dark.svg", + }, + "surface_warnings": True, + # # -- Template placement in theme layouts ---------------------------------- + "navbar_start": ["navbar-logo"], + # # Note that the alignment of navbar_center is controlled by navbar_align + "navbar_center": ["navbar-nav"], + "navbar_end": ["langsmith_docs", "theme-switcher", "navbar-icon-links"], + # # navbar_persistent is persistent right (even when on mobiles) + "navbar_persistent": ["search-field"], + "article_header_start": ["breadcrumbs"], + "article_header_end": [], + "article_footer_items": [], + "content_footer_items": [], + # # Use html_sidebars that map page patterns to list of sidebar templates + # "primary_sidebar_end": [], + "footer_start": ["copyright"], + "footer_center": [], + "footer_end": [], + # # When specified as a dictionary, the keys should follow glob-style patterns, as in + # # https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-exclude_patterns + # # In particular, "**" specifies the default for all pages + # # Use :html_theme.sidebar_secondary.remove: for file-wide removal + # "secondary_sidebar_items": {"**": ["page-toc", "sourcelink"]}, + # "show_version_warning_banner": True, + # "announcement": None, + "icon_links": [ + { + # Label for this link + "name": "GitHub", + # URL where the link will redirect + "url": "https://github.com/langchain-ai/langsmith-sdk", # required + # Icon class (if "type": "fontawesome"), or path 
to local image (if "type": "local") + "icon": "fa-brands fa-square-github", + # The type of image to be used (see below for details) + "type": "fontawesome", + }, + { + "name": "X / Twitter", + "url": "https://twitter.com/langchainai", + "icon": "fab fa-twitter-square", + }, + ], + "icon_links_label": "Quick Links", + "external_links": [], +} + + +html_context = { + "display_github": True, # Integrate GitHub + "github_user": "langchain-ai", # Username + "github_repo": "langsmith-sdk", # Repo name + "github_version": "master", # Version + "conf_py_path": "/docs/api_reference", # Path in the checkout to the docs root +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# These paths are either relative to html_static_path +# or fully qualified paths (e.g. https://...) +html_css_files = ["css/custom.css"] +html_use_index = False + +myst_enable_extensions = ["colon_fence"] + +# generate autosummary even if no references +autosummary_generate = True + +html_copy_source = False +html_show_sourcelink = False + +# Set canonical URL from the Read the Docs Domain +html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "") + +# Tell Jinja2 templates the build is running on Read the Docs +if os.environ.get("READTHEDOCS", "") == "True": + html_context["READTHEDOCS"] = True + +master_doc = "index" diff --git a/python/docs/create_api_rst.py b/python/docs/create_api_rst.py new file mode 100644 index 000000000..253352767 --- /dev/null +++ b/python/docs/create_api_rst.py @@ -0,0 +1,384 @@ +"""Script for auto-generating api_reference.rst.""" + +import importlib +import inspect +import logging +import os +import sys +from enum import Enum +from pathlib import Path +from typing import Dict, List, Literal, Sequence, TypedDict, Union + +import toml +from pydantic 
import BaseModel + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +ROOT_DIR = Path(__file__).parents[1].absolute() +HERE = Path(__file__).parent +sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath("../")) + +PACKAGE_DIR = ROOT_DIR / "langsmith" +ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"] + + +class ClassInfo(TypedDict): + name: str + qualified_name: str + kind: ClassKind + is_public: bool + is_deprecated: bool + + +class FunctionInfo(TypedDict): + name: str + qualified_name: str + is_public: bool + is_deprecated: bool + + +class ModuleMembers(TypedDict): + classes_: Sequence[ClassInfo] + functions: Sequence[FunctionInfo] + + +_EXCLUDED_NAMES = { + "close_session", + "convert_prompt_to_anthropic_format", + "convert_prompt_to_openai_format", + "BaseMessageLike", + "TracingQueueItem", + "filter_logs", + "StringEvaluator", + "LLMEvaluator", + "ensure_traceable", + "RunLikeDict", + "RunTypeEnum", + "is_traceable_function", + "is_async", + "get_run_tree_context", + "as_runnable", + "SupportsLangsmithExtra", + "get_tracing_context", +} + +_EXCLUDED_MODULES = {"cli"} + +_INCLUDED_UTILS = { + "ContextThreadPoolExecutor", + "LangSmithAPIError", + "LangSmithAuthError", + "LangSmithConflictError", + "LangSmithConnectionError", + "LangSmithError", + "LangSmithMissingAPIKeyWarning", + "LangSmithNotFoundError", + "LangSmithRateLimitError", + "LangSmithRetry", + "LangSmithUserError", + "LangSmithWarning", +} + + +def _load_module_members(module_path: str, namespace: str) -> ModuleMembers: + classes_: List[ClassInfo] = [] + functions: List[FunctionInfo] = [] + module = importlib.import_module(module_path) + for name, type_ in inspect.getmembers(module): + if "evaluation" in module_path: + print(module_path, name) + if ( + not hasattr(type_, "__module__") + or type_.__module__ != module_path + or name in _EXCLUDED_NAMES + or (module_path.endswith("utils") and name not in _INCLUDED_UTILS) + ): + 
logger.info(f"Excluding {module_path}.{name}") + continue + + if inspect.isclass(type_): + kind: ClassKind = ( + "TypedDict" + if type(type_).__name__ in ("_TypedDictMeta", "_TypedDictMeta") + else ( + "enum" + if issubclass(type_, Enum) + else "Pydantic" if issubclass(type_, BaseModel) else "Regular" + ) + ) + if hasattr(type_, "__slots__"): + for func_name, func_type in inspect.getmembers(type_): + if inspect.isfunction(func_type): + functions.append( + FunctionInfo( + name=func_name, + qualified_name=f"{namespace}.{name}.{func_name}", + is_public=not func_name.startswith("_"), + is_deprecated=".. deprecated::" + in (func_type.__doc__ or ""), + ) + ) + classes_.append( + ClassInfo( + name=name, + qualified_name=f"{namespace}.{name}", + kind=kind, + is_public=not name.startswith("_"), + is_deprecated=".. deprecated::" in (type_.__doc__ or ""), + ) + ) + elif inspect.isfunction(type_): + functions.append( + FunctionInfo( + name=name, + qualified_name=f"{namespace}.{name}", + is_public=not name.startswith("_"), + is_deprecated=".. 
deprecated::" in (type_.__doc__ or ""), + ) + ) + + return ModuleMembers(classes_=classes_, functions=functions) + + +def _load_package_modules( + package_directory: Union[str, Path], +) -> Dict[str, ModuleMembers]: + package_path = Path(package_directory) + modules_by_namespace = {} + package_name = package_path.name + + for file_path in package_path.rglob("*.py"): + if file_path.name.startswith("_") or any( + part.startswith("_") for part in file_path.relative_to(package_path).parts + ): + if file_path.name not in { + "_runner.py", + "_arunner.py", + "_testing.py", + "_expect.py", + }: + continue + + namespace = ( + str(file_path.relative_to(package_path)) + .replace(".py", "") + .replace("/", ".") + ) + top_namespace = namespace.split(".")[0] + if top_namespace in _EXCLUDED_MODULES: + logger.info(f"Excluding module {top_namespace}") + continue + + try: + module_members = _load_module_members( + f"{package_name}.{namespace}", namespace + ) + if top_namespace in modules_by_namespace: + existing = modules_by_namespace[top_namespace] + modules_by_namespace[top_namespace] = ModuleMembers( + classes_=existing["classes_"] + module_members["classes_"], + functions=existing["functions"] + module_members["functions"], + ) + else: + modules_by_namespace[top_namespace] = module_members + except ImportError as e: + print(f"Error: Unable to import module '{namespace}' with error: {e}") + + return modules_by_namespace + + +module_order = [ + "client", + "async_client", + "evaluation", + "run_helpers", + "run_trees", + "schemas", + "utils", + "anonymizer", +] + + +def _construct_doc( + package_namespace: str, + members_by_namespace: Dict[str, ModuleMembers], + package_version: str, +) -> List[tuple[str, str]]: + docs = [] + index_doc = f"""\ +:html_theme.sidebar_secondary.remove: + +.. currentmodule:: {package_namespace} + +.. _{package_namespace}: + +{package_namespace.replace('_', '-')}: {package_version} +{'=' * (len(package_namespace) + len(package_version) + 2)} + +.. 
automodule:: {package_namespace} + :no-members: + :no-inherited-members: + +.. toctree:: + :maxdepth: 2 + +""" + + def _priority(mod: str): + if mod in module_order: + return module_order.index(mod) + print(mod, "not in ", module_order) + return len(module_order) + hash(mod) + + for module in sorted(members_by_namespace, key=lambda x: _priority(x)): + index_doc += f" {module}\n" + module_doc = f"""\ +.. currentmodule:: {package_namespace} + +.. _{package_namespace}_{module}: + +:mod:`{module}` +{'=' * (len(module) + 7)} + +.. automodule:: {package_namespace}.{module} + :no-members: + :no-inherited-members: + +""" + _members = members_by_namespace[module] + classes = [ + el + for el in _members["classes_"] + if el["is_public"] and not el["is_deprecated"] + ] + functions = [ + el + for el in _members["functions"] + if el["is_public"] and not el["is_deprecated"] + ] + deprecated_classes = [ + el for el in _members["classes_"] if el["is_public"] and el["is_deprecated"] + ] + deprecated_functions = [ + el + for el in _members["functions"] + if el["is_public"] and el["is_deprecated"] + ] + + if classes: + module_doc += f"""\ +**Classes** + +.. currentmodule:: {package_namespace} + +.. autosummary:: + :toctree: {module} +""" + for class_ in sorted(classes, key=lambda c: c["qualified_name"]): + template = ( + "typeddict.rst" + if class_["kind"] == "TypedDict" + else ( + "enum.rst" + if class_["kind"] == "enum" + else ( + "pydantic.rst" + if class_["kind"] == "Pydantic" + else "class.rst" + ) + ) + ) + module_doc += f"""\ + :template: {template} + + {class_["qualified_name"]} + +""" + + if functions: + qualnames = "\n ".join(sorted(f["qualified_name"] for f in functions)) + module_doc += f"""**Functions** + +.. currentmodule:: {package_namespace} + +.. autosummary:: + :toctree: {module} + :template: function.rst + + {qualnames} + +""" + + if deprecated_classes: + module_doc += f"""**Deprecated classes** + +.. currentmodule:: {package_namespace} + +.. 
autosummary:: + :toctree: {module} +""" + for class_ in sorted(deprecated_classes, key=lambda c: c["qualified_name"]): + template = ( + "typeddict.rst" + if class_["kind"] == "TypedDict" + else ( + "enum.rst" + if class_["kind"] == "enum" + else ( + "pydantic.rst" + if class_["kind"] == "Pydantic" + else "class.rst" + ) + ) + ) + module_doc += f""" :template: {template} + + {class_["qualified_name"]} + +""" + + if deprecated_functions: + qualnames = "\n ".join( + sorted(f["qualified_name"] for f in deprecated_functions) + ) + module_doc += f"""**Deprecated functions** + +.. currentmodule:: {package_namespace} + +.. autosummary:: + :toctree: {module} + :template: function.rst + + {qualnames} + +""" + docs.append((f"{module}.rst", module_doc)) + docs.append(("index.rst", index_doc)) + return docs + + +def _get_package_version(package_dir: Path) -> str: + try: + with open(package_dir.parent / "pyproject.toml") as f: + pyproject = toml.load(f) + return pyproject["tool"]["poetry"]["version"] + except FileNotFoundError: + print(f"pyproject.toml not found in {package_dir.parent}. Aborting the build.") + sys.exit(1) + + +def main() -> None: + print("Starting to build API reference files.") + package_members = _load_package_modules(PACKAGE_DIR) + package_version = _get_package_version(PACKAGE_DIR) + rsts = _construct_doc("langsmith", package_members, package_version) + for name, rst in rsts: + with open(HERE / name, "w") as f: + f.write(rst) + print("API reference files built.") + + +if __name__ == "__main__": + main() diff --git a/python/docs/make.bat b/python/docs/make.bat new file mode 100644 index 000000000..922152e96 --- /dev/null +++ b/python/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. 
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/python/docs/requirements.txt b/python/docs/requirements.txt new file mode 100644 index 000000000..c93e13128 --- /dev/null +++ b/python/docs/requirements.txt @@ -0,0 +1,12 @@ +autodoc_pydantic>=1,<2 +sphinx<=7 +myst-parser>=3 +sphinx-autobuild>=2024 +pydata-sphinx-theme>=0.15 +toml>=0.10.2 +myst-nb>=1.1.1 +pyyaml +sphinx-design +sphinx-copybutton +beautifulsoup4 +-e python diff --git a/python/docs/scripts/custom_formatter.py b/python/docs/scripts/custom_formatter.py new file mode 100644 index 000000000..ba85484e9 --- /dev/null +++ b/python/docs/scripts/custom_formatter.py @@ -0,0 +1,41 @@ +import sys +from glob import glob +from pathlib import Path + +from bs4 import BeautifulSoup + +CUR_DIR = Path(__file__).parents[1] + + +def process_toc_h3_elements(html_content: str) -> str: + """Update Class.method() TOC headers to just method().""" + # Create a BeautifulSoup object + soup = BeautifulSoup(html_content, "html.parser") + + # Find all
  • elements with class "toc-h3" + toc_h3_elements = soup.find_all("li", class_="toc-h3") + + # Process each element + for element in toc_h3_elements: + element = element.a.code.span + # Get the text content of the element + content = element.get_text() + + # Apply the regex substitution + modified_content = content.split(".")[-1] + + # Update the element's content + element.string = modified_content + + # Return the modified HTML + return str(soup) + + +if __name__ == "__main__": + dir = sys.argv[1] + for fn in glob(str(f"{dir.rstrip('/')}/**/*.html"), recursive=True): + with open(fn) as f: + html = f.read() + processed_html = process_toc_h3_elements(html) + with open(fn, "w") as f: + f.write(processed_html) diff --git a/python/docs/templates/COPYRIGHT.txt b/python/docs/templates/COPYRIGHT.txt new file mode 100644 index 000000000..d4cc36d6b --- /dev/null +++ b/python/docs/templates/COPYRIGHT.txt @@ -0,0 +1,27 @@ +Copyright (c) 2007-2023 The scikit-learn developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/python/docs/templates/langsmith_docs.html b/python/docs/templates/langsmith_docs.html new file mode 100644 index 000000000..7e51aa56e --- /dev/null +++ b/python/docs/templates/langsmith_docs.html @@ -0,0 +1,12 @@ + + + + + +Docs + diff --git a/python/docs/templates/redirects.html b/python/docs/templates/redirects.html new file mode 100644 index 000000000..d76738f5f --- /dev/null +++ b/python/docs/templates/redirects.html @@ -0,0 +1,16 @@ +{% set redirect = pathto(redirects[pagename]) %} + + + + + + + + + + LangSmith Python SDK Reference Documentation. + + +

    You will be automatically redirected to the new location of this page.

    + + diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index 23f8901b4..f3a1de90a 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -5,6 +5,7 @@ if TYPE_CHECKING: from langsmith._expect import expect from langsmith._testing import test, unit + from langsmith.async_client import AsyncClient from langsmith.client import Client from langsmith.evaluation import aevaluate, evaluate from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator @@ -16,6 +17,9 @@ tracing_context, ) from langsmith.run_trees import RunTree + from langsmith.utils import ( + ContextThreadPoolExecutor, + ) def __getattr__(name: str) -> Any: @@ -30,6 +34,10 @@ def __getattr__(name: str) -> Any: from langsmith.client import Client return Client + elif name == "AsyncClient": + from langsmith.async_client import AsyncClient + + return AsyncClient elif name == "RunTree": from langsmith.run_trees import RunTree @@ -64,10 +72,19 @@ def __getattr__(name: str) -> Any: from langsmith.evaluation import evaluate return evaluate + + elif name == "evaluate_existing": + from langsmith.evaluation import evaluate_existing + + return evaluate_existing elif name == "aevaluate": from langsmith.evaluation import aevaluate return aevaluate + elif name == "aevaluate_existing": + from langsmith.evaluation import aevaluate_existing + + return aevaluate_existing elif name == "tracing_context": from langsmith.run_helpers import tracing_context @@ -87,6 +104,12 @@ def __getattr__(name: str) -> Any: from langsmith._testing import unit return unit + elif name == "ContextThreadPoolExecutor": + from langsmith.utils import ( + ContextThreadPoolExecutor, + ) + + return ContextThreadPoolExecutor raise AttributeError(f"module {__name__!r} has no attribute {name!r}") @@ -108,4 +131,6 @@ def __getattr__(name: str) -> Any: "tracing_context", "get_tracing_context", "get_current_run_tree", + "ContextThreadPoolExecutor", + "AsyncClient", ] diff --git 
a/python/langsmith/_expect.py b/python/langsmith/_expect.py index fe459e409..dabd22c38 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -46,7 +46,6 @@ def test_output_semantically_close(): from __future__ import annotations import atexit -import concurrent.futures import inspect from typing import ( TYPE_CHECKING, @@ -60,6 +59,7 @@ def test_output_semantically_close(): from langsmith import client as ls_client from langsmith import run_helpers as rh +from langsmith import run_trees as rt from langsmith import utils as ls_utils if TYPE_CHECKING: @@ -91,22 +91,20 @@ def __init__( client: Optional[ls_client.Client], key: str, value: Any, - _executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, + _executor: Optional[ls_utils.ContextThreadPoolExecutor] = None, run_id: Optional[str] = None, ): self._client = client self.key = key self.value = value - self._executor = _executor or concurrent.futures.ThreadPoolExecutor( - max_workers=3 - ) + self._executor = _executor or ls_utils.ContextThreadPoolExecutor(max_workers=3) rt = rh.get_current_run_tree() self._run_id = rt.trace_id if rt else run_id def _submit_feedback(self, score: int, message: Optional[str] = None) -> None: if not ls_utils.test_tracking_is_disabled(): if not self._client: - self._client = ls_client.Client() + self._client = rt.get_cached_client() self._executor.submit( self._client.create_feedback, run_id=self._run_id, @@ -255,7 +253,7 @@ class _Expect: def __init__(self, *, client: Optional[ls_client.Client] = None): self._client = client - self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) + self.executor = ls_utils.ContextThreadPoolExecutor(max_workers=3) atexit.register(self.executor.shutdown, wait=True) def embedding_distance( @@ -434,7 +432,7 @@ def _submit_feedback(self, key: str, results: dict): run_id = current_run.trace_id if current_run else None if not ls_utils.test_tracking_is_disabled(): if not self._client: - self._client = 
ls_client.Client() + self._client = rt.get_cached_client() self.executor.submit( self._client.create_feedback, run_id=run_id, key=key, **results ) diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index aeb9d857a..b3ff4d88a 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -6,6 +6,8 @@ """ import asyncio +import contextvars +import functools import inspect from collections import deque from typing import ( @@ -251,13 +253,20 @@ def __aiter__(self): def aiter_with_concurrency( - n: Optional[int], generator: AsyncIterator[Coroutine[None, None, T]] + n: Optional[int], + generator: AsyncIterator[Coroutine[None, None, T]], + *, + _eager_consumption_timeout: float = 0, ) -> AsyncGenerator[T, None]: """Process async generator with max parallelism. Args: n: The number of tasks to run concurrently. generator: The async generator to process. + _eager_consumption_timeout: If set, check for completed tasks after + each iteration and yield their results. This can be used to + consume the generator eagerly while still respecting the concurrency + limit. Yields: The processed items yielded by the async generator. 
@@ -269,27 +278,50 @@ async def consume(): yield await item return consume() - semaphore = asyncio.Semaphore(n) if n is not None else NoLock() + semaphore = cast( + asyncio.Semaphore, asyncio.Semaphore(n) if n is not None else NoLock() + ) - async def process_item(item): + async def process_item(ix: int, item): async with semaphore: - return await item + res = await item + return (ix, res) async def process_generator(): - tasks = [] + tasks = {} + accepts_context = asyncio_accepts_context() + ix = 0 async for item in generator: - task = asyncio.create_task(process_item(item)) - tasks.append(task) + if accepts_context: + context = contextvars.copy_context() + task = asyncio.create_task(process_item(ix, item), context=context) + else: + task = asyncio.create_task(process_item(ix, item)) + tasks[ix] = task + ix += 1 + if _eager_consumption_timeout > 0: + try: + for _fut in asyncio.as_completed( + tasks.values(), + timeout=_eager_consumption_timeout, + ): + task_idx, res = await _fut + yield res + del tasks[task_idx] + except asyncio.TimeoutError: + pass if n is not None and len(tasks) >= n: - done, pending = await asyncio.wait( - tasks, return_when=asyncio.FIRST_COMPLETED + done, _ = await asyncio.wait( + tasks.values(), return_when=asyncio.FIRST_COMPLETED ) - tasks = list(pending) for task in done: - yield task.result() + task_idx, res = task.result() + yield res + del tasks[task_idx] - for task in asyncio.as_completed(tasks): - yield await task + for task in asyncio.as_completed(tasks.values()): + _, res = await task + yield res return process_generator() @@ -300,3 +332,28 @@ def accepts_context(callable: Callable[..., Any]) -> bool: return inspect.signature(callable).parameters.get("context") is not None except ValueError: return False + + +# Ported from Python 3.9+ to support Python 3.8 +async def aio_to_thread( + func, /, *args, __ctx: Optional[contextvars.Context] = None, **kwargs +): + """Asynchronously run function *func* in a separate thread. 
+ + Any *args and **kwargs supplied for this function are directly passed + to *func*. Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. + + Return a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = asyncio.get_running_loop() + ctx = __ctx or contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) + + +@functools.lru_cache(maxsize=1) +def asyncio_accepts_context(): + """Check if the current asyncio event loop accepts a context argument.""" + return accepts_context(asyncio.create_task) diff --git a/python/langsmith/beta/_utils.py b/python/langsmith/_internal/_beta_decorator.py similarity index 62% rename from python/langsmith/beta/_utils.py rename to python/langsmith/_internal/_beta_decorator.py index d1ebcbe1b..12350d533 100644 --- a/python/langsmith/beta/_utils.py +++ b/python/langsmith/_internal/_beta_decorator.py @@ -7,12 +7,15 @@ class LangSmithBetaWarning(UserWarning): """This is a warning specific to the LangSmithBeta module.""" +@functools.lru_cache(maxsize=100) +def _warn_once(message: str) -> None: + warnings.warn(message, LangSmithBetaWarning, stacklevel=2) + + def warn_beta(func: Callable) -> Callable: @functools.wraps(func) def wrapper(*args, **kwargs): - warnings.warn( - f"Function {func.__name__} is in beta.", UserWarning, stacklevel=2 - ) + _warn_once(f"Function {func.__name__} is in beta.") return func(*args, **kwargs) return wrapper diff --git a/python/langsmith/_internal/_embedding_distance.py b/python/langsmith/_internal/_embedding_distance.py index dff2d1f00..8450c7ccf 100644 --- a/python/langsmith/_internal/_embedding_distance.py +++ b/python/langsmith/_internal/_embedding_distance.py @@ -63,7 +63,7 @@ def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: def _get_openai_encoder() -> Callable[[Sequence[str]], 
Sequence[Sequence[float]]]: """Get the OpenAI GPT-3 encoder.""" try: - from openai import Client + from openai import Client as OpenAIClient except ImportError: raise ImportError( "THe default encoder for the EmbeddingDistance class uses the OpenAI API. " @@ -72,7 +72,7 @@ def _get_openai_encoder() -> Callable[[Sequence[str]], Sequence[Sequence[float]] ) def encode_text(texts: Sequence[str]) -> Sequence[Sequence[float]]: - client = Client() + client = OpenAIClient() response = client.embeddings.create( input=list(texts), model="text-embedding-3-small" ) diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 42cec872b..d4a3305f1 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -1,7 +1,6 @@ from __future__ import annotations import atexit -import concurrent.futures import datetime import functools import inspect @@ -19,6 +18,7 @@ from langsmith import client as ls_client from langsmith import env as ls_env from langsmith import run_helpers as rh +from langsmith import run_trees as rt from langsmith import schemas as ls_schemas from langsmith import utils as ls_utils @@ -388,11 +388,11 @@ def __init__( experiment: ls_schemas.TracerSession, dataset: ls_schemas.Dataset, ): - self.client = client or ls_client.Client() + self.client = client or rt.get_cached_client() self._experiment = experiment self._dataset = dataset self._version: Optional[datetime.datetime] = None - self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + self._executor = ls_utils.ContextThreadPoolExecutor(max_workers=1) atexit.register(_end_tests, self) @property @@ -414,7 +414,7 @@ def from_test( func: Callable, test_suite_name: Optional[str] = None, ) -> _LangSmithTestSuite: - client = client or ls_client.Client() + client = client or rt.get_cached_client() test_suite_name = test_suite_name or _get_test_suite_name(func) with cls._lock: if not cls._instances: @@ -527,7 +527,7 @@ def _get_test_repr(func: Callable, sig: 
inspect.Signature) -> str: def _ensure_example( func: Callable, *args: Any, langtest_extra: _UTExtra, **kwargs: Any ) -> Tuple[_LangSmithTestSuite, uuid.UUID]: - client = langtest_extra["client"] or ls_client.Client() + client = langtest_extra["client"] or rt.get_cached_client() output_keys = langtest_extra["output_keys"] signature = inspect.signature(func) inputs: dict = rh._get_inputs_safe(signature, *args, **kwargs) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 77e1136f6..02954d460 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -82,6 +82,11 @@ class RuleNodeProcessor(StringNodeProcessor): """String node processor that uses a list of rules to replace sensitive data.""" rules: List[StringNodeRule] + """List of rules to apply for replacing sensitive data. + + Each rule is a StringNodeRule, which contains a regex pattern to match + and an optional replacement string. + """ def __init__(self, rules: List[StringNodeRule]): """Initialize the processor with a list of rules.""" @@ -110,7 +115,17 @@ class CallableNodeProcessor(StringNodeProcessor): """String node processor that uses a callable function to replace sensitive data.""" func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]] + """The callable function used to replace sensitive data. + + It can be either a function that takes a single string argument and returns a string, + or a function that takes a string and a list of path elements (strings or integers) + and returns a string.""" + accepts_path: bool + """Indicates whether the callable function accepts a path argument. + + If True, the function expects two arguments: the string to be processed and the path to that string. 
+ If False, the function expects only the string to be processed.""" def __init__( self, diff --git a/python/langsmith/async_client.py b/python/langsmith/async_client.py new file mode 100644 index 000000000..8245edbab --- /dev/null +++ b/python/langsmith/async_client.py @@ -0,0 +1,948 @@ +"""The Async LangSmith Client.""" + +from __future__ import annotations + +import asyncio +import datetime +import uuid +from typing import ( + Any, + AsyncIterator, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +import httpx + +from langsmith import client as ls_client +from langsmith import schemas as ls_schemas +from langsmith import utils as ls_utils +from langsmith._internal import _beta_decorator as ls_beta + + +class AsyncClient: + """Async Client for interacting with the LangSmith API.""" + + __slots__ = ("_retry_config", "_client", "_web_url") + + def __init__( + self, + api_url: Optional[str] = None, + api_key: Optional[str] = None, + timeout_ms: Optional[ + Union[ + int, Tuple[Optional[int], Optional[int], Optional[int], Optional[int]] + ] + ] = None, + retry_config: Optional[Mapping[str, Any]] = None, + web_url: Optional[str] = None, + ): + """Initialize the async client.""" + ls_beta._warn_once("Class AsyncClient is in beta.") + self._retry_config = retry_config or {"max_retries": 3} + _headers = { + "Content-Type": "application/json", + } + api_key = ls_utils.get_api_key(api_key) + api_url = ls_utils.get_api_url(api_url) + if api_key: + _headers[ls_client.X_API_KEY] = api_key + ls_client._validate_api_key_if_hosted(api_url, api_key) + + if isinstance(timeout_ms, int): + timeout_: Union[Tuple, float] = (timeout_ms / 1000, None, None, None) + elif isinstance(timeout_ms, tuple): + timeout_ = tuple([t / 1000 if t is not None else None for t in timeout_ms]) + else: + timeout_ = 10 + self._client = httpx.AsyncClient( + base_url=api_url, headers=_headers, timeout=timeout_ + ) + self._web_url = web_url + + async def __aenter__(self) -> 
AsyncClient: + """Enter the async client.""" + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Exit the async client.""" + await self.aclose() + + async def aclose(self): + """Close the async client.""" + await self._client.aclose() + + @property + def _api_url(self): + return str(self._client.base_url) + + @property + def _host_url(self) -> str: + """The web host url.""" + return ls_utils.get_host_url(self._web_url, self._api_url) + + async def _arequest_with_retries( + self, + method: str, + endpoint: str, + **kwargs: Any, + ) -> httpx.Response: + """Make an async HTTP request with retries.""" + max_retries = cast(int, self._retry_config.get("max_retries", 3)) + for attempt in range(max_retries): + try: + response = await self._client.request(method, endpoint, **kwargs) + ls_utils.raise_for_status_with_text(response) + return response + except httpx.HTTPStatusError as e: + if attempt == max_retries - 1: + raise ls_utils.LangSmithAPIError(f"HTTP error: {repr(e)}") + await asyncio.sleep(2**attempt) + except httpx.RequestError as e: + if attempt == max_retries - 1: + raise ls_utils.LangSmithConnectionError(f"Request error: {repr(e)}") + await asyncio.sleep(2**attempt) + raise ls_utils.LangSmithAPIError( + "Unexpected error connecting to the LangSmith API" + ) + + async def _aget_paginated_list( + self, + path: str, + params: Optional[Dict[str, Any]] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """Get a paginated list of items.""" + params = params or {} + offset = params.get("offset", 0) + params["limit"] = params.get("limit", 100) + while True: + params["offset"] = offset + response = await self._arequest_with_retries("GET", path, params=params) + items = response.json() + if not items: + break + for item in items: + yield item + if len(items) < params["limit"]: + break + offset += len(items) + + async def _aget_cursor_paginated_list( + self, + path: str, + *, + body: Optional[dict] = None, + request_method: str = "POST", + data_key: 
str = "runs", + ) -> AsyncIterator[dict]: + """Get a cursor paginated list of items.""" + params_ = body.copy() if body else {} + while True: + response = await self._arequest_with_retries( + request_method, + path, + content=ls_client._dumps_json(params_), + ) + response_body = response.json() + if not response_body: + break + if not response_body.get(data_key): + break + for run in response_body[data_key]: + yield run + cursors = response_body.get("cursors") + if not cursors: + break + if not cursors.get("next"): + break + params_["cursor"] = cursors["next"] + + async def create_run( + self, + name: str, + inputs: Dict[str, Any], + run_type: str, + *, + project_name: Optional[str] = None, + revision_id: Optional[ls_client.ID_TYPE] = None, + **kwargs: Any, + ) -> None: + """Create a run.""" + run_create = { + "name": name, + "id": kwargs.get("id") or uuid.uuid4(), + "inputs": inputs, + "run_type": run_type, + "session_name": project_name or ls_utils.get_tracer_project(), + "revision_id": revision_id, + **kwargs, + } + await self._arequest_with_retries( + "POST", "/runs", content=ls_client._dumps_json(run_create) + ) + + async def update_run( + self, + run_id: ls_client.ID_TYPE, + **kwargs: Any, + ) -> None: + """Update a run.""" + data = {**kwargs, "id": ls_client._as_uuid(run_id)} + await self._arequest_with_retries( + "PATCH", + f"/runs/{ls_client._as_uuid(run_id)}", + content=ls_client._dumps_json(data), + ) + + async def read_run(self, run_id: ls_client.ID_TYPE) -> ls_schemas.Run: + """Read a run.""" + response = await self._arequest_with_retries( + "GET", + f"/runs/{ls_client._as_uuid(run_id)}", + ) + return ls_schemas.Run(**response.json()) + + async def list_runs( + self, + *, + project_id: Optional[ + Union[ls_client.ID_TYPE, Sequence[ls_client.ID_TYPE]] + ] = None, + project_name: Optional[Union[str, Sequence[str]]] = None, + run_type: Optional[str] = None, + trace_id: Optional[ls_client.ID_TYPE] = None, + reference_example_id: Optional[ls_client.ID_TYPE] 
= None, + query: Optional[str] = None, + filter: Optional[str] = None, + trace_filter: Optional[str] = None, + tree_filter: Optional[str] = None, + is_root: Optional[bool] = None, + parent_run_id: Optional[ls_client.ID_TYPE] = None, + start_time: Optional[datetime.datetime] = None, + error: Optional[bool] = None, + run_ids: Optional[Sequence[ls_client.ID_TYPE]] = None, + select: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + **kwargs: Any, + ) -> AsyncIterator[ls_schemas.Run]: + """List runs from the LangSmith API. + + Parameters + ---------- + project_id : UUID or None, default=None + The ID(s) of the project to filter by. + project_name : str or None, default=None + The name(s) of the project to filter by. + run_type : str or None, default=None + The type of the runs to filter by. + trace_id : UUID or None, default=None + The ID of the trace to filter by. + reference_example_id : UUID or None, default=None + The ID of the reference example to filter by. + query : str or None, default=None + The query string to filter by. + filter : str or None, default=None + The filter string to filter by. + trace_filter : str or None, default=None + Filter to apply to the ROOT run in the trace tree. This is meant to + be used in conjunction with the regular `filter` parameter to let you + filter runs by attributes of the root run within a trace. + tree_filter : str or None, default=None + Filter to apply to OTHER runs in the trace tree, including + sibling and child runs. This is meant to be used in conjunction with + the regular `filter` parameter to let you filter runs by attributes + of any run within a trace. + is_root : bool or None, default=None + Whether to filter by root runs. + parent_run_id : UUID or None, default=None + The ID of the parent run to filter by. + start_time : datetime or None, default=None + The start time to filter by. + error : bool or None, default=None + Whether to filter by error status. 
+ run_ids : List[str or UUID] or None, default=None + The IDs of the runs to filter by. + limit : int or None, default=None + The maximum number of runs to return. + **kwargs : Any + Additional keyword arguments. + + Yields: + ------ + Run + The runs. + + Examples: + -------- + List all runs in a project: + + .. code-block:: python + + project_runs = client.list_runs(project_name="") + + List LLM and Chat runs in the last 24 hours: + + .. code-block:: python + + todays_llm_runs = client.list_runs( + project_name="", + start_time=datetime.now() - timedelta(days=1), + run_type="llm", + ) + + List root traces in a project: + + .. code-block:: python + + root_runs = client.list_runs(project_name="", is_root=1) + + List runs without errors: + + .. code-block:: python + + correct_runs = client.list_runs(project_name="", error=False) + + List runs and only return their inputs/outputs (to speed up the query): + + .. code-block:: python + + input_output_runs = client.list_runs( + project_name="", select=["inputs", "outputs"] + ) + + List runs by run ID: + + .. code-block:: python + + run_ids = [ + "a36092d2-4ad5-4fb4-9c0d-0dba9a2ed836", + "9398e6be-964f-4aa4-8ae9-ad78cd4b7074", + ] + selected_runs = client.list_runs(id=run_ids) + + List all "chain" type runs that took more than 10 seconds and had + `total_tokens` greater than 5000: + + .. code-block:: python + + chain_runs = client.list_runs( + project_name="", + filter='and(eq(run_type, "chain"), gt(latency, 10), gt(total_tokens, 5000))', + ) + + List all runs called "extractor" whose root of the trace was assigned feedback "user_score" score of 1: + + .. code-block:: python + + good_extractor_runs = client.list_runs( + project_name="", + filter='eq(name, "extractor")', + trace_filter='and(eq(feedback_key, "user_score"), eq(feedback_score, 1))', + ) + + List all runs that started after a specific timestamp and either have "error" not equal to null or a "Correctness" feedback score equal to 0: + + .. 
code-block:: python + + complex_runs = client.list_runs( + project_name="", + filter='and(gt(start_time, "2023-07-15T12:34:56Z"), or(neq(error, null), and(eq(feedback_key, "Correctness"), eq(feedback_score, 0.0))))', + ) + + List all runs where `tags` include "experimental" or "beta" and `latency` is greater than 2 seconds: + + .. code-block:: python + + tagged_runs = client.list_runs( + project_name="", + filter='and(or(has(tags, "experimental"), has(tags, "beta")), gt(latency, 2))', + ) + """ + project_ids = [] + if isinstance(project_id, (uuid.UUID, str)): + project_ids.append(project_id) + elif isinstance(project_id, list): + project_ids.extend(project_id) + if project_name is not None: + if isinstance(project_name, str): + project_name = [project_name] + projects = await asyncio.gather( + *[self.read_project(project_name=name) for name in project_name] + ) + project_ids.extend([project.id for project in projects]) + + body_query: Dict[str, Any] = { + "session": project_ids if project_ids else None, + "run_type": run_type, + "reference_example": ( + [reference_example_id] if reference_example_id else None + ), + "query": query, + "filter": filter, + "trace_filter": trace_filter, + "tree_filter": tree_filter, + "is_root": is_root, + "parent_run": parent_run_id, + "start_time": start_time.isoformat() if start_time else None, + "error": error, + "id": run_ids, + "trace": trace_id, + "select": select, + **kwargs, + } + if project_ids: + body_query["session"] = [ + str(ls_client._as_uuid(id_)) for id_ in project_ids + ] + body = {k: v for k, v in body_query.items() if v is not None} + ix = 0 + async for run in self._aget_cursor_paginated_list("/runs/query", body=body): + yield ls_schemas.Run(**run) + ix += 1 + if limit is not None and ix >= limit: + break + + async def share_run( + self, run_id: ls_client.ID_TYPE, *, share_id: Optional[ls_client.ID_TYPE] = None + ) -> str: + """Get a share link for a run asynchronously. 
+ + Args: + run_id (ID_TYPE): The ID of the run to share. + share_id (Optional[ID_TYPE], optional): Custom share ID. + If not provided, a random UUID will be generated. + + Returns: + str: The URL of the shared run. + + Raises: + httpx.HTTPStatusError: If the API request fails. + """ + run_id_ = ls_client._as_uuid(run_id, "run_id") + data = { + "run_id": str(run_id_), + "share_token": str(share_id or uuid.uuid4()), + } + response = await self._arequest_with_retries( + "PUT", + f"/runs/{run_id_}/share", + content=ls_client._dumps_json(data), + ) + ls_utils.raise_for_status_with_text(response) + share_token = response.json()["share_token"] + return f"{self._host_url}/public/{share_token}/r" + + async def run_is_shared(self, run_id: ls_client.ID_TYPE) -> bool: + """Get share state for a run asynchronously.""" + link = await self.read_run_shared_link(ls_client._as_uuid(run_id, "run_id")) + return link is not None + + async def read_run_shared_link(self, run_id: ls_client.ID_TYPE) -> Optional[str]: + """Retrieve the shared link for a specific run asynchronously. + + Args: + run_id (ID_TYPE): The ID of the run. + + Returns: + Optional[str]: The shared link for the run, or None if the link is not + available. + + Raises: + httpx.HTTPStatusError: If the API request fails. 
+ """ + response = await self._arequest_with_retries( + "GET", + f"/runs/{ls_client._as_uuid(run_id, 'run_id')}/share", + ) + ls_utils.raise_for_status_with_text(response) + result = response.json() + if result is None or "share_token" not in result: + return None + return f"{self._host_url}/public/{result['share_token']}/r" + + async def create_project( + self, + project_name: str, + **kwargs: Any, + ) -> ls_schemas.TracerSession: + """Create a project.""" + data = {"name": project_name, **kwargs} + response = await self._arequest_with_retries( + "POST", "/sessions", content=ls_client._dumps_json(data) + ) + return ls_schemas.TracerSession(**response.json()) + + async def read_project( + self, + project_name: Optional[str] = None, + project_id: Optional[ls_client.ID_TYPE] = None, + ) -> ls_schemas.TracerSession: + """Read a project.""" + if project_id: + response = await self._arequest_with_retries( + "GET", f"/sessions/{ls_client._as_uuid(project_id)}" + ) + elif project_name: + response = await self._arequest_with_retries( + "GET", "/sessions", params={"name": project_name} + ) + else: + raise ValueError("Either project_name or project_id must be provided") + + data = response.json() + if isinstance(data, list): + if not data: + raise ls_utils.LangSmithNotFoundError( + f"Project {project_name} not found" + ) + return ls_schemas.TracerSession(**data[0]) + return ls_schemas.TracerSession(**data) + + async def delete_project( + self, *, project_name: Optional[str] = None, project_id: Optional[str] = None + ) -> None: + """Delete a project from LangSmith. + + Parameters + ---------- + project_name : str or None, default=None + The name of the project to delete. + project_id : str or None, default=None + The ID of the project to delete. 
+ """ + if project_id is None and project_name is None: + raise ValueError("Either project_name or project_id must be provided") + if project_id is None: + project = await self.read_project(project_name=project_name) + project_id = str(project.id) + if not project_id: + raise ValueError("Project not found") + await self._arequest_with_retries( + "DELETE", + f"/sessions/{ls_client._as_uuid(project_id)}", + ) + + async def create_dataset( + self, + dataset_name: str, + **kwargs: Any, + ) -> ls_schemas.Dataset: + """Create a dataset.""" + data = {"name": dataset_name, **kwargs} + response = await self._arequest_with_retries( + "POST", "/datasets", content=ls_client._dumps_json(data) + ) + return ls_schemas.Dataset(**response.json()) + + async def read_dataset( + self, + dataset_name: Optional[str] = None, + dataset_id: Optional[ls_client.ID_TYPE] = None, + ) -> ls_schemas.Dataset: + """Read a dataset.""" + if dataset_id: + response = await self._arequest_with_retries( + "GET", f"/datasets/{ls_client._as_uuid(dataset_id)}" + ) + elif dataset_name: + response = await self._arequest_with_retries( + "GET", "/datasets", params={"name": dataset_name} + ) + else: + raise ValueError("Either dataset_name or dataset_id must be provided") + + data = response.json() + if isinstance(data, list): + if not data: + raise ls_utils.LangSmithNotFoundError( + f"Dataset {dataset_name} not found" + ) + return ls_schemas.Dataset(**data[0]) + return ls_schemas.Dataset(**data) + + async def delete_dataset(self, dataset_id: ls_client.ID_TYPE) -> None: + """Delete a dataset.""" + await self._arequest_with_retries( + "DELETE", + f"/datasets/{ls_client._as_uuid(dataset_id)}", + ) + + async def list_datasets( + self, + **kwargs: Any, + ) -> AsyncIterator[ls_schemas.Dataset]: + """List datasets.""" + async for dataset in self._aget_paginated_list("/datasets", params=kwargs): + yield ls_schemas.Dataset(**dataset) + + async def create_example( + self, + inputs: Dict[str, Any], + outputs: 
Optional[Dict[str, Any]] = None, + dataset_id: Optional[ls_client.ID_TYPE] = None, + dataset_name: Optional[str] = None, + **kwargs: Any, + ) -> ls_schemas.Example: + """Create an example.""" + if dataset_id is None and dataset_name is None: + raise ValueError("Either dataset_id or dataset_name must be provided") + if dataset_id is None: + dataset = await self.read_dataset(dataset_name=dataset_name) + dataset_id = dataset.id + + data = { + "inputs": inputs, + "outputs": outputs, + "dataset_id": str(dataset_id), + **kwargs, + } + response = await self._arequest_with_retries( + "POST", "/examples", content=ls_client._dumps_json(data) + ) + return ls_schemas.Example(**response.json()) + + async def read_example(self, example_id: ls_client.ID_TYPE) -> ls_schemas.Example: + """Read an example.""" + response = await self._arequest_with_retries( + "GET", f"/examples/{ls_client._as_uuid(example_id)}" + ) + return ls_schemas.Example(**response.json()) + + async def list_examples( + self, + *, + dataset_id: Optional[ls_client.ID_TYPE] = None, + dataset_name: Optional[str] = None, + **kwargs: Any, + ) -> AsyncIterator[ls_schemas.Example]: + """List examples.""" + params = kwargs.copy() + if dataset_id: + params["dataset"] = ls_client._as_uuid(dataset_id) + elif dataset_name: + dataset = await self.read_dataset(dataset_name=dataset_name) + params["dataset"] = dataset.id + + async for example in self._aget_paginated_list("/examples", params=params): + yield ls_schemas.Example(**example) + + async def create_feedback( + self, + run_id: Optional[ls_client.ID_TYPE], + key: str, + score: Optional[float] = None, + value: Optional[Any] = None, + comment: Optional[str] = None, + **kwargs: Any, + ) -> ls_schemas.Feedback: + """Create feedback for a run. + + Args: + run_id (Optional[ls_client.ID_TYPE]): The ID of the run to provide feedback for. + Can be None for project-level feedback. + key (str): The name of the metric or aspect this feedback is about. 
+ score (Optional[float]): The score to rate this run on the metric or aspect. + value (Optional[Any]): The display value or non-numeric value for this feedback. + comment (Optional[str]): A comment about this feedback. + **kwargs: Additional keyword arguments to include in the feedback data. + + Returns: + ls_schemas.Feedback: The created feedback object. + + Raises: + httpx.HTTPStatusError: If the API request fails. + """ # noqa: E501 + data = { + "run_id": ls_client._ensure_uuid(run_id, accept_null=True), + "key": key, + "score": score, + "value": value, + "comment": comment, + **kwargs, + } + response = await self._arequest_with_retries( + "POST", "/feedback", content=ls_client._dumps_json(data) + ) + return ls_schemas.Feedback(**response.json()) + + async def create_feedback_from_token( + self, + token_or_url: Union[str, uuid.UUID], + score: Union[float, int, bool, None] = None, + *, + value: Union[float, int, bool, str, dict, None] = None, + correction: Union[dict, None] = None, + comment: Union[str, None] = None, + metadata: Optional[dict] = None, + ) -> None: + """Create feedback from a presigned token or URL. + + Args: + token_or_url (Union[str, uuid.UUID]): The token or URL from which to create + feedback. + score (Union[float, int, bool, None], optional): The score of the feedback. + Defaults to None. + value (Union[float, int, bool, str, dict, None], optional): The value of the + feedback. Defaults to None. + correction (Union[dict, None], optional): The correction of the feedback. + Defaults to None. + comment (Union[str, None], optional): The comment of the feedback. Defaults + to None. + metadata (Optional[dict], optional): Additional metadata for the feedback. + Defaults to None. + + Raises: + ValueError: If the source API URL is invalid. + + Returns: + None: This method does not return anything. 
+ """ + source_api_url, token_uuid = ls_client._parse_token_or_url( + token_or_url, self._api_url, num_parts=1 + ) + if source_api_url != self._api_url: + raise ValueError(f"Invalid source API URL. {source_api_url}") + response = await self._arequest_with_retries( + "POST", + f"/feedback/tokens/{ls_client._as_uuid(token_uuid)}", + content=ls_client._dumps_json( + { + "score": score, + "value": value, + "correction": correction, + "comment": comment, + "metadata": metadata, + # TODO: Add ID once the API supports it. + } + ), + ) + ls_utils.raise_for_status_with_text(response) + + async def create_presigned_feedback_token( + self, + run_id: ls_client.ID_TYPE, + feedback_key: str, + *, + expiration: Optional[datetime.datetime | datetime.timedelta] = None, + feedback_config: Optional[ls_schemas.FeedbackConfig] = None, + feedback_id: Optional[ls_client.ID_TYPE] = None, + ) -> ls_schemas.FeedbackIngestToken: + """Create a pre-signed URL to send feedback data to. + + This is useful for giving browser-based clients a way to upload + feedback data directly to LangSmith without accessing the + API key. + + Args: + run_id: + feedback_key: + expiration: The expiration time of the pre-signed URL. + Either a datetime or a timedelta offset from now. + Default to 3 hours. + feedback_config: FeedbackConfig or None. + If creating a feedback_key for the first time, + this defines how the metric should be interpreted, + such as a continuous score (w/ optional bounds), + or distribution over categorical values. + feedback_id: The ID of the feedback to create. If not provided, a new + feedback will be created. + + Returns: + The pre-signed URL for uploading feedback data. 
+ """ + body: Dict[str, Any] = { + "run_id": run_id, + "feedback_key": feedback_key, + "feedback_config": feedback_config, + "id": feedback_id or str(uuid.uuid4()), + } + if expiration is None: + body["expires_in"] = ls_schemas.TimeDeltaInput( + days=0, + hours=3, + minutes=0, + ) + elif isinstance(expiration, datetime.datetime): + body["expires_at"] = expiration.isoformat() + elif isinstance(expiration, datetime.timedelta): + body["expires_in"] = ls_schemas.TimeDeltaInput( + days=expiration.days, + hours=expiration.seconds // 3600, + minutes=(expiration.seconds % 3600) // 60, + ) + else: + raise ValueError( + f"Invalid expiration type: {type(expiration)}. " + "Expected datetime.datetime or datetime.timedelta." + ) + + response = await self._arequest_with_retries( + "POST", + "/feedback/tokens", + content=ls_client._dumps_json(body), + ) + return ls_schemas.FeedbackIngestToken(**response.json()) + + async def read_feedback( + self, feedback_id: ls_client.ID_TYPE + ) -> ls_schemas.Feedback: + """Read feedback.""" + response = await self._arequest_with_retries( + "GET", f"/feedback/{ls_client._as_uuid(feedback_id)}" + ) + return ls_schemas.Feedback(**response.json()) + + async def list_feedback( + self, + *, + run_ids: Optional[Sequence[ls_client.ID_TYPE]] = None, + feedback_key: Optional[Sequence[str]] = None, + feedback_source_type: Optional[Sequence[ls_schemas.FeedbackSourceType]] = None, + limit: Optional[int] = None, + **kwargs: Any, + ) -> AsyncIterator[ls_schemas.Feedback]: + """List feedback.""" + params = { + "run": ( + [str(ls_client._as_uuid(id_)) for id_ in run_ids] if run_ids else None + ), + "limit": min(limit, 100) if limit is not None else 100, + **kwargs, + } + if feedback_key is not None: + params["key"] = feedback_key + if feedback_source_type is not None: + params["source"] = feedback_source_type + ix = 0 + async for feedback in self._aget_paginated_list("/feedback", params=params): + yield ls_schemas.Feedback(**feedback) + ix += 1 + if limit is 
not None and ix >= limit: + break + + @ls_beta.warn_beta + async def index_dataset( + self, + *, + dataset_id: ls_client.ID_TYPE, + tag: str = "latest", + **kwargs: Any, + ) -> None: + """Enable dataset indexing. Examples are indexed by their inputs. + + This enables searching for similar examples by inputs with + ``client.similar_examples()``. + + Args: + dataset_id (UUID): The ID of the dataset to index. + tag (str, optional): The version of the dataset to index. If 'latest' + then any updates to the dataset (additions, updates, deletions of + examples) will be reflected in the index. + + Returns: + None + + Raises: + requests.HTTPError + """ # noqa: E501 + dataset_id = ls_client._as_uuid(dataset_id, "dataset_id") + resp = await self._arequest_with_retries( + "POST", + f"/datasets/{dataset_id}/index", + content=ls_client._dumps_json({"tag": tag, **kwargs}), + ) + ls_utils.raise_for_status_with_text(resp) + + @ls_beta.warn_beta + async def similar_examples( + self, + inputs: dict, + /, + *, + limit: int, + dataset_id: ls_client.ID_TYPE, + filter: Optional[str] = None, + **kwargs: Any, + ) -> List[ls_schemas.ExampleSearch]: + r"""Retrieve the dataset examples whose inputs best match the current inputs. + + **Note**: Must have few-shot indexing enabled for the dataset. See + ``client.index_dataset()``. + + Args: + inputs (dict): The inputs to use as a search query. Must match the dataset + input schema. Must be JSON serializable. + limit (int): The maximum number of examples to return. + dataset_id (str or UUID): The ID of the dataset to search over. + filter (str, optional): A filter string to apply to the search results. Uses + the same syntax as the `filter` parameter in `list_runs()`. Only a subset + of operations are supported. Defaults to None. + kwargs (Any): Additional keyword args to pass as part of request body. + + Returns: + List of ExampleSearch objects. + + Example: + .. 
code-block:: python + + from langsmith import Client + + client = Client() + await client.similar_examples( + {"question": "When would i use the runnable generator"}, + limit=3, + dataset_id="...", + ) + + .. code-block:: pycon + + [ + ExampleSearch( + inputs={'question': 'How do I cache a Chat model? What caches can I use?'}, + outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n```\n'}, + metadata=None, + id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': "What's a runnable lambda?"}, + outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."}, + metadata=None, + id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': 'Show me how to use RecursiveURLLoader'}, + outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. 
Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'}, + metadata=None, + id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ] + + """ # noqa: E501 + dataset_id = ls_client._as_uuid(dataset_id, "dataset_id") + req = { + "inputs": inputs, + "limit": limit, + **kwargs, + } + if filter: + req["filter"] = filter + + resp = await self._arequest_with_retries( + "POST", + f"/datasets/{dataset_id}/search", + content=ls_client._dumps_json(req), + ) + ls_utils.raise_for_status_with_text(resp) + examples = [] + for ex in resp.json()["examples"]: + examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id)) + return examples diff --git a/python/langsmith/beta/__init__.py b/python/langsmith/beta/__init__.py index 9240296a3..f712c1adb 100644 --- a/python/langsmith/beta/__init__.py +++ b/python/langsmith/beta/__init__.py @@ -1,6 +1,6 @@ """Beta functionality prone to change.""" +from langsmith._internal._beta_decorator import warn_beta from langsmith.beta._evals import compute_test_metrics, convert_runs_to_test -from langsmith.beta._utils import warn_beta __all__ = ["convert_runs_to_test", "compute_test_metrics", "warn_beta"] diff --git a/python/langsmith/beta/_evals.py b/python/langsmith/beta/_evals.py index f41bc8785..3afa37fa1 100644 --- a/python/langsmith/beta/_evals.py +++ b/python/langsmith/beta/_evals.py @@ -4,15 +4,15 @@ """ import 
collections -import concurrent.futures import datetime import itertools import uuid from typing import DefaultDict, List, Optional, Sequence, Tuple, TypeVar -import langsmith.beta._utils as beta_utils +import langsmith.run_trees as rt import langsmith.schemas as ls_schemas from langsmith import evaluation as ls_eval +from langsmith._internal._beta_decorator import warn_beta from langsmith.client import Client @@ -66,7 +66,7 @@ def _convert_root_run(root: ls_schemas.Run, run_to_example_map: dict) -> List[di return result -@beta_utils.warn_beta +@warn_beta def convert_runs_to_test( runs: Sequence[ls_schemas.Run], *, @@ -122,7 +122,7 @@ def convert_runs_to_test( """ if not runs: raise ValueError(f"""Expected a non-empty sequence of runs. Received: {runs}""") - client = client or Client() + client = client or rt.get_cached_client() ds = client.create_dataset(dataset_name=dataset_name) outputs = [r.outputs for r in runs] if include_outputs else None client.create_examples( @@ -197,7 +197,7 @@ def _outer_product(list1: List[T], list2: List[U]) -> List[Tuple[T, U]]: return list(itertools.product(list1, list2)) -@beta_utils.warn_beta +@warn_beta def compute_test_metrics( project_name: str, *, @@ -218,6 +218,8 @@ def compute_test_metrics( Returns: None: This function does not return any value. 
""" + from langsmith import ContextThreadPoolExecutor + evaluators_: List[ls_eval.RunEvaluator] = [] for func in evaluators: if isinstance(func, ls_eval.RunEvaluator): @@ -228,9 +230,9 @@ def compute_test_metrics( raise NotImplementedError( f"Evaluation not yet implemented for evaluator of type {type(func)}" ) - client = client or Client() + client = client or rt.get_cached_client() traces = _load_nested_traces(project_name, client) - with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ContextThreadPoolExecutor(max_workers=max_concurrency) as executor: results = executor.map( client.evaluate_run, *zip(*_outer_product(traces, evaluators_)) ) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 2d6722926..732fbd95b 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,8 +1,7 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.6.9 +_LANGSMITH_IMAGE_VERSION=0.7.39 # Change to the desired Langsmith image version LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key -OPENAI_API_KEY=your-openai-api-key # Needed for Magic Query features -AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 +AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0. Set to mixed for basic auth. OAUTH_CLIENT_ID=your-client-id # Required if AUTH_TYPE=oauth OAUTH_ISSUER_URL=https://your-issuer-url # Required if AUTH_TYPE=oauth API_KEY_SALT=super # Change to your desired API key salt. Can be any random value. 
Must be set if AUTH_TYPE=oauth @@ -19,3 +18,18 @@ CLICKHOUSE_TLS=false # Change to true if you are using TLS to connect to Clickho CLICKHOUSE_PASSWORD=password # Change to your Clickhouse password if needed CLICKHOUSE_NATIVE_PORT=9000 # Change to your Clickhouse native port if needed ORG_CREATION_DISABLED=false # Set to true if you want to disable org creation +WORKSPACE_SCOPE_ORG_INVITES_ENABLED=false # Set to true if you want to disable workspace scope org invites +PERSONAL_ORGS_DISABLED=false # Set to true if you want to disable personal orgs +TTL_ENABLED=true # Set to true if you want to enable TTL for your data +SHORT_LIVED_TTL_SECONDS=1209600 # Set to your desired TTL for short-lived traces. Default is 14 days +LONG_LIVED_TTL_SECONDS=34560000 # Set to your desired TTL for long-lived traces. Default is 400 days +BLOB_STORAGE_ENABLED=false # Set to true if you want to enable blob storage +BLOB_STORAGE_BUCKET_NAME=langsmith-blob-storage # Change to your desired blob storage bucket name +BLOB_STORAGE_API_URL=https://s3.us-west-2.amazonaws.com # Change to your desired blob storage API URL +BLOB_STORAGE_ACCESS_KEY=your-access-key # Change to your desired blob storage access key +BLOB_STORAGE_ACCESS_KEY_SECRET=your-access-key-secret # Change to your desired blob storage access key secret +CH_SEARCH_ENABLED=true # Set to false if you do not want to store tokenized inputs/outputs in clickhouse +BASIC_AUTH_ENABLED=false # Set to true if you want to enable basic auth +BASIC_AUTH_JWT_SECRET=your-jwt-secret # Change to your desired basic auth JWT secret +INITIAL_ORG_ADMIN_EMAIL=your-email # Change to your desired initial org admin email. Only used if BASIC_AUTH_ENABLED=true +INITIAL_ORG_ADMIN_PASSWORD=your-password # Change to your desired initial org admin password. 
Only used if BASIC_AUTH_ENABLED=true diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 87130aa13..d78b060f6 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.7.39} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.7.39} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,18 +16,19 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.7.39} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker - GO_ENDPOINT=http://langchain-platform-backend:1986 + - SMITH_BACKEND_ENDPOINT=http://langchain-backend:1984 - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - LOG_LEVEL=${LOG_LEVEL:-warning} + - LOG_LEVEL=${LOG_LEVEL:-info} - AUTH_TYPE=${AUTH_TYPE:-none} - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - API_KEY_SALT=${API_KEY_SALT} + - X_SERVICE_AUTH_JWT_SECRET=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} @@ -37,6 +38,19 @@ services: - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} - FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false} + - FF_TRACE_TIERS_ENABLED=${TTL_ENABLED:-true} + - 
FF_UPGRADE_TRACE_TIER_ENABLED=${TTL_ENABLED:-true} + - FF_S3_STORAGE_ENABLED=${BLOB_STORAGE_ENABLED:-false} + - S3_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets} + - S3_RUN_MANIFEST_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets} + - S3_API_URL=${BLOB_STORAGE_API_URL:-https://s3.us-west-2.amazonaws.com} + - S3_ACCESS_KEY=${BLOB_STORAGE_ACCESS_KEY} + - S3_ACCESS_KEY_SECRET=${BLOB_STORAGE_ACCESS_KEY_SECRET} + - FF_CH_SEARCH_ENABLED=${CH_SEARCH_ENABLED:-true} + - BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} + - INITIAL_ORG_ADMIN_EMAIL=${INITIAL_ORG_ADMIN_EMAIL} + - INITIAL_ORG_ADMIN_PASSWORD=${INITIAL_ORG_ADMIN_PASSWORD} ports: - 1984:1984 depends_on: @@ -50,7 +64,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.7.39} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker @@ -61,8 +75,11 @@ services: - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - API_KEY_SALT=${API_KEY_SALT} + - X_SERVICE_AUTH_JWT_SECRET=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} ports: - 1986:1986 depends_on: @@ -76,26 +93,38 @@ services: condition: service_completed_successfully restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.7.39} environment: - LANGCHAIN_ENV=local_docker + - GO_ENDPOINT=http://langchain-platform-backend:1986 + - SMITH_BACKEND_ENDPOINT=http://langchain-backend:1984 - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - - 
OPENAI_API_KEY=${OPENAI_API_KEY} - - LOG_LEVEL=${LOG_LEVEL:-warning} + - LOG_LEVEL=${LOG_LEVEL:-info} - AUTH_TYPE=${AUTH_TYPE:-none} - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - API_KEY_SALT=${API_KEY_SALT} + - X_SERVICE_AUTH_JWT_SECRET=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} - - MAX_ASYNC_JOBS_PER_WORKER=${MAX_ASYNC_JOBS_PER_WORKER:-10} - - ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} + - FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false} + - FF_TRACE_TIERS_ENABLED=${TTL_ENABLED:-true} + - FF_UPGRADE_TRACE_TIER_ENABLED=${TTL_ENABLED:-true} + - FF_S3_STORAGE_ENABLED=${BLOB_STORAGE_ENABLED:-false} + - S3_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets} + - S3_RUN_MANIFEST_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets} + - S3_API_URL=${BLOB_STORAGE_API_URL:-https://s3.us-west-2.amazonaws.com} + - S3_ACCESS_KEY=${BLOB_STORAGE_ACCESS_KEY} + - S3_ACCESS_KEY_SECRET=${BLOB_STORAGE_ACCESS_KEY_SECRET} + - FF_CH_SEARCH_ENABLED=${CH_SEARCH_ENABLED:-true} + - BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} command: - "saq" - "app.workers.queues.single_queue_worker.settings" @@ -164,7 +193,7 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.7.39} depends_on: langchain-clickhouse: condition: service_healthy @@ -175,15 +204,15 @@ services: - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} - 
CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_NATIVE_PORT=${CLICKHOUSE_NATIVE_PORT:-9000} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} - entrypoint: + command: [ "bash", - "-c", - "migrate -source file://clickhouse/migrations -database 'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", + "scripts/wait_for_clickhouse_and_migrate.sh" ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.7.39} depends_on: langchain-db: condition: service_healthy @@ -205,9 +234,10 @@ services: - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_NATIVE_PORT=${CLICKHOUSE_NATIVE_PORT:-9000} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} restart: "on-failure:10" - entrypoint: + command: [ "bash", "-c", diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 5d05a2e7e..6df0f9004 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -1,9 +1,21 @@ -"""The LangSmith Client.""" +"""Client for interacting with the LangSmith API. + +Use the client to customize API keys / workspace ocnnections, SSl certs, +etc. for tracing. + +Also used to create, read, update, and delete LangSmith resources +such as runs (~trace spans), datasets, examples (~records), +feedback (~metrics), projects (tracer sessions/groups), etc. + +For detailed API documentation, visit: https://docs.smith.langchain.com/. 
+""" from __future__ import annotations import atexit import collections +import concurrent.futures as cf +import contextlib import datetime import functools import importlib @@ -14,10 +26,10 @@ import os import random import re -import socket import sys import threading import time +import traceback import typing import uuid import warnings @@ -47,12 +59,14 @@ import orjson import requests from requests import adapters as requests_adapters +from typing_extensions import TypeGuard from urllib3.util import Retry import langsmith from langsmith import env as ls_env from langsmith import schemas as ls_schemas from langsmith import utils as ls_utils +from langsmith._internal._beta_decorator import warn_beta if TYPE_CHECKING: import pandas as pd # type: ignore @@ -65,29 +79,11 @@ X_API_KEY = "x-api-key" -def _is_localhost(url: str) -> bool: - """Check if the URL is localhost. - - Parameters - ---------- - url : str - The URL to check. - - Returns: - ------- - bool - True if the URL is localhost, False otherwise. 
- """ - try: - netloc = urllib_parse.urlsplit(url).netloc.split(":")[0] - ip = socket.gethostbyname(netloc) - return ip == "127.0.0.1" or ip.startswith("0.0.0.0") or ip.startswith("::") - except socket.gaierror: - return False - - def _parse_token_or_url( - url_or_token: Union[str, uuid.UUID], api_url: str, num_parts: int = 2 + url_or_token: Union[str, uuid.UUID], + api_url: str, + num_parts: int = 2, + kind: str = "dataset", ) -> Tuple[str, str]: """Parse a public dataset URL or share token.""" try: @@ -103,7 +99,7 @@ def _parse_token_or_url( if len(path_parts) >= num_parts: token_uuid = path_parts[-num_parts] else: - raise ls_utils.LangSmithUserError(f"Invalid public dataset URL: {url_or_token}") + raise ls_utils.LangSmithUserError(f"Invalid public {kind} URL: {url_or_token}") return api_url, token_uuid @@ -266,7 +262,9 @@ def _dumps_json_single( ensure_ascii=True, ).encode("utf-8") try: - result = orjson.dumps(orjson.loads(result.decode("utf-8", errors="lossy"))) + result = orjson.dumps( + orjson.loads(result.decode("utf-8", errors="surrogateescape")) + ) except orjson.JSONDecodeError: result = _elide_surrogates(result) return result @@ -322,8 +320,9 @@ def _validate_api_key_if_hosted(api_url: str, api_key: Optional[str]) -> None: # If the domain is langchain.com, raise error if no api_key if not api_key: if _is_langchain_hosted(api_url): - raise ls_utils.LangSmithUserError( - "API key must be provided when using hosted LangSmith API" + warnings.warn( + "API key must be provided when using hosted LangSmith API", + ls_utils.LangSmithMissingAPIKeyWarning, ) @@ -347,38 +346,6 @@ def _get_tracing_sampling_rate() -> float | None: return sampling_rate -def _get_env(var_names: Sequence[str], default: Optional[str] = None) -> Optional[str]: - for var_name in var_names: - var = os.getenv(var_name) - if var is not None: - return var - return default - - -def _get_api_key(api_key: Optional[str]) -> Optional[str]: - api_key_ = ( - api_key - if api_key is not None - else 
_get_env(("LANGSMITH_API_KEY", "LANGCHAIN_API_KEY")) - ) - if api_key_ is None or not api_key_.strip(): - return None - return api_key_.strip().strip('"').strip("'") - - -def _get_api_url(api_url: Optional[str]) -> str: - _api_url = api_url or cast( - str, - _get_env( - ("LANGSMITH_ENDPOINT", "LANGCHAIN_ENDPOINT"), - "https://api.smith.langchain.com", - ), - ) - if not _api_url.strip(): - raise ls_utils.LangSmithUserError("LangSmith API URL cannot be empty") - return _api_url.strip().strip('"').strip("'").rstrip("/") - - def _get_write_api_urls(_write_api_urls: Optional[Dict[str, str]]) -> Dict[str, str]: _write_api_urls = _write_api_urls or json.loads( os.getenv("LANGSMITH_RUNS_ENDPOINTS", "{}") @@ -462,13 +429,14 @@ class Client: "_web_url", "_tenant_id", "tracing_sample_rate", - "_sampled_post_uuids", + "_filtered_post_uuids", "tracing_queue", "_anonymizer", "_hide_inputs", "_hide_outputs", "_info", "_write_api_urls", + "_settings", ] def __init__( @@ -546,7 +514,7 @@ def __init__( ) self.tracing_sample_rate = _get_tracing_sampling_rate() - self._sampled_post_uuids: set[uuid.UUID] = set() + self._filtered_post_uuids: set[uuid.UUID] = set() self._write_api_urls: Mapping[str, Optional[str]] = _get_write_api_urls( api_urls ) @@ -554,8 +522,8 @@ def __init__( self.api_url = next(iter(self._write_api_urls)) self.api_key: Optional[str] = self._write_api_urls[self.api_url] else: - self.api_url = _get_api_url(api_url) - self.api_key = _get_api_key(api_key) + self.api_url = ls_utils.get_api_url(api_url) + self.api_key = ls_utils.get_api_key(api_key) _validate_api_key_if_hosted(self.api_url, self.api_key) self._write_api_urls = {self.api_url: self.api_key} self.retry_config = retry_config or _default_retry_config() @@ -611,6 +579,8 @@ def __init__( else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) + self._settings: Union[ls_schemas.LangSmithSettings, None] = None + def _repr_html_(self) -> str: """Return an HTML representation of the instance with a link to the URL. 
@@ -639,20 +609,7 @@ def _host(self) -> str: @property def _host_url(self) -> str: """The web host url.""" - if self._web_url: - link = self._web_url - else: - parsed_url = urllib_parse.urlparse(self.api_url) - if _is_localhost(self.api_url): - link = "http://localhost" - elif parsed_url.path.endswith("/api"): - new_path = parsed_url.path.rsplit("/api", 1)[0] - link = urllib_parse.urlunparse(parsed_url._replace(path=new_path)) - elif parsed_url.netloc.startswith("dev."): - link = "https://dev.smith.langchain.com" - else: - link = "https://smith.langchain.com" - return link + return ls_utils.get_host_url(self._web_url, self.api_url) @property def _headers(self) -> Dict[str, str]: @@ -698,6 +655,36 @@ def info(self) -> ls_schemas.LangSmithInfo: self._info = ls_schemas.LangSmithInfo() return self._info + def _get_settings(self) -> ls_schemas.LangSmithSettings: + """Get the settings for the current tenant. + + Returns: + dict: The settings for the current tenant. + """ + if self._settings is None: + response = self.request_with_retries("GET", "/settings") + ls_utils.raise_for_status_with_text(response) + self._settings = ls_schemas.LangSmithSettings(**response.json()) + + return self._settings + + def _content_above_size(self, content_length: Optional[int]) -> Optional[str]: + if content_length is None or self._info is None: + return None + info = cast(ls_schemas.LangSmithInfo, self._info) + bic = info.batch_ingest_config + if not bic: + return None + size_limit = bic.get("size_limit_bytes") + if size_limit is None: + return None + if content_length > size_limit: + return ( + f"The content length of {content_length} bytes exceeds the " + f"maximum size limit of {size_limit} bytes." 
+ ) + return None + def request_with_retries( self, /, @@ -709,6 +696,7 @@ def request_with_retries( retry_on: Optional[Sequence[Type[BaseException]]] = None, to_ignore: Optional[Sequence[Type[BaseException]]] = None, handle_response: Optional[Callable[[requests.Response, int], Any]] = None, + _context: str = "", **kwargs: Any, ) -> requests.Response: """Send a request with retries. @@ -781,7 +769,6 @@ def request_with_retries( ) to_ignore_: Tuple[Type[BaseException], ...] = (*(to_ignore or ()),) response = None - for idx in range(stop_after_attempt): try: try: @@ -818,22 +805,26 @@ def request_with_retries( f"Server error caused failure to {method}" f" {pathname} in" f" LangSmith API. {repr(e)}" + f"{_context}" ) elif response.status_code == 429: raise ls_utils.LangSmithRateLimitError( f"Rate limit exceeded for {pathname}. {repr(e)}" + f"{_context}" ) elif response.status_code == 401: raise ls_utils.LangSmithAuthError( f"Authentication failed for {pathname}. {repr(e)}" + f"{_context}" ) elif response.status_code == 404: raise ls_utils.LangSmithNotFoundError( f"Resource not found for {pathname}. {repr(e)}" + f"{_context}" ) elif response.status_code == 409: raise ls_utils.LangSmithConflictError( - f"Conflict for {pathname}. {repr(e)}" + f"Conflict for {pathname}. {repr(e)}" f"{_context}" ) else: raise ls_utils.LangSmithError( @@ -848,14 +839,36 @@ def request_with_retries( ) except requests.ConnectionError as e: recommendation = ( - "Please confirm your LANGCHAIN_ENDPOINT" + "Please confirm your LANGCHAIN_ENDPOINT." if self.api_url != "https://api.smith.langchain.com" else "Please confirm your internet connection." 
) + try: + content_length = int( + str(e.request.headers.get("Content-Length")) + if e.request + else "" + ) + size_rec = self._content_above_size(content_length) + if size_rec: + recommendation = size_rec + except ValueError: + content_length = None + + api_key = ( + e.request.headers.get("x-api-key") or "" if e.request else "" + ) + prefix, suffix = api_key[:5], api_key[-2:] + filler = "*" * (max(0, len(api_key) - 7)) + masked_api_key = f"{prefix}{filler}{suffix}" + raise ls_utils.LangSmithConnectionError( f"Connection error caused failure to {method} {pathname}" - f" in LangSmith API. {recommendation}." + f" in LangSmith API. {recommendation}" f" {repr(e)}" + f"\nContent-Length: {content_length}" + f"\nAPI Key: {masked_api_key}" + f"{_context}" ) from e except Exception as e: args = list(e.args) @@ -871,6 +884,7 @@ def request_with_retries( emsg = msg raise ls_utils.LangSmithError( f"Failed to {method} {pathname} in LangSmith API. {emsg}" + f"{_context}" ) from e except to_ignore_ as e: if response is not None: @@ -1153,11 +1167,27 @@ def _run_transform( run_create["outputs"] = self._hide_run_outputs(run_create["outputs"]) if not update and not run_create.get("start_time"): run_create["start_time"] = datetime.datetime.now(datetime.timezone.utc) + + # Only retain LLM & Prompt manifests + if "serialized" in run_create: + if run_create.get("run_type") not in ( + "llm", + "prompt", + ): + # Drop completely + run_create = {k: v for k, v in run_create.items() if k != "serialized"} + else: + # Drop graph + serialized = { + k: v for k, v in run_create["serialized"].items() if k != "graph" + } + run_create = {**run_create, "serialized": serialized} + return run_create @staticmethod def _insert_runtime_env(runs: Sequence[dict]) -> None: - runtime_env = ls_env.get_runtime_and_metrics() + runtime_env = ls_env.get_runtime_environment() for run_create in runs: run_extra = cast(dict, run_create.setdefault("extra", {})) # update runtime @@ -1180,16 +1210,24 @@ def 
_filter_for_sampling( sampled = [] for run in runs: run_id = _as_uuid(run["id"]) - if run_id in self._sampled_post_uuids: + if run_id not in self._filtered_post_uuids: sampled.append(run) - self._sampled_post_uuids.remove(run_id) + else: + self._filtered_post_uuids.remove(run_id) return sampled else: sampled = [] for run in runs: - if random.random() < self.tracing_sample_rate: + if ( + # Child run + run["id"] != run.get("trace_id") + # Whose trace is included + and run.get("trace_id") not in self._filtered_post_uuids + # Or a root that's randomly sampled + ) or random.random() < self.tracing_sample_rate: sampled.append(run) - self._sampled_post_uuids.add(_as_uuid(run["id"])) + else: + self._filtered_post_uuids.add(_as_uuid(run["id"])) return sampled def create_run( @@ -1238,7 +1276,6 @@ def create_run( if not self._filter_for_sampling([run_create]): return run_create = self._run_transform(run_create, copy=True) - self._insert_runtime_env([run_create]) if revision_id is not None: run_create["extra"]["metadata"]["revision_id"] = revision_id if ( @@ -1250,6 +1287,7 @@ def create_run( return self.tracing_queue.put( TracingQueueItem(run_create["dotted_order"], "create", run_create) ) + self._insert_runtime_env([run_create]) self._create_run(run_create) def _create_run(self, run_create: dict): @@ -1372,23 +1410,44 @@ def batch_ingest_runs( "post": [_dumps_json(run) for run in raw_body["post"]], "patch": [_dumps_json(run) for run in raw_body["patch"]], } + ids = { + "post": [ + f"trace={run.get('trace_id')},id={run.get('id')}" + for run in raw_body["post"] + ], + "patch": [ + f"trace={run.get('trace_id')},id={run.get('id')}" + for run in raw_body["patch"] + ], + } + body_chunks: DefaultDict[str, list] = collections.defaultdict(list) + context_ids: DefaultDict[str, list] = collections.defaultdict(list) body_size = 0 for key in ["post", "patch"]: body = collections.deque(partial_body[key]) + ids_ = collections.deque(ids[key]) while body: if body_size > 0 and body_size + 
len(body[0]) > size_limit_bytes: - self._post_batch_ingest_runs(orjson.dumps(body_chunks)) + self._post_batch_ingest_runs( + orjson.dumps(body_chunks), + _context=f"\n{key}: {'; '.join(context_ids[key])}", + ) body_size = 0 body_chunks.clear() + context_ids.clear() body_size += len(body[0]) body_chunks[key].append(orjson.Fragment(body.popleft())) + context_ids[key].append(ids_.popleft()) if body_size: - self._post_batch_ingest_runs(orjson.dumps(body_chunks)) + context = "; ".join(f"{k}: {'; '.join(v)}" for k, v in context_ids.items()) + self._post_batch_ingest_runs( + orjson.dumps(body_chunks), _context="\n" + context + ) - def _post_batch_ingest_runs(self, body: bytes): - try: - for api_url, api_key in self._write_api_urls.items(): + def _post_batch_ingest_runs(self, body: bytes, *, _context: str): + for api_url, api_key in self._write_api_urls.items(): + try: self.request_with_retries( "POST", f"{api_url}/runs/batch", @@ -1401,14 +1460,21 @@ def _post_batch_ingest_runs(self, body: bytes): }, to_ignore=(ls_utils.LangSmithConflictError,), stop_after_attempt=3, + _context=_context, ) - except Exception as e: - logger.warning(f"Failed to batch ingest runs: {repr(e)}") + except Exception as e: + try: + exc_desc_lines = traceback.format_exception_only(type(e), e) + exc_desc = "".join(exc_desc_lines).rstrip() + logger.warning(f"Failed to batch ingest runs: {exc_desc}") + except Exception: + logger.warning(f"Failed to batch ingest runs: {repr(e)}") def update_run( self, run_id: ID_TYPE, *, + name: Optional[str] = None, end_time: Optional[datetime.datetime] = None, error: Optional[str] = None, inputs: Optional[Dict] = None, @@ -1424,6 +1490,8 @@ def update_run( ---------- run_id : str or UUID The ID of the run to update. + name : str or None, default=None + The name of the run. end_time : datetime or None The end time of the run. 
error : str or None, default=None @@ -1443,6 +1511,7 @@ def update_run( """ data: Dict[str, Any] = { "id": _as_uuid(run_id, "run_id"), + "name": name, "trace_id": kwargs.pop("trace_id", None), "parent_run_id": kwargs.pop("parent_run_id", None), "dotted_order": kwargs.pop("dotted_order", None), @@ -1747,6 +1816,93 @@ def list_runs( if limit is not None and i + 1 >= limit: break + def get_run_stats( + self, + *, + id: Optional[List[ID_TYPE]] = None, + trace: Optional[ID_TYPE] = None, + parent_run: Optional[ID_TYPE] = None, + run_type: Optional[str] = None, + project_names: Optional[List[str]] = None, + project_ids: Optional[List[ID_TYPE]] = None, + reference_example_ids: Optional[List[ID_TYPE]] = None, + start_time: Optional[str] = None, + end_time: Optional[str] = None, + error: Optional[bool] = None, + query: Optional[str] = None, + filter: Optional[str] = None, + trace_filter: Optional[str] = None, + tree_filter: Optional[str] = None, + is_root: Optional[bool] = None, + data_source_type: Optional[str] = None, + ) -> Dict[str, Any]: + """Get aggregate statistics over queried runs. + + Takes in similar query parameters to `list_runs` and returns statistics + based on the runs that match the query. + + Args: + id (Optional[List[ID_TYPE]]): List of run IDs to filter by. + trace (Optional[ID_TYPE]): Trace ID to filter by. + parent_run (Optional[ID_TYPE]): Parent run ID to filter by. + run_type (Optional[str]): Run type to filter by. + projects (Optional[List[ID_TYPE]]): List of session IDs to filter by. + reference_example (Optional[List[ID_TYPE]]): List of reference example IDs to filter by. + start_time (Optional[str]): Start time to filter by. + end_time (Optional[str]): End time to filter by. + error (Optional[bool]): Filter by error status. + query (Optional[str]): Query string to filter by. + filter (Optional[str]): Filter string to apply. + trace_filter (Optional[str]): Trace filter string to apply. + tree_filter (Optional[str]): Tree filter string to apply. 
+ is_root (Optional[bool]): Filter by root run status. + data_source_type (Optional[str]): Data source type to filter by. + + Returns: + Dict[str, Any]: A dictionary containing the run statistics. + """ # noqa: E501 + from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore + + project_ids = project_ids or [] + if project_names: + with ThreadPoolExecutor() as executor: + futures = [ + executor.submit(self.read_project, project_name=name) + for name in project_names + ] + for future in as_completed(futures): + project_ids.append(future.result().id) + payload = { + "id": id, + "trace": trace, + "parent_run": parent_run, + "run_type": run_type, + "session": project_ids, + "reference_example": reference_example_ids, + "start_time": start_time, + "end_time": end_time, + "error": error, + "query": query, + "filter": filter, + "trace_filter": trace_filter, + "tree_filter": tree_filter, + "is_root": is_root, + "data_source_type": data_source_type, + } + + # Remove None values from the payload + payload = {k: v for k, v in payload.items() if v is not None} + + response = self.request_with_retries( + "POST", + "/runs/stats", + request_kwargs={ + "data": _dumps_json(payload), + }, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + def get_run_url( self, *, @@ -1756,6 +1912,10 @@ def get_run_url( ) -> str: """Get the URL for a run. + Not recommended for use within your agent runtime. + More for use interacting with runs after the fact + for data analysis or ETL workloads. + Parameters ---------- run : Run @@ -1770,8 +1930,10 @@ def get_run_url( str The URL for the run. 
""" - if hasattr(run, "session_id") and run.session_id is not None: - session_id = run.session_id + if session_id := getattr(run, "session_id", None): + pass + elif session_name := getattr(run, "session_name", None): + session_id = self.read_project(project_name=session_name).id elif project_id is not None: session_id = project_id elif project_name is not None: @@ -1837,21 +1999,32 @@ def run_is_shared(self, run_id: ID_TYPE) -> bool: link = self.read_run_shared_link(_as_uuid(run_id, "run_id")) return link is not None - def list_shared_runs( - self, share_token: ID_TYPE, run_ids: Optional[List[str]] = None - ) -> List[ls_schemas.Run]: + def read_shared_run( + self, share_token: Union[ID_TYPE, str], run_id: Optional[ID_TYPE] = None + ) -> ls_schemas.Run: """Get shared runs.""" - params = {"id": run_ids, "share_token": str(share_token)} + _, token_uuid = _parse_token_or_url(share_token, "", kind="run") + path = f"/public/{token_uuid}/run" + if run_id is not None: + path += f"/{_as_uuid(run_id, 'run_id')}" response = self.request_with_retries( "GET", - f"/public/{_as_uuid(share_token, 'share_token')}/runs", + path, headers=self._headers, - params=params, ) ls_utils.raise_for_status_with_text(response) - return [ - ls_schemas.Run(**run, _host_url=self._host_url) for run in response.json() - ] + return ls_schemas.Run(**response.json(), _host_url=self._host_url) + + def list_shared_runs( + self, share_token: Union[ID_TYPE, str], run_ids: Optional[List[str]] = None + ) -> Iterator[ls_schemas.Run]: + """Get shared runs.""" + body = {"id": run_ids} if run_ids else {} + _, token_uuid = _parse_token_or_url(share_token, "", kind="run") + for run in self._get_cursor_paginated_list( + f"/public/{token_uuid}/runs/query", body=body + ): + yield ls_schemas.Run(**run, _host_url=self._host_url) def read_dataset_shared_schema( self, @@ -2208,14 +2381,14 @@ def get_test_results( *, project_id: Optional[ID_TYPE] = None, project_name: Optional[str] = None, - ) -> "pd.DataFrame": + ) -> 
pd.DataFrame: """Read the record-level information from an experiment into a Pandas DF. Note: this will fetch whatever data exists in the DB. Results are not immediately available in the DB upon evaluation run completion. Returns: - ------- + -------- pd.DataFrame A dataframe containing the test results. """ @@ -2312,6 +2485,7 @@ def list_projects( reference_dataset_name: Optional[str] = None, reference_free: Optional[bool] = None, limit: Optional[int] = None, + metadata: Optional[Dict[str, Any]] = None, ) -> Iterator[ls_schemas.TracerSession]: """List projects from the LangSmith API. @@ -2331,6 +2505,8 @@ def list_projects( Whether to filter for only projects not associated with a dataset. limit : Optional[int], optional The maximum number of projects to return, by default None + metadata: Optional[Dict[str, Any]], optional + Metadata to filter by. Yields: ------ @@ -2360,6 +2536,8 @@ def list_projects( params["reference_dataset"] = reference_dataset_id if reference_free is not None: params["reference_free"] = reference_free + if metadata is not None: + params["metadata"] = json.dumps(metadata) for i, project in enumerate( self._get_paginated_list("/sessions", params=params) ): @@ -2397,6 +2575,9 @@ def create_dataset( *, description: Optional[str] = None, data_type: ls_schemas.DataType = ls_schemas.DataType.kv, + inputs_schema: Optional[Dict[str, Any]] = None, + outputs_schema: Optional[Dict[str, Any]] = None, + metadata: Optional[dict] = None, ) -> ls_schemas.Dataset: """Create a dataset in the LangSmith API. @@ -2408,24 +2589,37 @@ def create_dataset( The description of the dataset. data_type : DataType or None, default=DataType.kv The data type of the dataset. + metadata: dict or None, default=None + Additional metadata to associate with the dataset. Returns: ------- Dataset The created dataset. 
""" - dataset = ls_schemas.DatasetCreate( - name=dataset_name, - description=description, - data_type=data_type, - ) + dataset: Dict[str, Any] = { + "name": dataset_name, + "data_type": data_type.value, + "created_at": datetime.datetime.now().isoformat(), + "extra": {"metadata": metadata} if metadata else None, + } + if description is not None: + dataset["description"] = description + + if inputs_schema is not None: + dataset["inputs_schema_definition"] = inputs_schema + + if outputs_schema is not None: + dataset["outputs_schema_definition"] = outputs_schema + response = self.request_with_retries( "POST", "/datasets", headers={**self._headers, "Content-Type": "application/json"}, - data=dataset.json(), + data=orjson.dumps(dataset), ) ls_utils.raise_for_status_with_text(response) + return ls_schemas.Dataset( **response.json(), _host_url=self._host_url, @@ -2534,7 +2728,7 @@ def diff_dataset_versions( Examples: -------- - ..code-block:: python + .. code-block:: python # Get the difference between two tagged versions of a dataset from_version = "prod" @@ -2547,7 +2741,6 @@ def diff_dataset_versions( print(diff) # Get the difference between two timestamped versions of a dataset - from_version = datetime.datetime(2024, 1, 1) to_version = datetime.datetime(2024, 2, 1) diff = client.diff_dataset_versions( @@ -2620,12 +2813,13 @@ def list_datasets( data_type: Optional[str] = None, dataset_name: Optional[str] = None, dataset_name_contains: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, limit: Optional[int] = None, ) -> Iterator[ls_schemas.Dataset]: """List the datasets on the LangSmith API. Yields: - ------ + ------- Dataset The datasets. 
""" @@ -2640,6 +2834,8 @@ def list_datasets( params["name"] = dataset_name if dataset_name_contains is not None: params["name_contains"] = dataset_name_contains + if metadata is not None: + params["metadata"] = json.dumps(metadata) for i, dataset in enumerate( self._get_paginated_list("/datasets", params=params) ): @@ -2794,7 +2990,7 @@ def read_dataset_version( Examples: - -------- + --------- .. code-block:: python # Get the latest version of a dataset @@ -2827,7 +3023,7 @@ def clone_public_dataset( *, source_api_url: Optional[str] = None, dataset_name: Optional[str] = None, - ) -> None: + ) -> ls_schemas.Dataset: """Clone a public dataset to your own langsmith tenant. This operation is idempotent. If you already have a dataset with the given name, @@ -2839,7 +3035,6 @@ def clone_public_dataset( Defaults to the API URL of your current client. dataset_name (str): The name of the dataset to create in your tenant. Defaults to the name of the public dataset. - """ source_api_url = source_api_url or self.api_url source_api_url, token_uuid = _parse_token_or_url(token_or_url, source_api_url) @@ -2852,11 +3047,15 @@ def clone_public_dataset( ) ds = source_client.read_shared_dataset(token_uuid) dataset_name = dataset_name or ds.name - if self.has_dataset(dataset_name=dataset_name): + try: + ds = self.read_dataset(dataset_name=dataset_name) logger.info( f"Dataset {dataset_name} already exists in your tenant. Skipping." 
) - return + return ds + except ls_utils.LangSmithNotFoundError: + pass + try: # Fetch examples first examples = list(source_client.list_shared_examples(token_uuid)) @@ -2884,6 +3083,7 @@ def clone_public_dataset( raise e finally: del source_client + return dataset def _get_data_type(self, dataset_id: ID_TYPE) -> ls_schemas.DataType: dataset = self.read_dataset(dataset_id=dataset_id) @@ -2954,7 +3154,7 @@ def create_example_from_run( dataset_name: Optional[str] = None, created_at: Optional[datetime.datetime] = None, ) -> ls_schemas.Example: - """Add an example (row) to an LLM-type dataset.""" + """Add an example (row) to a dataset from a run.""" if dataset_id is None: dataset_id = self.read_dataset(dataset_name=dataset_name).id dataset_name = None # Nested call expects only 1 defined @@ -3049,7 +3249,7 @@ def create_examples( The output values for the examples. metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None The metadata for the examples. - split : Optional[Sequence[Optional[str | List[str]]]], default=None + splits : Optional[Sequence[Optional[str | List[str]]]], default=None The splits for the examples, which are divisions of your dataset such as 'train', 'test', or 'validation'. source_run_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None @@ -3060,15 +3260,6 @@ def create_examples( The ID of the dataset to create the examples in. dataset_name : Optional[str], default=None The name of the dataset to create the examples in. - - Returns: - ------- - None - - Raises: - ------ - ValueError - If both `dataset_id` and `dataset_name` are `None`. 
""" if dataset_id is None and dataset_name is None: raise ValueError("Either dataset_id or dataset_name must be provided.") @@ -3114,6 +3305,7 @@ def create_example( metadata: Optional[Mapping[str, Any]] = None, split: Optional[str | List[str]] = None, example_id: Optional[ID_TYPE] = None, + source_run_id: Optional[ID_TYPE] = None, ) -> ls_schemas.Example: """Create a dataset example in the LangSmith API. @@ -3137,9 +3329,11 @@ def create_example( split : str or List[str] or None, default=None The splits for the example, which are divisions of your dataset such as 'train', 'test', or 'validation'. - exemple_id : UUID or None, default=None + example_id : UUID or None, default=None The ID of the example to create. If not provided, a new example will be created. + source_run_id : UUID or None, default=None + The ID of the source run associated with this example. Returns: Example: The created example. @@ -3153,6 +3347,7 @@ def create_example( "dataset_id": dataset_id, "metadata": metadata, "split": split, + "source_run_id": source_run_id, } if created_at: data["created_at"] = created_at.isoformat() @@ -3203,8 +3398,11 @@ def list_examples( as_of: Optional[Union[datetime.datetime, str]] = None, splits: Optional[Sequence[str]] = None, inline_s3_urls: bool = True, + *, + offset: int = 0, limit: Optional[int] = None, metadata: Optional[dict] = None, + filter: Optional[str] = None, **kwargs: Any, ) -> Iterator[ls_schemas.Example]: """Retrieve the example rows of the specified dataset. @@ -3225,13 +3423,17 @@ def list_examples( Returns examples only from the specified splits. inline_s3_urls (bool, optional): Whether to inline S3 URLs. Defaults to True. + offset (int): The offset to start from. Defaults to 0. limit (int, optional): The maximum number of examples to return. + filter (str, optional): A structured fileter string to apply to + the examples. Yields: Example: The examples. 
""" params: Dict[str, Any] = { **kwargs, + "offset": offset, "id": example_ids, "as_of": ( as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of @@ -3239,6 +3441,7 @@ def list_examples( "splits": splits, "inline_s3_urls": inline_s3_urls, "limit": min(limit, 100) if limit is not None else 100, + "filter": filter, } if metadata is not None: params["metadata"] = _dumps_json(metadata) @@ -3260,6 +3463,130 @@ def list_examples( if limit is not None and i + 1 >= limit: break + @warn_beta + def index_dataset( + self, + *, + dataset_id: ID_TYPE, + tag: str = "latest", + **kwargs: Any, + ) -> None: + """Enable dataset indexing. Examples are indexed by their inputs. + + This enables searching for similar examples by inputs with + ``client.similar_examples()``. + + Args: + dataset_id (UUID): The ID of the dataset to index. + tag (str, optional): The version of the dataset to index. If 'latest' + then any updates to the dataset (additions, updates, deletions of + examples) will be reflected in the index. + + Returns: + None + + Raises: + requests.HTTPError + """ # noqa: E501 + dataset_id = _as_uuid(dataset_id, "dataset_id") + resp = self.request_with_retries( + "POST", + f"/datasets/{dataset_id}/index", + headers=self._headers, + data=json.dumps({"tag": tag, **kwargs}), + ) + ls_utils.raise_for_status_with_text(resp) + + # NOTE: dataset_name arg explicitly not supported to avoid extra API calls. + @warn_beta + def similar_examples( + self, + inputs: dict, + /, + *, + limit: int, + dataset_id: ID_TYPE, + filter: Optional[str] = None, + **kwargs: Any, + ) -> List[ls_schemas.ExampleSearch]: + r"""Retrieve the dataset examples whose inputs best match the current inputs. + + **Note**: Must have few-shot indexing enabled for the dataset. See + `client.index_dataset()`. + + Args: + inputs (dict): The inputs to use as a search query. Must match the dataset + input schema. Must be JSON serializable. + limit (int): The maximum number of examples to return. 
+ dataset_id (str or UUID): The ID of the dataset to search over. + filter (str, optional): A filter string to apply to the search results. Uses + the same syntax as the `filter` parameter in `list_runs()`. Only a subset + of operations are supported. Defaults to None. + + For example, you can use ``and(eq(metadata.some_tag, 'some_value'), neq(metadata.env, 'dev'))`` + to filter only examples where some_tag has some_value, and the environment is not dev. + kwargs (Any): Additional keyword args to pass as part of request body. + + Examples: + .. code-block:: python + + from langsmith import Client + + client = Client() + client.similar_examples( + {"question": "When would i use the runnable generator"}, + limit=3, + dataset_id="...", + ) + + .. code-block:: pycon + + [ + ExampleSearch( + inputs={'question': 'How do I cache a Chat model? What caches can I use?'}, + outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\nYou can also use SQLite Cache which uses a SQLite database:\n\nrm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n'}, + metadata=None, + id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': "What's a runnable lambda?"}, + outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). 
Note the function must accept a single argument."}, + metadata=None, + id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': 'Show me how to use RecursiveURLLoader'}, + outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'}, + metadata=None, + id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ] + + """ + dataset_id = _as_uuid(dataset_id, "dataset_id") + req = { + "inputs": inputs, + "limit": limit, + **kwargs, + } + if filter is not None: + req["filter"] = filter + + resp = self.request_with_retries( + "POST", + f"/datasets/{dataset_id}/search", + headers=self._headers, + data=json.dumps(req), + ) + ls_utils.raise_for_status_with_text(resp) + examples = [] + for ex in resp.json()["examples"]: + examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id)) + return examples + def update_example( self, example_id: ID_TYPE, @@ -3309,6 +3636,73 @@ def update_example( ls_utils.raise_for_status_with_text(response) return response.json() + def update_examples( + self, + *, + example_ids: Sequence[ID_TYPE], + inputs: Optional[Sequence[Optional[Dict[str, Any]]]] = None, + outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, + metadata: 
Optional[Sequence[Optional[Dict]]] = None, + splits: Optional[Sequence[Optional[str | List[str]]]] = None, + dataset_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, + ) -> Dict[str, Any]: + """Update multiple examples. + + Parameters + ---------- + example_ids : Sequence[ID_TYPE] + The IDs of the examples to update. + inputs : Optional[Sequence[Optional[Dict[str, Any]]], default=None + The input values for the examples. + outputs : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None + The output values for the examples. + metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None + The metadata for the examples. + split : Optional[Sequence[Optional[str | List[str]]]], default=None + The splits for the examples, which are divisions + of your dataset such as 'train', 'test', or 'validation'. + dataset_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None + The IDs of the datasets to move the examples to. + + Returns: + ------- + Dict[str, Any] + The response from the server (specifies the number of examples updated). + """ + examples = [ + { + "id": id_, + "inputs": in_, + "outputs": out_, + "dataset_id": dataset_id_, + "metadata": metadata_, + "split": split_, + } + for id_, in_, out_, metadata_, split_, dataset_id_ in zip( + example_ids, + inputs or [None] * len(example_ids), + outputs or [None] * len(example_ids), + metadata or [None] * len(example_ids), + splits or [None] * len(example_ids), + dataset_ids or [None] * len(example_ids), + ) + ] + response = self.request_with_retries( + "PATCH", + "/examples/bulk", + headers={**self._headers, "Content-Type": "application/json"}, + data=( + _dumps_json( + [ + {k: v for k, v in example.items() if v is not None} + for example in examples + ] + ) + ), + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + def delete_example(self, example_id: ID_TYPE) -> None: """Delete an example by ID. 
@@ -3324,11 +3718,87 @@ def delete_example(self, example_id: ID_TYPE) -> None: ) ls_utils.raise_for_status_with_text(response) - def _resolve_run_id( + def list_dataset_splits( self, - run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID], - load_child_runs: bool, - ) -> ls_schemas.Run: + *, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + as_of: Optional[Union[str, datetime.datetime]] = None, + ) -> List[str]: + """Get the splits for a dataset. + + Args: + dataset_id (ID_TYPE): The ID of the dataset. + as_of (Optional[Union[str, datetime.datetime]], optional): The version + of the dataset to retrieve splits for. Can be a timestamp or a + string tag. Defaults to "latest". + + Returns: + List[str]: The names of this dataset's. + """ + if dataset_id is None: + if dataset_name is None: + raise ValueError("Must provide dataset name or ID") + dataset_id = self.read_dataset(dataset_name=dataset_name).id + params = {} + if as_of is not None: + params["as_of"] = ( + as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of + ) + + response = self.request_with_retries( + "GET", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", + params=params, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + + def update_dataset_splits( + self, + *, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + split_name: str, + example_ids: List[ID_TYPE], + remove: bool = False, + ) -> None: + """Update the splits for a dataset. + + Args: + dataset_id (ID_TYPE): The ID of the dataset to update. + split_name (str): The name of the split to update. + example_ids (List[ID_TYPE]): The IDs of the examples to add to or + remove from the split. + remove (bool, optional): If True, remove the examples from the split. + If False, add the examples to the split. Defaults to False. 
+ + Returns: + None + """ + if dataset_id is None: + if dataset_name is None: + raise ValueError("Must provide dataset name or ID") + dataset_id = self.read_dataset(dataset_name=dataset_name).id + data = { + "split_name": split_name, + "examples": [ + str(_as_uuid(id_, f"example_ids[{i}]")) + for i, id_ in enumerate(example_ids) + ], + "remove": remove, + } + + response = self.request_with_retries( + "PUT", f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", json=data + ) + ls_utils.raise_for_status_with_text(response) + + def _resolve_run_id( + self, + run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID], + load_child_runs: bool, + ) -> ls_schemas.Run: """Resolve the run ID. Parameters @@ -3391,25 +3861,40 @@ def _resolve_example_id( def _select_eval_results( self, - results: Union[ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults], + results: Union[ + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict + ], *, fn_name: Optional[str] = None, ) -> List[ls_evaluator.EvaluationResult]: from langsmith.evaluation import evaluator as ls_evaluator # noqa: F811 + def _cast_result( + single_result: Union[ls_evaluator.EvaluationResult, dict], + ) -> ls_evaluator.EvaluationResult: + if isinstance(single_result, dict): + return ls_evaluator.EvaluationResult( + **{ + "key": fn_name, + "comment": single_result.get("reasoning"), + **single_result, + } + ) + return single_result + + def _is_eval_results(results: Any) -> TypeGuard[ls_evaluator.EvaluationResults]: + return isinstance(results, dict) and "results" in results + if isinstance(results, ls_evaluator.EvaluationResult): results_ = [results] + elif _is_eval_results(results): + results_ = [_cast_result(r) for r in results["results"]] elif isinstance(results, dict): - if "results" in results: - results_ = cast(List[ls_evaluator.EvaluationResult], results["results"]) - else: - results_ = [ - ls_evaluator.EvaluationResult(**{"key": fn_name, **results}) # type: ignore[arg-type] - ] + 
results_ = [_cast_result(cast(dict, results))] else: - raise TypeError( - f"Invalid evaluation result type {type(results)}." - " Expected EvaluationResult or EvaluationResults." + raise ValueError( + f"Invalid evaluation results type: {type(results)}." + " Must be EvaluationResult, EvaluationResults." ) return results_ @@ -3463,13 +3948,22 @@ def evaluate_run( def _log_evaluation_feedback( self, evaluator_response: Union[ - ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict ], run: Optional[ls_schemas.Run] = None, source_info: Optional[Dict[str, Any]] = None, project_id: Optional[ID_TYPE] = None, + *, + _executor: Optional[cf.ThreadPoolExecutor] = None, ) -> List[ls_evaluator.EvaluationResult]: results = self._select_eval_results(evaluator_response) + + def _submit_feedback(**kwargs): + if _executor: + _executor.submit(self.create_feedback, **kwargs) + else: + self.create_feedback(**kwargs) + for res in results: source_info_ = source_info or {} if res.evaluator_info: @@ -3479,9 +3973,10 @@ def _log_evaluation_feedback( run_id_ = res.target_run_id elif run is not None: run_id_ = run.id - self.create_feedback( - run_id_, - res.key, + + _submit_feedback( + run_id=run_id_, + key=res.key, score=res.score, value=res.value, comment=res.comment, @@ -3549,7 +4044,7 @@ def create_feedback( key: str, *, score: Union[float, int, bool, None] = None, - value: Union[float, int, bool, str, dict, None] = None, + value: Union[str, dict, None] = None, correction: Union[dict, None] = None, comment: Union[str, None] = None, source_info: Optional[Dict[str, Any]] = None, @@ -3593,7 +4088,7 @@ def create_feedback( feedback_id : str or UUID or None, default=None The ID of the feedback to create. If not provided, a random UUID will be generated. 
- feedback_config: FeedbackConfig or None, default=None, + feedback_config: langsmith.schemas.FeedbackConfig or None, default=None, The configuration specifying how to interpret feedback with this key. Examples include continuous (with min/max bounds), categorical, or freeform. @@ -3650,7 +4145,9 @@ def create_feedback( feedback_source.metadata["__run"] = _run_meta feedback = ls_schemas.FeedbackCreate( id=_ensure_uuid(feedback_id), - run_id=_ensure_uuid(run_id), + # If run_id is None, this is interpreted as session-level + # feedback. + run_id=_ensure_uuid(run_id, accept_null=True), key=key, score=score, value=value, @@ -3973,23 +4470,48 @@ def create_presigned_feedback_tokens( else: raise ValueError(f"Unknown expiration type: {type(expiration)}") # assemble body, one entry per key - body: List[Dict[str, Any]] = [ - { - "run_id": run_id, - "feedback_key": feedback_key, - "feedback_config": feedback_config, - "expires_in": expires_in, - "expires_at": expires_at, - } - for feedback_key, feedback_config in zip(feedback_keys, feedback_configs) - ] - response = self.request_with_retries( - "POST", - "/feedback/tokens", - data=_dumps_json(body), + body = _dumps_json( + [ + { + "run_id": run_id, + "feedback_key": feedback_key, + "feedback_config": feedback_config, + "expires_in": expires_in, + "expires_at": expires_at, + } + for feedback_key, feedback_config in zip( + feedback_keys, feedback_configs + ) + ] ) - ls_utils.raise_for_status_with_text(response) - return [ls_schemas.FeedbackIngestToken(**part) for part in response.json()] + + def req(api_url: str, api_key: Optional[str]) -> list: + response = self.request_with_retries( + "POST", + f"{api_url}/feedback/tokens", + request_kwargs={ + "data": body, + "headers": { + **self._headers, + X_API_KEY: api_key or self.api_key, + }, + }, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + + tokens = [] + with cf.ThreadPoolExecutor(max_workers=len(self._write_api_urls)) as executor: + futs = [ + 
executor.submit(req, api_url, api_key) + for api_url, api_key in self._write_api_urls.items() + ] + for fut in cf.as_completed(futs): + response = fut.result() + tokens.extend( + [ls_schemas.FeedbackIngestToken(**part) for part in response] + ) + return tokens def list_presigned_feedback_tokens( self, @@ -4161,28 +4683,46 @@ def add_runs_to_annotation_queue( ) ls_utils.raise_for_status_with_text(response) - def list_runs_from_annotation_queue( - self, queue_id: ID_TYPE, *, limit: Optional[int] = None - ) -> Iterator[ls_schemas.RunWithAnnotationQueueInfo]: - """List runs from an annotation queue with the specified queue ID. + def delete_run_from_annotation_queue( + self, queue_id: ID_TYPE, *, run_id: ID_TYPE + ) -> None: + """Delete a run from an annotation queue with the specified queue ID and run ID. + + Args: + queue_id (ID_TYPE): The ID of the annotation queue. + run_id (ID_TYPE): The ID of the run to be added to the annotation + queue. + """ + response = self.request_with_retries( + "DELETE", + f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}/runs/{_as_uuid(run_id, 'run_id')}", + ) + ls_utils.raise_for_status_with_text(response) + + def get_run_from_annotation_queue( + self, queue_id: ID_TYPE, *, index: int + ) -> ls_schemas.RunWithAnnotationQueueInfo: + """Get a run from an annotation queue at the specified index. Args: queue_id (ID_TYPE): The ID of the annotation queue. + index (int): The index of the run to retrieve. - Yields: - ls_schemas.RunWithAnnotationQueueInfo: An iterator of runs from the - annotation queue. + Returns: + ls_schemas.RunWithAnnotationQueueInfo: The run at the specified index. + + Raises: + ls_utils.LangSmithNotFoundError: If the run is not found at the given index. + ls_utils.LangSmithError: For other API-related errors. 
""" - path = f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}/runs" - limit_ = min(limit, 100) if limit is not None else 100 - for i, run in enumerate( - self._get_paginated_list( - path, params={"headers": self._headers, "limit": limit_} - ) - ): - yield ls_schemas.RunWithAnnotationQueueInfo(**run) - if limit is not None and i + 1 >= limit: - break + base_url = f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}/run" + response = self.request_with_retries( + "GET", + f"{base_url}/{index}", + headers=self._headers, + ) + ls_utils.raise_for_status_with_text(response) + return ls_schemas.RunWithAnnotationQueueInfo(**response.json()) def create_comparative_experiment( self, @@ -4259,115 +4799,10 @@ async def arun_on_dataset( ) -> Dict[str, Any]: """Asynchronously run the Chain or language model on a dataset. - Store traces to the specified project name. - - Args: - dataset_name: Name of the dataset to run the chain on. - llm_or_chain_factory: Language model or Chain constructor to run - over the dataset. The Chain constructor is used to permit - independent calls on each example without carrying over state. - evaluation: Optional evaluation configuration to use when evaluating - concurrency_level: The number of async tasks to run concurrently. - project_name: Name of the project to store the traces in. - Defaults to a randomly generated name. - project_metadata: Optional metadata to store with the project. - dataset_version: Optional version identifier to run the dataset on. - Can be a timestamp or a string tag. - verbose: Whether to print progress. - tags: Tags to add to each run in the project. - input_mapper: A function to map to the inputs dictionary from an Example - to the format expected by the model to be evaluated. This is useful if - your model needs to deserialize more complex schema or if your dataset - has inputs with keys that differ from what is expected by your chain - or agent. 
- revision_id: Optional revision identifier to assign this test run to - track the performance of different versions of your system. - - Returns: - A dictionary containing the run's project name and the - resulting model outputs. + .. deprecated:: 0.1.0 + This method is deprecated. Use :func:`langsmith.aevaluate` instead. - For the synchronous version, see client.run_on_dataset. - - Examples: - -------- - .. code-block:: python - - from langsmith import Client - from langchain.chat_models import ChatOpenAI - from langchain.chains import LLMChain - from langchain.smith import RunEvalConfig - - - # Chains may have memory. Passing in a constructor function lets the - # evaluation framework avoid cross-contamination between runs. - def construct_chain(): - llm = ChatOpenAI(temperature=0) - chain = LLMChain.from_string(llm, "What's the answer to {your_input_key}") - return chain - - - # Load off-the-shelf evaluators via config or the EvaluatorType (string or enum) - evaluation_config = RunEvalConfig( - evaluators=[ - "qa", # "Correctness" against a reference answer - "embedding_distance", - RunEvalConfig.Criteria("helpfulness"), - RunEvalConfig.Criteria( - { - "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?" - } - ), - ] - ) - - client = Client() - await client.arun_on_dataset( - "", - construct_chain, - evaluation=evaluation_config, - ) - - You can also create custom evaluators by subclassing the - :class:`StringEvaluator ` - or LangSmith's `RunEvaluator` classes. - - .. 
code-block:: python - - from typing import Optional - from langchain.evaluation import StringEvaluator - - - class MyStringEvaluator(StringEvaluator): - @property - def requires_input(self) -> bool: - return False - - @property - def requires_reference(self) -> bool: - return True - - @property - def evaluation_name(self) -> str: - return "exact_match" - - def _evaluate_strings( - self, prediction, reference=None, input=None, **kwargs - ) -> dict: - return {"score": prediction == reference} - - - evaluation_config = RunEvalConfig( - custom_evaluators=[MyStringEvaluator()], - ) - - await client.arun_on_dataset( - "", - construct_chain, - evaluation=evaluation_config, - ) """ # noqa: E501 - # warn as deprecated and to use `aevaluate` instead warnings.warn( "The `arun_on_dataset` method is deprecated and" " will be removed in a future version." @@ -4413,142 +4848,651 @@ def run_on_dataset( ) -> Dict[str, Any]: """Run the Chain or language model on a dataset. - Store traces to the specified project name. + .. deprecated:: 0.1.0 + This method is deprecated. Use :func:`langsmith.aevaluate` instead. + + """ # noqa: E501 # noqa: E501 + warnings.warn( + "The `run_on_dataset` method is deprecated and" + " will be removed in a future version." 
+ "Please use the `evaluate` method instead.", + DeprecationWarning, + ) + try: + from langchain.smith import run_on_dataset as _run_on_dataset + except ImportError: + raise ImportError( + "The client.run_on_dataset function requires the langchain" + "package to run.\nInstall with pip install langchain" + ) + return _run_on_dataset( + dataset_name=dataset_name, + llm_or_chain_factory=llm_or_chain_factory, + concurrency_level=concurrency_level, + client=self, + evaluation=evaluation, + project_name=project_name, + project_metadata=project_metadata, + verbose=verbose, + input_mapper=input_mapper, + revision_id=revision_id, + dataset_version=dataset_version, + **kwargs, + ) + + def _current_tenant_is_owner(self, owner: str) -> bool: + """Check if the current workspace has the same handle as owner. Args: - dataset_name: Name of the dataset to run the chain on. - llm_or_chain_factory: Language model or Chain constructor to run - over the dataset. The Chain constructor is used to permit - independent calls on each example without carrying over state. - evaluation: Configuration for evaluators to run on the - results of the chain - concurrency_level: The number of tasks to execute concurrently. - project_name: Name of the project to store the traces in. - Defaults to a randomly generated name. - project_metadata: Metadata to store with the project. - dataset_version: Optional version identifier to run the dataset on. - Can be a timestamp or a string tag. - verbose: Whether to print progress. - tags: Tags to add to each run in the project. - input_mapper: A function to map to the inputs dictionary from an Example - to the format expected by the model to be evaluated. This is useful if - your model needs to deserialize more complex schema or if your dataset - has inputs with keys that differ from what is expected by your chain - or agent. - revision_id: Optional revision identifier to assign this test run to - track the performance of different versions of your system. 
+ owner (str): The owner to check against. Returns: - A dictionary containing the run's project name and the resulting model outputs. + bool: True if the current tenant is the owner, False otherwise. + """ + settings = self._get_settings() + return owner == "-" or settings.tenant_handle == owner + + def _owner_conflict_error( + self, action: str, owner: str + ) -> ls_utils.LangSmithUserError: + return ls_utils.LangSmithUserError( + f"Cannot {action} for another tenant.\n" + f"Current tenant: {self._get_settings().tenant_handle},\n" + f"Requested tenant: {owner}" + ) + def _get_latest_commit_hash( + self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0 + ) -> Optional[str]: + """Get the latest commit hash for a prompt. - For the (usually faster) async version of this function, see `client.arun_on_dataset`. + Args: + prompt_owner_and_name (str): The owner and name of the prompt. + limit (int): The maximum number of commits to fetch. Defaults to 1. + offset (int): The number of commits to skip. Defaults to 0. - Examples: - -------- - .. code-block:: python + Returns: + Optional[str]: The latest commit hash, or None if no commits are found. + """ + response = self.request_with_retries( + "GET", + f"/commits/{prompt_owner_and_name}/", + params={"limit": limit, "offset": offset}, + ) + commits = response.json()["commits"] + return commits[0]["commit_hash"] if commits else None - from langsmith import Client - from langchain.chat_models import ChatOpenAI - from langchain.chains import LLMChain - from langchain.smith import RunEvalConfig + def _like_or_unlike_prompt( + self, prompt_identifier: str, like: bool + ) -> Dict[str, int]: + """Like or unlike a prompt. + Args: + prompt_identifier (str): The identifier of the prompt. + like (bool): True to like the prompt, False to unlike it. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. 
+ + Raises: + requests.exceptions.HTTPError: If the prompt is not found or + another error occurs. + """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries( + "POST", f"/likes/{owner}/{prompt_name}", json={"like": like} + ) + response.raise_for_status() + return response.json() - # Chains may have memory. Passing in a constructor function lets the - # evaluation framework avoid cross-contamination between runs. - def construct_chain(): - llm = ChatOpenAI(temperature=0) - chain = LLMChain.from_string(llm, "What's the answer to {your_input_key}") - return chain + def _get_prompt_url(self, prompt_identifier: str) -> str: + """Get a URL for a prompt. + Args: + prompt_identifier (str): The identifier of the prompt. - # Load off-the-shelf evaluators via config or the EvaluatorType (string or enum) - evaluation_config = RunEvalConfig( - evaluators=[ - "qa", # "Correctness" against a reference answer - "embedding_distance", - RunEvalConfig.Criteria("helpfulness"), - RunEvalConfig.Criteria( - { - "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?" - } - ), - ] + Returns: + str: The URL for the prompt. + + """ + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( + prompt_identifier + ) + + if not self._current_tenant_is_owner(owner): + return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" + + settings = self._get_settings() + return ( + f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" + f"?organizationId={settings.id}" + ) + + def _prompt_exists(self, prompt_identifier: str) -> bool: + """Check if a prompt exists. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + bool: True if the prompt exists, False otherwise. + """ + prompt = self.get_prompt(prompt_identifier) + return True if prompt else False + + def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Like a prompt. 
+ + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. + + """ + return self._like_or_unlike_prompt(prompt_identifier, like=True) + + def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Unlike a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. + + """ + return self._like_or_unlike_prompt(prompt_identifier, like=False) + + def list_prompts( + self, + *, + limit: int = 100, + offset: int = 0, + is_public: Optional[bool] = None, + is_archived: Optional[bool] = False, + sort_field: ls_schemas.PromptSortField = ls_schemas.PromptSortField.updated_at, + sort_direction: Literal["desc", "asc"] = "desc", + query: Optional[str] = None, + ) -> ls_schemas.ListPromptsResponse: + """List prompts with pagination. + + Args: + limit (int): The maximum number of prompts to return. Defaults to 100. + offset (int): The number of prompts to skip. Defaults to 0. + is_public (Optional[bool]): Filter prompts by if they are public. + is_archived (Optional[bool]): Filter prompts by if they are archived. + sort_field (ls_schemas.PromptsSortField): The field to sort by. + Defaults to "updated_at". + sort_direction (Literal["desc", "asc"]): The order to sort by. + Defaults to "desc". + query (Optional[str]): Filter prompts by a search query. + + Returns: + ls_schemas.ListPromptsResponse: A response object containing + the list of prompts. 
+ """ + params = { + "limit": limit, + "offset": offset, + "is_public": ( + "true" if is_public else "false" if is_public is not None else None + ), + "is_archived": "true" if is_archived else "false", + "sort_field": sort_field, + "sort_direction": sort_direction, + "query": query, + "match_prefix": "true" if query else None, + } + + response = self.request_with_retries("GET", "/repos/", params=params) + return ls_schemas.ListPromptsResponse(**response.json()) + + def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: + """Get a specific prompt by its identifier. + + Args: + prompt_identifier (str): The identifier of the prompt. + The identifier should be in the format "prompt_name" or "owner/prompt_name". + + Returns: + Optional[ls_schemas.Prompt]: The prompt object. + + Raises: + requests.exceptions.HTTPError: If the prompt is not found or + another error occurs. + """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + try: + response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}") + return ls_schemas.Prompt(**response.json()["repo"]) + except ls_utils.LangSmithNotFoundError: + return None + + def create_prompt( + self, + prompt_identifier: str, + *, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, + is_public: bool = False, + ) -> ls_schemas.Prompt: + """Create a new prompt. + + Does not attach prompt object, just creates an empty prompt. + + Args: + prompt_name (str): The name of the prompt. + description (Optional[str]): A description of the prompt. + readme (Optional[str]): A readme for the prompt. + tags (Optional[Sequence[str]]): A list of tags for the prompt. + is_public (bool): Whether the prompt should be public. Defaults to False. + + Returns: + ls_schemas.Prompt: The created prompt object. + + Raises: + ValueError: If the current tenant is not the owner. + HTTPError: If the server request fails. 
+ """ + settings = self._get_settings() + if is_public and not settings.tenant_handle: + raise ls_utils.LangSmithUserError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" + ) + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + if not self._current_tenant_is_owner(owner=owner): + raise self._owner_conflict_error("create a prompt", owner) + + json: Dict[str, Union[str, bool, Sequence[str]]] = { + "repo_handle": prompt_name, + "description": description or "", + "readme": readme or "", + "tags": tags or [], + "is_public": is_public, + } + + response = self.request_with_retries("POST", "/repos/", json=json) + response.raise_for_status() + return ls_schemas.Prompt(**response.json()["repo"]) + + def create_commit( + self, + prompt_identifier: str, + object: Any, + *, + parent_commit_hash: Optional[str] = None, + ) -> str: + """Create a commit for an existing prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + object (Any): The LangChain object to commit. + parent_commit_hash (Optional[str]): The hash of the parent commit. + Defaults to latest commit. + + Returns: + str: The url of the prompt commit. + + Raises: + HTTPError: If the server request fails. + ValueError: If the prompt does not exist. + """ + if not self._prompt_exists(prompt_identifier): + raise ls_utils.LangSmithNotFoundError( + "Prompt does not exist, you must create it first." 
) - client = Client() - client.run_on_dataset( - "", - construct_chain, - evaluation=evaluation_config, + try: + from langchain_core.load.dump import dumps + except ImportError: + raise ImportError( + "The client.create_commit function requires the langchain_core" + "package to run.\nInstall with `pip install langchain_core`" ) - You can also create custom evaluators by subclassing the - :class:`StringEvaluator ` - or LangSmith's `RunEvaluator` classes. + json_object = dumps(object) + manifest_dict = json.loads(json_object) - .. code-block:: python + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + prompt_owner_and_name = f"{owner}/{prompt_name}" - from typing import Optional - from langchain.evaluation import StringEvaluator + if parent_commit_hash == "latest" or parent_commit_hash is None: + parent_commit_hash = self._get_latest_commit_hash(prompt_owner_and_name) + request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} + response = self.request_with_retries( + "POST", f"/commits/{prompt_owner_and_name}", json=request_dict + ) - class MyStringEvaluator(StringEvaluator): - @property - def requires_input(self) -> bool: - return False + commit_hash = response.json()["commit"]["commit_hash"] - @property - def requires_reference(self) -> bool: - return True + return self._get_prompt_url(f"{prompt_owner_and_name}:{commit_hash}") + + def update_prompt( + self, + prompt_identifier: str, + *, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, + is_public: Optional[bool] = None, + is_archived: Optional[bool] = None, + ) -> Dict[str, Any]: + """Update a prompt's metadata. - @property - def evaluation_name(self) -> str: - return "exact_match" + To update the content of a prompt, use push_prompt or create_commit instead. 
- def _evaluate_strings( - self, prediction, reference=None, input=None, **kwargs - ) -> dict: - return {"score": prediction == reference} + Args: + prompt_identifier (str): The identifier of the prompt to update. + description (Optional[str]): New description for the prompt. + readme (Optional[str]): New readme for the prompt. + tags (Optional[Sequence[str]]): New list of tags for the prompt. + is_public (Optional[bool]): New public status for the prompt. + is_archived (Optional[bool]): New archived status for the prompt. + Returns: + Dict[str, Any]: The updated prompt data as returned by the server. - evaluation_config = RunEvalConfig( - custom_evaluators=[MyStringEvaluator()], + Raises: + ValueError: If the prompt_identifier is empty. + HTTPError: If the server request fails. + """ + settings = self._get_settings() + if is_public and not settings.tenant_handle: + raise ValueError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" ) - client.run_on_dataset( - "", - construct_chain, - evaluation=evaluation_config, + json: Dict[str, Union[str, bool, Sequence[str]]] = {} + + if description is not None: + json["description"] = description + if readme is not None: + json["readme"] = readme + if is_public is not None: + json["is_public"] = is_public + if is_archived is not None: + json["is_archived"] = is_archived + if tags is not None: + json["tags"] = tags + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries( + "PATCH", f"/repos/{owner}/{prompt_name}", json=json + ) + response.raise_for_status() + return response.json() + + def delete_prompt(self, prompt_identifier: str) -> None: + """Delete a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt to delete. + + Returns: + bool: True if the prompt was successfully deleted, False otherwise. 
+ + Raises: + ValueError: If the current tenant is not the owner of the prompt. + """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + if not self._current_tenant_is_owner(owner): + raise self._owner_conflict_error("delete a prompt", owner) + + response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") + response.raise_for_status() + + def pull_prompt_commit( + self, + prompt_identifier: str, + *, + include_model: Optional[bool] = False, + ) -> ls_schemas.PromptCommit: + """Pull a prompt object from the LangSmith API. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + ls_schemas.PromptObject: The prompt object. + + Raises: + ValueError: If no commits are found for the prompt. + """ + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( + prompt_identifier + ) + try: + use_optimization = ls_utils.is_version_greater_or_equal( + self.info.version, "0.5.23" ) - """ # noqa: E501 - warnings.warn( - "The `run_on_dataset` method is deprecated and" - " will be removed in a future version." - "Please use the `evaluate` method instead.", - DeprecationWarning, + except ValueError: + logger.exception( + "Failed to parse LangSmith API version. Defaulting to using optimization." 
+ ) + use_optimization = True + + if not use_optimization and commit_hash == "latest": + latest_commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") + if latest_commit_hash is None: + raise ValueError("No commits found") + else: + commit_hash = latest_commit_hash + + response = self.request_with_retries( + "GET", + ( + f"/commits/{owner}/{prompt_name}/{commit_hash}" + f"{'?include_model=true' if include_model else ''}" + ), ) + return ls_schemas.PromptCommit( + **{"owner": owner, "repo": prompt_name, **response.json()} + ) + + def list_prompt_commits( + self, + prompt_identifier: str, + *, + limit: Optional[int] = None, + offset: int = 0, + include_model: bool = False, + ) -> Iterator[ls_schemas.ListedPromptCommit]: + """List commits for a given prompt. + + Args: + prompt_identifier (str): The identifier of the prompt in the format 'owner/repo_name'. + limit (Optional[int], optional): The maximum number of commits to return. If None, returns all commits. Defaults to None. + offset (int, optional): The number of commits to skip before starting to return results. Defaults to 0. + include_model (bool, optional): Whether to include the model information in the commit data. Defaults to False. + + Returns: + Iterator[ls_schemas.ListedPromptCommit]: An iterator of ListedPromptCommit objects representing the commits. + + Yields: + ls_schemas.ListedPromptCommit: A ListedPromptCommit object for each commit. + + Note: + This method uses pagination to retrieve commits. It will make multiple API calls if necessary to retrieve all commits + or up to the specified limit. 
+ """ + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + + params = { + "limit": min(100, limit) if limit is not None else limit, + "offset": offset, + "include_model": include_model, + } + i = 0 + while True: + params["offset"] = offset + response = self.request_with_retries( + "GET", + f"/commits/{owner}/{prompt_name}/", + params=params, + ) + val = response.json() + items = val["commits"] + total = val["total"] + + if not items: + break + for it in items: + if limit is not None and i >= limit: + return # Stop iteration if we've reached the limit + yield ls_schemas.ListedPromptCommit( + **{"owner": owner, "repo": prompt_name, **it} + ) + i += 1 + + offset += len(items) + if offset >= total: + break + + def pull_prompt( + self, prompt_identifier: str, *, include_model: Optional[bool] = False + ) -> Any: + """Pull a prompt and return it as a LangChain PromptTemplate. + + This method requires `langchain_core`. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + Any: The prompt object in the specified format. 
+ """ try: - from langchain.smith import run_on_dataset as _run_on_dataset + from langchain_core.load.load import loads + from langchain_core.prompts import BasePromptTemplate + from langchain_core.runnables.base import RunnableSequence except ImportError: raise ImportError( - "The client.run_on_dataset function requires the langchain" - "package to run.\nInstall with pip install langchain" + "The client.pull_prompt function requires the langchain_core" + "package to run.\nInstall with `pip install langchain_core`" ) - return _run_on_dataset( - dataset_name=dataset_name, - llm_or_chain_factory=llm_or_chain_factory, - concurrency_level=concurrency_level, - client=self, - evaluation=evaluation, - project_name=project_name, - project_metadata=project_metadata, - verbose=verbose, - input_mapper=input_mapper, - revision_id=revision_id, - dataset_version=dataset_version, - **kwargs, + try: + from langchain_core._api import suppress_langchain_beta_warning + except ImportError: + + @contextlib.contextmanager + def suppress_langchain_beta_warning(): + yield + + prompt_object = self.pull_prompt_commit( + prompt_identifier, include_model=include_model + ) + with suppress_langchain_beta_warning(): + prompt = loads(json.dumps(prompt_object.manifest)) + + if ( + isinstance(prompt, BasePromptTemplate) + or isinstance(prompt, RunnableSequence) + and isinstance(prompt.first, BasePromptTemplate) + ): + prompt_template = ( + prompt + if isinstance(prompt, BasePromptTemplate) + else ( + prompt.first + if isinstance(prompt, RunnableSequence) + and isinstance(prompt.first, BasePromptTemplate) + else None + ) + ) + if prompt_template is None: + raise ls_utils.LangSmithError( + "Prompt object is not a valid prompt template." 
+ ) + + if prompt_template.metadata is None: + prompt_template.metadata = {} + prompt_template.metadata.update( + { + "lc_hub_owner": prompt_object.owner, + "lc_hub_repo": prompt_object.repo, + "lc_hub_commit_hash": prompt_object.commit_hash, + } + ) + + return prompt + + def push_prompt( + self, + prompt_identifier: str, + *, + object: Optional[Any] = None, + parent_commit_hash: str = "latest", + is_public: bool = False, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, + ) -> str: + """Push a prompt to the LangSmith API. + + Can be used to update prompt metadata or prompt content. + + If the prompt does not exist, it will be created. + If the prompt exists, it will be updated. + + Args: + prompt_identifier (str): The identifier of the prompt. + object (Optional[Any]): The LangChain object to push. + parent_commit_hash (str): The parent commit hash. + Defaults to "latest". + is_public (bool): Whether the prompt should be public. Defaults to False. + description (Optional[str]): A description of the prompt. + Defaults to an empty string. + readme (Optional[str]): A readme for the prompt. + Defaults to an empty string. + tags (Optional[Sequence[str]]): A list of tags for the prompt. + Defaults to an empty list. + + Returns: + str: The URL of the prompt. 
+ + """ + # Create or update prompt metadata + if self._prompt_exists(prompt_identifier): + if any( + param is not None + for param in [parent_commit_hash, is_public, description, readme, tags] + ): + self.update_prompt( + prompt_identifier, + description=description, + readme=readme, + tags=tags, + is_public=is_public, + ) + else: + self.create_prompt( + prompt_identifier, + is_public=is_public, + description=description, + readme=readme, + tags=tags, + ) + + if object is None: + return self._get_prompt_url(prompt_identifier=prompt_identifier) + + # Create a commit with the new manifest + url = self.create_commit( + prompt_identifier, + object, + parent_commit_hash=parent_commit_hash, ) + return url def _tracing_thread_drain_queue( @@ -4697,3 +5641,83 @@ def _tracing_sub_thread_func( tracing_queue, limit=size_limit, block=False ): _tracing_thread_handle_batch(client, tracing_queue, next_batch) + + +def convert_prompt_to_openai_format( + messages: Any, + model_kwargs: Optional[Dict[str, Any]] = None, +) -> dict: + """Convert a prompt to OpenAI format. + + Requires the `langchain_openai` package to be installed. + + Args: + messages (Any): The messages to convert. + model_kwargs (Optional[Dict[str, Any]]): Model configuration arguments including + `stop` and any other required arguments. Defaults to None. + + Returns: + dict: The prompt in OpenAI format. + + Raises: + ImportError: If the `langchain_openai` package is not installed. + ls_utils.LangSmithError: If there is an error during the conversion process. 
+ """ + try: + from langchain_openai import ChatOpenAI + except ImportError: + raise ImportError( + "The convert_prompt_to_openai_format function requires the langchain_openai" + "package to run.\nInstall with `pip install langchain_openai`" + ) + + openai = ChatOpenAI() + + model_kwargs = model_kwargs or {} + stop = model_kwargs.pop("stop", None) + + try: + return openai._get_request_payload(messages, stop=stop, **model_kwargs) + except Exception as e: + raise ls_utils.LangSmithError(f"Error converting to OpenAI format: {e}") + + +def convert_prompt_to_anthropic_format( + messages: Any, + model_kwargs: Optional[Dict[str, Any]] = None, +) -> dict: + """Convert a prompt to Anthropic format. + + Requires the `langchain_anthropic` package to be installed. + + Args: + messages (Any): The messages to convert. + model_kwargs (Optional[Dict[str, Any]]): + Model configuration arguments including `model_name` and `stop`. + Defaults to None. + + Returns: + dict: The prompt in Anthropic format. + """ + try: + from langchain_anthropic import ChatAnthropic + except ImportError: + raise ImportError( + "The convert_prompt_to_anthropic_format function requires the " + "langchain_anthropic package to run.\n" + "Install with `pip install langchain_anthropic`" + ) + + model_kwargs = model_kwargs or {} + model_name = model_kwargs.pop("model_name", "claude-3-haiku-20240307") + stop = model_kwargs.pop("stop", None) + timeout = model_kwargs.pop("timeout", None) + + anthropic = ChatAnthropic( + model_name=model_name, timeout=timeout, stop=stop, **model_kwargs + ) + + try: + return anthropic._get_request_payload(messages, stop=stop) + except Exception as e: + raise ls_utils.LangSmithError(f"Error converting to Anthropic format: {e}") diff --git a/python/langsmith/env/_runtime_env.py b/python/langsmith/env/_runtime_env.py index 7f25b3572..354f0eca1 100644 --- a/python/langsmith/env/_runtime_env.py +++ b/python/langsmith/env/_runtime_env.py @@ -176,6 +176,7 @@ def 
get_langchain_env_var_metadata() -> dict: "LANGCHAIN_TRACING_V2", "LANGCHAIN_PROJECT", "LANGCHAIN_SESSION", + "LANGSMITH_RUNS_ENDPOINTS", } langchain_metadata = { k: v diff --git a/python/langsmith/evaluation/__init__.py b/python/langsmith/evaluation/__init__.py index 253732cfc..244f9a7d8 100644 --- a/python/langsmith/evaluation/__init__.py +++ b/python/langsmith/evaluation/__init__.py @@ -24,7 +24,6 @@ def __getattr__(name: str) -> Any: - # TODO: Use importlib if name == "evaluate": from langsmith.evaluation._runner import evaluate diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index b5e1cc2ed..a1055e64d 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -3,6 +3,7 @@ from __future__ import annotations import asyncio +import concurrent.futures as cf import datetime import logging import pathlib @@ -25,9 +26,9 @@ import langsmith from langsmith import run_helpers as rh from langsmith import run_trees, schemas +from langsmith import run_trees as rt from langsmith import utils as ls_utils from langsmith._internal import _aiter as aitertools -from langsmith.beta import warn_beta from langsmith.evaluation._runner import ( AEVALUATOR_T, DATA_T, @@ -36,6 +37,7 @@ ExperimentResultRow, _ExperimentManagerMixin, _ForwardResults, + _load_examples_map, _load_experiment, _load_tqdm, _load_traces, @@ -51,7 +53,6 @@ ATARGET_T = Callable[[dict], Awaitable[dict]] -@warn_beta async def aevaluate( target: Union[ATARGET_T, AsyncIterable[dict]], /, @@ -65,6 +66,7 @@ async def aevaluate( num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, + experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None, ) -> AsyncExperimentResults: r"""Evaluate an async target system or function on a given dataset. @@ -90,6 +92,9 @@ async def aevaluate( Defaults to None. blocking (bool): Whether to block until the evaluation is complete. Defaults to True. 
+ experiment (Optional[schemas.TracerSession]): An existing experiment to + extend. If provided, experiment_prefix is ignored. For advanced + usage only. Returns: AsyncIterator[ExperimentResultRow]: An async iterator over the experiment results. @@ -102,11 +107,10 @@ async def aevaluate( Examples: >>> from typing import Sequence - >>> from langsmith import Client - >>> from langsmith.evaluation import evaluate + >>> from langsmith import Client, aevaluate >>> from langsmith.schemas import Example, Run >>> client = Client() - >>> client.clone_public_dataset( + >>> dataset = client.clone_public_dataset( ... "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" ... ) >>> dataset_name = "Evaluate Examples" @@ -206,7 +210,7 @@ async def aevaluate( >>> async def helpfulness(run: Run, example: Example): ... # Row-level evaluator for helpfulness. - ... await asyncio.sleep(0.1) # Replace with your LLM API call + ... await asyncio.sleep(5) # Replace with your LLM API call ... return {"score": run.outputs["output"] == "Yes"} >>> results = asyncio.run( @@ -221,6 +225,12 @@ async def aevaluate( ... ) # doctest: +ELLIPSIS View the evaluation results for experiment:... """ # noqa: E501 + if experiment and experiment_prefix: + raise ValueError( + "Expected at most one of 'experiment' or 'experiment_prefix'," + " but both were provided. 
" + f"Got: experiment={experiment}, experiment_prefix={experiment_prefix}" + ) return await _aevaluate( target, data=data, @@ -233,12 +243,12 @@ async def aevaluate( num_repetitions=num_repetitions, client=client, blocking=blocking, + experiment=experiment, ) -@warn_beta async def aevaluate_existing( - experiment: Union[str, uuid.UUID], + experiment: Union[str, uuid.UUID, schemas.TracerSession], /, evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None, summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None, @@ -287,7 +297,7 @@ async def aevaluate_existing( Load the experiment and run the evaluation. - >>> from langsmith.evaluation import aevaluate, aevaluate_existing + >>> from langsmith import aevaluate, aevaluate_existing >>> dataset_name = "Evaluate Examples" >>> async def apredict(inputs: dict) -> dict: ... # This can be any async function or just an API call to your app. @@ -315,18 +325,17 @@ async def aevaluate_existing( """ # noqa: E501 - client = client or langsmith.Client() - project = _load_experiment(experiment, client) - runs = _load_traces(experiment, client, load_nested=load_nested) - data = [ - example - for example in client.list_examples( - dataset_id=project.reference_dataset_id, - as_of=project.metadata.get("dataset_version"), - ) - ] - runs = sorted(runs, key=lambda r: str(r.reference_example_id)) - data = sorted(data, key=lambda d: str(d.id)) + client = client or run_trees.get_cached_client() + project = ( + experiment + if isinstance(experiment, schemas.TracerSession) + else (await aitertools.aio_to_thread(_load_experiment, experiment, client)) + ) + runs = await aitertools.aio_to_thread( + _load_traces, experiment, client, load_nested=load_nested + ) + data_map = await aitertools.aio_to_thread(_load_examples_map, client, project) + data = [data_map[run.reference_example_id] for run in runs] return await _aevaluate( runs, data=data, @@ -336,6 +345,7 @@ async def aevaluate_existing( max_concurrency=max_concurrency, 
client=client, blocking=blocking, + experiment=project, ) @@ -352,14 +362,15 @@ async def _aevaluate( num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, - experiment: Optional[schemas.TracerSession] = None, + experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None, ) -> AsyncExperimentResults: is_async_target = asyncio.iscoroutinefunction(target) or ( hasattr(target, "__aiter__") and asyncio.iscoroutine(target.__aiter__()) ) - client = client or langsmith.Client() + client = client or rt.get_cached_client() runs = None if is_async_target else cast(Iterable[schemas.Run], target) - experiment_, runs = _resolve_experiment( + experiment_, runs = await aitertools.aio_to_thread( + _resolve_experiment, experiment, runs, client, @@ -596,7 +607,7 @@ async def predict_all(): ) async for result in aitertools.aiter_with_concurrency( - max_concurrency, predict_all() + max_concurrency, predict_all(), _eager_consumption_timeout=0.001 ): yield result @@ -607,20 +618,25 @@ async def _ascore( evaluators: Sequence[RunEvaluator], max_concurrency: Optional[int] = None, ) -> AsyncIterator[ExperimentResultRow]: - async def score_all(): - async for current_results in self.aget_results(): - # Yield the coroutine to be awaited later in aiter_with_concurrency - yield self._arun_evaluators(evaluators, current_results) + with cf.ThreadPoolExecutor(max_workers=4) as executor: - async for result in aitertools.aiter_with_concurrency( - max_concurrency, score_all() - ): - yield result + async def score_all(): + async for current_results in self.aget_results(): + # Yield the coroutine to be awaited later in aiter_with_concurrency + yield self._arun_evaluators( + evaluators, current_results, executor=executor + ) + + async for result in aitertools.aiter_with_concurrency( + max_concurrency, score_all(), _eager_consumption_timeout=0.001 + ): + yield result async def _arun_evaluators( self, evaluators: Sequence[RunEvaluator], current_results: 
ExperimentResultRow, + executor: cf.ThreadPoolExecutor, ) -> ExperimentResultRow: current_context = rh.get_tracing_context() metadata = { @@ -628,7 +644,13 @@ async def _arun_evaluators( **{"experiment": self.experiment_name}, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + "client": self.client, + } ): run = current_results["run"] example = current_results["example"] @@ -641,8 +663,7 @@ async def _arun_evaluators( ) eval_results["results"].extend( self.client._log_evaluation_feedback( - evaluator_response, - run=run, + evaluator_response, run=run, _executor=executor ) ) except Exception as e: @@ -682,11 +703,12 @@ async def _aapply_summary_evaluators( **current_context, "project_name": "evaluators", "metadata": metadata, + "enabled": True, + "client": self.client, } ): for evaluator in summary_evaluators: try: - # TODO: Support async evaluators summary_eval_result = evaluator(runs, examples) flattened_results = self.client._select_eval_results( summary_eval_result, @@ -696,7 +718,8 @@ async def _aapply_summary_evaluators( for result in flattened_results: feedback = result.dict(exclude={"target_run_id"}) evaluator_info = feedback.pop("evaluator_info", None) - self.client.create_feedback( + await aitertools.aio_to_thread( + self.client.create_feedback, **feedback, run_id=None, project_id=project_id, @@ -769,6 +792,10 @@ def __aiter__(self) -> AsyncIterator[ExperimentResultRow]: return self async def __anext__(self) -> ExperimentResultRow: + async def _wait_until_index(index: int) -> None: + while self._processed_count < index: + await asyncio.sleep(0.05) + while True: async with self._lock: if self._processed_count < len(self._results): @@ -777,8 +804,9 @@ async def __anext__(self) -> ExperimentResultRow: return result elif self._task.done(): raise StopAsyncIteration + await asyncio.shield( - 
asyncio.wait([self._task], return_when=asyncio.FIRST_COMPLETED) + asyncio.wait_for(_wait_until_index(len(self._results)), timeout=None) ) async def _process_data(self, manager: _AsyncExperimentManager) -> None: @@ -813,30 +841,31 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - await fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + await fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _ensure_async_traceable( diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index f5cc1ae4c..a040ea7a3 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -9,6 +9,7 @@ import itertools import logging import pathlib +import queue import random import threading import uuid @@ -30,16 +31,17 @@ cast, ) -from requests import HTTPError from typing_extensions import TypedDict import langsmith from langsmith import env as ls_env from langsmith import run_helpers as rh -from langsmith import run_trees, schemas +from langsmith import run_trees as rt +from langsmith import 
schemas from langsmith import utils as ls_utils from langsmith.evaluation.evaluator import ( ComparisonEvaluationResult, + DynamicComparisonRunEvaluator, EvaluationResult, EvaluationResults, RunEvaluator, @@ -55,14 +57,23 @@ DATA_T = Union[str, uuid.UUID, Iterable[schemas.Example]] # Summary evaluator runs over the whole dataset # and reports aggregate metric(s) -SUMMARY_EVALUATOR_T = Callable[ - [Sequence[schemas.Run], Sequence[schemas.Example]], - Union[EvaluationResult, EvaluationResults], +SUMMARY_EVALUATOR_T = Union[ + Callable[ + [Sequence[schemas.Run], Sequence[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], + Callable[ + [List[schemas.Run], List[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], ] # Row-level evaluator EVALUATOR_T = Union[ RunEvaluator, - Callable[[schemas.Run, Optional[schemas.Example]], EvaluationResult], + Callable[ + [schemas.Run, Optional[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], ] AEVALUATOR_T = Union[ Callable[ @@ -85,6 +96,7 @@ def evaluate( num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, + experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None, ) -> ExperimentResults: r"""Evaluate a target system or function on a given dataset. @@ -102,7 +114,7 @@ def evaluate( Defaults to None. description (Optional[str]): A free-form text description for the experiment. max_concurrency (Optional[int]): The maximum number of concurrent - evaluations to run. Defaults to None. + evaluations to run. Defaults to None (max number of workers). client (Optional[langsmith.Client]): The LangSmith client to use. Defaults to None. blocking (bool): Whether to block until the evaluation is complete. @@ -110,6 +122,9 @@ def evaluate( num_repetitions (int): The number of times to run the evaluation. Each item in the dataset will be run and evaluated this many times. Defaults to 1. 
+ experiment (Optional[schemas.TracerSession]): An existing experiment to + extend. If provided, experiment_prefix is ignored. For advanced + usage only. Returns: ExperimentResults: The results of the evaluation. @@ -122,7 +137,7 @@ def evaluate( >>> from langsmith.evaluation import evaluate >>> from langsmith.schemas import Example, Run >>> client = Client() - >>> client.clone_public_dataset( + >>> dataset = client.clone_public_dataset( ... "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" ... ) >>> dataset_name = "Evaluate Examples" @@ -190,6 +205,7 @@ def evaluate( Using the `evaluate` API with an off-the-shelf LangChain evaluator: >>> from langsmith.evaluation import LangChainStringEvaluator + >>> from langchain_openai import ChatOpenAI >>> def prepare_criteria_data(run: Run, example: Example): ... return { ... "prediction": run.outputs["output"], @@ -209,6 +225,7 @@ def evaluate( ... "usefulness": "The prediction is useful if it is correct" ... " and/or asks a useful followup question." ... }, + ... "llm": ChatOpenAI(model="gpt-4o"), ... }, ... prepare_data=prepare_criteria_data, ... ), @@ -236,6 +253,12 @@ def evaluate( ... ) # doctest: +ELLIPSIS View the evaluation results for experiment:... """ # noqa: E501 + if experiment and experiment_prefix: + raise ValueError( + "Expected at most one of 'experiment' or 'experiment_prefix'," + " but both were provided. " + f"Got: experiment={experiment}, experiment_prefix={experiment_prefix}" + ) return _evaluate( target, data=data, @@ -248,11 +271,12 @@ def evaluate( num_repetitions=num_repetitions, client=client, blocking=blocking, + experiment=experiment, ) def evaluate_existing( - experiment: Union[str, uuid.UUID], + experiment: Union[str, uuid.UUID, schemas.TracerSession], /, evaluators: Optional[Sequence[EVALUATOR_T]] = None, summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None, @@ -323,17 +347,15 @@ def evaluate_existing( ... 
) # doctest: +ELLIPSIS View the evaluation results for experiment:... """ # noqa: E501 - client = client or langsmith.Client() - project = _load_experiment(experiment, client) - runs = _load_traces(experiment, client, load_nested=load_nested) - data = list( - client.list_examples( - dataset_id=project.reference_dataset_id, - as_of=project.metadata.get("dataset_version"), - ) + client = client or rt.get_cached_client(timeout_ms=(20_000, 90_001)) + project = ( + experiment + if isinstance(experiment, schemas.TracerSession) + else _load_experiment(experiment, client) ) - runs = sorted(runs, key=lambda r: str(r.reference_example_id)) - data = sorted(data, key=lambda d: str(d.id)) + runs = _load_traces(experiment, client, load_nested=load_nested) + data_map = _load_examples_map(client, project) + data = [data_map[cast(uuid.UUID, run.reference_example_id)] for run in runs] return _evaluate( runs, data=data, @@ -343,6 +365,7 @@ def evaluate_existing( max_concurrency=max_concurrency, client=client, blocking=blocking, + experiment=project, ) @@ -364,41 +387,51 @@ class ExperimentResults: wait() -> None: Waits for the experiment data to be processed. 
""" - def __init__( - self, - experiment_manager: _ExperimentManager, - ): + def __init__(self, experiment_manager: _ExperimentManager, blocking: bool = True): self._manager = experiment_manager self._results: List[ExperimentResultRow] = [] - self._lock = threading.RLock() - self._thread = threading.Thread( - target=lambda: self._process_data(self._manager) - ) - self._thread.start() + self._queue: queue.Queue[ExperimentResultRow] = queue.Queue() + self._processing_complete = threading.Event() + if not blocking: + self._thread: Optional[threading.Thread] = threading.Thread( + target=self._process_data + ) + self._thread.start() + else: + self._thread = None + self._process_data() @property def experiment_name(self) -> str: return self._manager.experiment_name def __iter__(self) -> Iterator[ExperimentResultRow]: - processed_count = 0 - while True: - with self._lock: - if processed_count < len(self._results): - yield self._results[processed_count] - processed_count += 1 - elif not self._thread.is_alive(): - break - - def _process_data(self, manager: _ExperimentManager) -> None: + ix = 0 + while ( + not self._processing_complete.is_set() + or not self._queue.empty() + or ix < len(self._results) + ): + try: + if ix < len(self._results): + yield self._results[ix] + ix += 1 + else: + self._queue.get(block=True, timeout=0.1) + except queue.Empty: + continue + + def _process_data(self) -> None: tqdm = _load_tqdm() - results = manager.get_results() + results = self._manager.get_results() for item in tqdm(results): - with self._lock: - self._results.append(item) - summary_scores = manager.get_summary_scores() - with self._lock: - self._summary_results = summary_scores + self._queue.put(item) + self._results.append(item) + + summary_scores = self._manager.get_summary_scores() + self._summary_results = summary_scores + + self._processing_complete.set() def __len__(self) -> int: return len(self._results) @@ -412,7 +445,8 @@ def wait(self) -> None: This method blocks the current 
thread until the evaluation runner has finished its execution. """ - self._thread.join() + if self._thread: + self._thread.join() ## Public API for Comparison Experiments @@ -475,7 +509,7 @@ def evaluate_comparative( >>> from langsmith.evaluation import evaluate >>> from langsmith.schemas import Example, Run >>> client = Client() - >>> client.clone_public_dataset( + >>> dataset = client.clone_public_dataset( ... "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" ... ) >>> dataset_name = "Evaluate Examples" @@ -524,7 +558,10 @@ def evaluate_comparative( Finally, you would compare the two prompts directly: >>> import json >>> from langsmith.evaluation import evaluate_comparative - >>> def score_preferences(runs: list, example): + >>> def score_preferences(runs: list, example: schemas.Example): + ... assert len(runs) == 2 # Comparing 2 systems + ... assert isinstance(example, schemas.Example) + ... assert all(run.reference_example_id == example.id for run in runs) ... pred_a = runs[0].outputs["output"] ... pred_b = runs[1].outputs["output"] ... ground_truth = example.outputs["answer"] @@ -581,12 +618,40 @@ def evaluate_comparative( ... "key": "ranked_preference", ... "scores": {runs[0].id: 0, runs[1].id: 1}, ... } + >>> def score_length_difference(runs: list, example: schemas.Example): + ... # Just return whichever response is longer. + ... # Just an example, not actually useful in real life. + ... assert len(runs) == 2 # Comparing 2 systems + ... assert isinstance(example, schemas.Example) + ... assert all(run.reference_example_id == example.id for run in runs) + ... pred_a = runs[0].outputs["output"] + ... pred_b = runs[1].outputs["output"] + ... if len(pred_a) > len(pred_b): + ... return { + ... "key": "length_difference", + ... "scores": {runs[0].id: 1, runs[1].id: 0}, + ... } + ... else: + ... return { + ... "key": "length_difference", + ... "scores": {runs[0].id: 0, runs[1].id: 1}, + ... } >>> results = evaluate_comparative( ... 
[results_1.experiment_name, results_2.experiment_name], - ... evaluators=[score_preferences], + ... evaluators=[score_preferences, score_length_difference], ... client=client, ... ) # doctest: +ELLIPSIS View the pairwise evaluation results at:... + >>> eval_results = list(results) + >>> assert len(eval_results) >= 10 + >>> assert all( + ... "feedback.ranked_preference" in r["evaluation_results"] + ... for r in eval_results + ... ) + >>> assert all( + ... "feedback.length_difference" in r["evaluation_results"] + ... for r in eval_results + ... ) """ # noqa: E501 if len(experiments) < 2: raise ValueError("Comparative evaluation requires at least 2 experiments.") @@ -596,7 +661,7 @@ def evaluate_comparative( ) if max_concurrency < 0: raise ValueError("max_concurrency must be a positive integer.") - client = client or langsmith.Client() + client = client or rt.get_cached_client() # TODO: Add information about comparison experiments projects = [_load_experiment(experiment, client) for experiment in experiments] @@ -664,14 +729,18 @@ def evaluate_comparative( results: dict = {} def evaluate_and_submit_feedback( - runs_list: list[schemas.Run], example: schemas.Example, executor: cf.Executor + runs_list: list[schemas.Run], + example: schemas.Example, + comparator: DynamicComparisonRunEvaluator, + executor: cf.Executor, ) -> ComparisonEvaluationResult: feedback_group_id = uuid.uuid4() if randomize_order: random.shuffle(runs_list) - result = comparator.compare_runs(runs_list, example) - if client is None: - raise ValueError("Client is required to submit feedback.") + with rh.tracing_context(project_name="evaluators", client=client): + result = comparator.compare_runs(runs_list, example) + if client is None: + raise ValueError("Client is required to submit feedback.") for run_id, score in result.scores.items(): executor.submit( client.create_feedback, @@ -685,7 +754,9 @@ def evaluate_and_submit_feedback( return result tqdm = _load_tqdm() - with 
cf.ThreadPoolExecutor(max_workers=max_concurrency or 1) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency or 1 + ) as executor: futures = [] for example_id, runs_list in tqdm(runs_dict.items()): results[example_id] = { @@ -697,12 +768,13 @@ def evaluate_and_submit_feedback( evaluate_and_submit_feedback, runs_list, data[example_id], + comparator, executor, ) futures.append(future) else: result = evaluate_and_submit_feedback( - runs_list, data[example_id], executor + runs_list, data[example_id], comparator, executor ) results[example_id][f"feedback.{result.key}"] = result if futures: @@ -711,7 +783,7 @@ def evaluate_and_submit_feedback( result = future.result() results[example_id][f"feedback.{result.key}"] = result - return ComparativeExperimentResults(results) + return ComparativeExperimentResults(results, data) class ComparativeExperimentResults: @@ -729,13 +801,22 @@ class ComparativeExperimentResults: def __init__( self, results: dict, + examples: Optional[Dict[uuid.UUID, schemas.Example]] = None, ): self._results = results + self._examples = examples def __getitem__(self, key): """Return the result associated with the given key.""" return self._results[key] + def __iter__(self): + for key, value in self._results.items(): + yield { + "example": self._examples[key] if self._examples else None, + "evaluation_results": value, + } + ## Private API @@ -776,10 +857,10 @@ def _evaluate( num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, - experiment: Optional[schemas.TracerSession] = None, + experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None, ) -> ExperimentResults: # Initialize the experiment manager. 
- client = client or langsmith.Client() + client = client or rt.get_cached_client() runs = None if _is_callable(target) else cast(Iterable[schemas.Run], target) experiment_, runs = _resolve_experiment( experiment, @@ -817,10 +898,7 @@ def _evaluate( # Apply the experiment-level summary evaluators. manager = manager.with_summary_evaluators(summary_evaluators) # Start consuming the results. - results = ExperimentResults(manager) - if blocking: - # Wait for the evaluation to complete. - results.wait() + results = ExperimentResults(manager, blocking=blocking) return results @@ -841,14 +919,18 @@ def _load_experiment( def _load_traces( - project: Union[str, uuid.UUID], client: langsmith.Client, load_nested: bool = False + project: Union[str, uuid.UUID, schemas.TracerSession], + client: langsmith.Client, + load_nested: bool = False, ) -> List[schemas.Run]: """Load nested traces for a given project.""" - execution_order = None if load_nested else 1 - if isinstance(project, uuid.UUID) or _is_uuid(project): - runs = client.list_runs(project_id=project, execution_order=execution_order) + is_root = None if load_nested else True + if isinstance(project, schemas.TracerSession): + runs = client.list_runs(project_id=project.id, is_root=is_root) + elif isinstance(project, uuid.UUID) or _is_uuid(project): + runs = client.list_runs(project_id=project, is_root=is_root) else: - runs = client.list_runs(project_name=project, execution_order=execution_order) + runs = client.list_runs(project_name=project, is_root=is_root) if not load_nested: return list(runs) @@ -866,6 +948,18 @@ def _load_traces( return results +def _load_examples_map( + client: langsmith.Client, project: schemas.TracerSession +) -> Dict[uuid.UUID, schemas.Example]: + return { + e.id: e + for e in client.list_examples( + dataset_id=project.reference_dataset_id, + as_of=project.metadata.get("dataset_version"), + ) + } + + IT = TypeVar("IT") @@ -889,7 +983,7 @@ def __init__( client: Optional[langsmith.Client] = None, 
description: Optional[str] = None, ): - self.client = client or langsmith.Client() + self.client = client or rt.get_cached_client() self._experiment: Optional[schemas.TracerSession] = None if experiment is None: self._experiment_name = _get_random_name() @@ -938,24 +1032,33 @@ def _get_experiment_metadata(self): } return project_metadata - def _get_project(self, first_example: schemas.Example) -> schemas.TracerSession: - if self._experiment is None: + def _create_experiment( + self, dataset_id: uuid.UUID, metadata: dict + ) -> schemas.TracerSession: + # There is a chance of name collision, so we'll retry + starting_name = self._experiment_name + num_attempts = 10 + for _ in range(num_attempts): try: - project_metadata = self._get_experiment_metadata() - project = self.client.create_project( - self.experiment_name, + return self.client.create_project( + self._experiment_name, description=self._description, - reference_dataset_id=first_example.dataset_id, - metadata=project_metadata, - ) - except (HTTPError, ValueError, ls_utils.LangSmithError) as e: - if "already exists " not in str(e): - raise e - raise ValueError( - # TODO: Better error - f"Experiment {self.experiment_name} already exists." - " Please use a different name." + reference_dataset_id=dataset_id, + metadata=metadata, ) + except ls_utils.LangSmithConflictError: + self._experiment_name = f"{starting_name}-{str(uuid.uuid4().hex[:6])}" + raise ValueError( + f"Could not find a unique experiment name in {num_attempts} attempts." + " Please try again with a different experiment name." 
+ ) + + def _get_project(self, first_example: schemas.Example) -> schemas.TracerSession: + if self._experiment is None: + project_metadata = self._get_experiment_metadata() + project = self._create_experiment( + first_example.dataset_id, project_metadata + ) else: project = self._experiment return project @@ -978,7 +1081,9 @@ def _print_experiment_start( ) else: # HACKHACK - print("Starting evaluation of experiment: %s", self.experiment_name) + print( # noqa: T201 + "Starting evaluation of experiment: %s", self.experiment_name + ) class _ExperimentManager(_ExperimentManagerMixin): @@ -1056,7 +1161,7 @@ def dataset_id(self) -> str: @property def evaluation_results(self) -> Iterable[EvaluationResults]: if self._evaluation_results is None: - return [{"results": []} for _ in self.examples] + return ({"results": []} for _ in self.examples) return self._evaluation_results @property @@ -1191,7 +1296,7 @@ def _predict( ) else: - with cf.ThreadPoolExecutor(max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor(max_concurrency) as executor: futures = [ executor.submit( _forward, @@ -1212,6 +1317,7 @@ def _run_evaluators( self, evaluators: Sequence[RunEvaluator], current_results: ExperimentResultRow, + executor: cf.ThreadPoolExecutor, ) -> ExperimentResultRow: current_context = rh.get_tracing_context() metadata = { @@ -1223,7 +1329,13 @@ def _run_evaluators( }, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + "client": self.client, + } ): run = current_results["run"] example = current_results["example"] @@ -1237,8 +1349,7 @@ def _run_evaluators( eval_results["results"].extend( # TODO: This is a hack self.client._log_evaluation_feedback( - evaluator_response, - run=run, + evaluator_response, run=run, _executor=executor ) ) except Exception as e: @@ -1263,20 +1374,37 @@ def _score( Expects runs to be 
available in the manager. (e.g. from a previous prediction step) """ - if max_concurrency == 0: - for current_results in self.get_results(): - yield self._run_evaluators(evaluators, current_results) - else: - with cf.ThreadPoolExecutor(max_workers=max_concurrency) as executor: - futures = [] + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency + ) as executor: + if max_concurrency == 0: + context = copy_context() for current_results in self.get_results(): - futures.append( + yield context.run( + self._run_evaluators, + evaluators, + current_results, + executor=executor, + ) + else: + futures = set() + for current_results in self.get_results(): + futures.add( executor.submit( self._run_evaluators, evaluators, current_results, + executor=executor, ) ) + try: + # Since prediction may be slow, yield (with a timeout) to + # allow for early results to be emitted. + for future in cf.as_completed(futures, timeout=0.001): + yield future.result() + futures.remove(future) + except (cf.TimeoutError, TimeoutError): + pass for future in cf.as_completed(futures): result = future.result() yield result @@ -1289,7 +1417,7 @@ def _apply_summary_evaluators( runs.append(run) examples.append(example) aggregate_feedback = [] - with cf.ThreadPoolExecutor() as executor: + with ls_utils.ContextThreadPoolExecutor() as executor: project_id = self._get_experiment().id current_context = rh.get_tracing_context() metadata = { @@ -1304,6 +1432,8 @@ def _apply_summary_evaluators( **current_context, "project_name": "evaluators", "metadata": metadata, + "client": self.client, + "enabled": True, } ): for evaluator in summary_evaluators: @@ -1399,7 +1529,7 @@ def _wrapper_inner( def _wrapper_super_inner( runs_: str, examples_: str ) -> Union[EvaluationResult, EvaluationResults]: - return evaluator(runs, examples) + return evaluator(list(runs), list(examples)) return _wrapper_super_inner( f"Runs[] (Length={len(runs)})", f"Examples[] (Length={len(examples)})" @@ -1427,34 +1557,35 @@ 
def _forward( ) -> _ForwardResults: run: Optional[schemas.RunBase] = None - def _get_run(r: run_trees.RunTree) -> None: + def _get_run(r: rt.RunTree) -> None: nonlocal run run = r - try: - fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _resolve_data( @@ -1482,7 +1613,7 @@ def _ensure_traceable( def _resolve_experiment( - experiment: Optional[schemas.TracerSession], + experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]], runs: Optional[Iterable[schemas.Run]], client: langsmith.Client, ) -> Tuple[ @@ -1490,18 +1621,28 @@ def _resolve_experiment( ]: # TODO: Remove this, handle outside the manager if experiment is not None: - if not experiment.name: + if isinstance(experiment, schemas.TracerSession): + experiment_ = experiment + else: + experiment_ = _load_experiment(experiment, client) + + if not experiment_.name: raise ValueError("Experiment name must be defined if provided.") - return experiment, None + if not experiment_.reference_dataset_id: + raise ValueError( + "Experiment must have an 
associated reference_dataset_id, " + "but none was provided." + ) + return experiment_, runs # If we have runs, that means the experiment was already started. if runs is not None: if runs is not None: runs_, runs = itertools.tee(runs) first_run = next(runs_) - experiment = client.read_project(project_id=first_run.session_id) - if not experiment.name: + experiment_ = client.read_project(project_id=first_run.session_id) + if not experiment_.name: raise ValueError("Experiment name not found for provided runs.") - return experiment, runs + return experiment_, runs return None, None diff --git a/python/langsmith/evaluation/evaluator.py b/python/langsmith/evaluation/evaluator.py index 47797e646..7e3e748ba 100644 --- a/python/langsmith/evaluation/evaluator.py +++ b/python/langsmith/evaluation/evaluator.py @@ -20,14 +20,27 @@ from typing_extensions import TypedDict try: - from pydantic.v1 import BaseModel, Field, ValidationError # type: ignore[import] + from pydantic.v1 import ( # type: ignore[import] + BaseModel, + Field, + ValidationError, + validator, + ) except ImportError: - from pydantic import BaseModel, Field, ValidationError # type: ignore[assignment] - + from pydantic import ( # type: ignore[assignment] + BaseModel, + Field, + ValidationError, + validator, + ) + +import logging from functools import wraps from langsmith.schemas import SCORE_TYPE, VALUE_TYPE, Example, Run +logger = logging.getLogger(__name__) + class Category(TypedDict): """A category for categorical feedback.""" @@ -83,6 +96,20 @@ class Config: allow_extra = False + @validator("value", pre=True) + def check_value_non_numeric(cls, v, values): + """Check that the value is not numeric.""" + # If a score isn't provided and the value is numeric + # it's more likely the user intended use the score field + if "score" not in values or values["score"] is None: + if isinstance(v, (int, float)): + logger.warning( + "Numeric values should be provided in" + " the 'score' field, not 'value'." 
+ f" Got: {v}" + ) + return v + class EvaluationResults(TypedDict, total=False): """Batch evaluation results. @@ -169,7 +196,9 @@ def __init__( from langsmith import run_helpers # type: ignore if afunc is not None: - self.afunc = run_helpers.ensure_traceable(afunc) + self.afunc = run_helpers.ensure_traceable( + afunc, process_inputs=_serialize_inputs + ) self._name = getattr(afunc, "__name__", "DynamicRunEvaluator") if inspect.iscoroutinefunction(func): if afunc is not None: @@ -178,11 +207,14 @@ def __init__( "also provided. If providing both, func should be a regular " "function to avoid ambiguity." ) - self.afunc = run_helpers.ensure_traceable(func) + self.afunc = run_helpers.ensure_traceable( + func, process_inputs=_serialize_inputs + ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") else: self.func = run_helpers.ensure_traceable( - cast(Callable[[Run, Optional[Example]], _RUNNABLE_OUTPUT], func) + cast(Callable[[Run, Optional[Example]], _RUNNABLE_OUTPUT], func), + process_inputs=_serialize_inputs, ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") @@ -197,9 +229,19 @@ def _coerce_evaluation_result( result.source_run_id = source_run_id return result try: + if not result: + raise ValueError( + "Expected an EvaluationResult object, or dict with a metric" + f" 'key' and optional 'score'; got empty result: {result}" + ) if "key" not in result: if allow_no_key: result["key"] = self._name + if all(k not in result for k in ("score", "value", "comment")): + raise ValueError( + "Expected an EvaluationResult object, or dict with a metric" + f" 'key' and optional 'score' or categorical 'value'; got {result}" + ) return EvaluationResult(**{"source_run_id": source_run_id, **result}) except ValidationError as e: raise ValueError( @@ -233,10 +275,17 @@ def _format_result( if not result.source_run_id: result.source_run_id = source_run_id return result + if not result: + raise ValueError( + "Expected an EvaluationResult or EvaluationResults object, or 
a" + " dict with key and one of score or value, EvaluationResults," + f" got {result}" + ) if not isinstance(result, dict): raise ValueError( f"Expected a dict, EvaluationResult, or EvaluationResults, got {result}" ) + return self._coerce_evaluation_results(result, source_run_id) @property @@ -328,7 +377,7 @@ def __call__( def __repr__(self) -> str: """Represent the DynamicRunEvaluator object.""" - return f"" + return f"" def run_evaluator( @@ -343,6 +392,22 @@ def run_evaluator( return DynamicRunEvaluator(func) +_MAXSIZE = 10_000 + + +def _maxsize_repr(obj: Any): + s = repr(obj) + if len(s) > _MAXSIZE: + s = s[: _MAXSIZE - 4] + "...)" + return s + + +def _serialize_inputs(inputs: dict) -> dict: + run_truncated = _maxsize_repr(inputs.get("run")) + example_truncated = _maxsize_repr(inputs.get("example")) + return {"run": run_truncated, "example": example_truncated} + + class DynamicComparisonRunEvaluator: """Compare predictions (as traces) from 2 or more runs.""" @@ -370,7 +435,9 @@ def __init__( from langsmith import run_helpers # type: ignore if afunc is not None: - self.afunc = run_helpers.ensure_traceable(afunc) + self.afunc = run_helpers.ensure_traceable( + afunc, process_inputs=_serialize_inputs + ) self._name = getattr(afunc, "__name__", "DynamicRunEvaluator") if inspect.iscoroutinefunction(func): if afunc is not None: @@ -379,7 +446,9 @@ def __init__( "also provided. If providing both, func should be a regular " "function to avoid ambiguity." 
) - self.afunc = run_helpers.ensure_traceable(func) + self.afunc = run_helpers.ensure_traceable( + func, process_inputs=_serialize_inputs + ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") else: self.func = run_helpers.ensure_traceable( @@ -389,7 +458,8 @@ def __init__( _COMPARISON_OUTPUT, ], func, - ) + ), + process_inputs=_serialize_inputs, ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") @@ -480,7 +550,7 @@ def __call__( def __repr__(self) -> str: """Represent the DynamicRunEvaluator object.""" - return f"" + return f"" @staticmethod def _get_tags(runs: Sequence[Run]) -> List[str]: diff --git a/python/langsmith/evaluation/integrations/_langchain.py b/python/langsmith/evaluation/integrations/_langchain.py index 510e79c12..3d4baa62f 100644 --- a/python/langsmith/evaluation/integrations/_langchain.py +++ b/python/langsmith/evaluation/integrations/_langchain.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Any, Callable, Optional, TypedDict, Union -from langsmith.evaluation.evaluator import run_evaluator +from langsmith.evaluation.evaluator import DynamicRunEvaluator from langsmith.run_helpers import traceable from langsmith.schemas import Example, Run @@ -44,6 +44,7 @@ class LangChainStringEvaluator: Converting a LangChainStringEvaluator to a RunEvaluator: >>> from langsmith.evaluation import LangChainStringEvaluator + >>> from langchain_openai import ChatOpenAI >>> evaluator = LangChainStringEvaluator( ... "criteria", ... config={ @@ -51,6 +52,7 @@ class LangChainStringEvaluator: ... "usefulness": "The prediction is useful if" ... " it is correct and/or asks a useful followup question." ... }, + ... "llm": ChatOpenAI(model="gpt-4o"), ... }, ... ) >>> run_evaluator = evaluator.as_run_evaluator() @@ -111,6 +113,7 @@ class LangChainStringEvaluator: ... "accuracy": "Score 1: Completely inaccurate\nScore 5: Somewhat accurate\nScore 10: Completely accurate" ... }, ... "normalize_by": 10, + ... 
"llm": ChatAnthropic(model="claude-3-opus-20240229"), ... }, ... prepare_data=prepare_data, ... ) @@ -257,4 +260,14 @@ def evaluate(run: Run, example: Optional[Example] = None) -> dict: results = self.evaluator.evaluate_strings(**eval_inputs) return {"key": self.evaluator.evaluation_name, **results} - return run_evaluator(evaluate) + @traceable(name=self.evaluator.evaluation_name) + async def aevaluate(run: Run, example: Optional[Example] = None) -> dict: + eval_inputs = ( + prepare_evaluator_inputs(run, example) + if self._prepare_data is None + else self._prepare_data(run, example) + ) + results = await self.evaluator.aevaluate_strings(**eval_inputs) + return {"key": self.evaluator.evaluation_name, **results} + + return DynamicRunEvaluator(evaluate, aevaluate) diff --git a/python/langsmith/evaluation/integrations/test.excalidraw.png b/python/langsmith/evaluation/integrations/test.excalidraw.png new file mode 100644 index 000000000..72b17426d Binary files /dev/null and b/python/langsmith/evaluation/integrations/test.excalidraw.png differ diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py new file mode 100644 index 000000000..3ae7b333c --- /dev/null +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -0,0 +1,292 @@ +"""Contains the LLMEvaluator class for building LLM-as-a-judge evaluators.""" + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast + +from pydantic import BaseModel + +from langsmith._internal._beta_decorator import warn_beta +from langsmith.evaluation import EvaluationResult, EvaluationResults, RunEvaluator +from langsmith.schemas import Example, Run + + +class CategoricalScoreConfig(BaseModel): + """Configuration for a categorical score.""" + + key: str + choices: List[str] + description: str + include_explanation: bool = False + explanation_description: Optional[str] = None + + +class ContinuousScoreConfig(BaseModel): + """Configuration for a continuous score.""" + + 
key: str + min: float = 0 + max: float = 1 + description: str + include_explanation: bool = False + explanation_description: Optional[str] = None + + +def _create_score_json_schema( + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], +) -> dict: + properties: Dict[str, Any] = {} + if isinstance(score_config, CategoricalScoreConfig): + properties["score"] = { + "type": "string", + "enum": score_config.choices, + "description": f"The score for the evaluation, one of " + f"{', '.join(score_config.choices)}.", + } + elif isinstance(score_config, ContinuousScoreConfig): + properties["score"] = { + "type": "number", + "minimum": score_config.min, + "maximum": score_config.max, + "description": f"The score for the evaluation, between " + f"{score_config.min} and {score_config.max}, inclusive.", + } + else: + raise ValueError("Invalid score type. Must be 'categorical' or 'continuous'") + + if score_config.include_explanation: + properties["explanation"] = { + "type": "string", + "description": ( + "The explanation for the score." + if score_config.explanation_description is None + else score_config.explanation_description + ), + } + + return { + "title": score_config.key, + "description": score_config.description, + "type": "object", + "properties": properties, + "required": ( + ["score", "explanation"] if score_config.include_explanation else ["score"] + ), + } + + +class LLMEvaluator(RunEvaluator): + """A class for building LLM-as-a-judge evaluators.""" + + def __init__( + self, + *, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, + model_name: str = "gpt-4o", + model_provider: str = "openai", + **kwargs, + ): + """Initialize the LLMEvaluator. + + Args: + prompt_template (Union[str, List[Tuple[str, str]]): The prompt + template to use for the evaluation. 
If a string is provided, it is + assumed to be a human / user message. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The configuration for the score, either categorical or continuous. + map_variables (Optional[Callable[[Run, Example], dict]], optional): + A function that maps the run and example to the variables in the + prompt. Defaults to None. If None, it is assumed that the prompt + only requires 'input', 'output', and 'expected'. + model_name (Optional[str], optional): The model to use for the evaluation. + Defaults to "gpt-4o". + model_provider (Optional[str], optional): The model provider to use + for the evaluation. Defaults to "openai". + """ + try: + from langchain.chat_models import init_chat_model + except ImportError as e: + raise ImportError( + "LLMEvaluator requires langchain to be installed. " + "Please install langchain by running `pip install langchain`." + ) from e + + chat_model = init_chat_model( + model=model_name, model_provider=model_provider, **kwargs + ) + + self._initialize(prompt_template, score_config, map_variables, chat_model) + + @classmethod + def from_model( + cls, + model: Any, + *, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, + ): + """Create an LLMEvaluator instance from a BaseChatModel instance. + + Args: + model (BaseChatModel): The chat model instance to use for the evaluation. + prompt_template (Union[str, List[Tuple[str, str]]): The prompt + template to use for the evaluation. If a string is provided, it is + assumed to be a system message. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The configuration for the score, either categorical or continuous. + map_variables (Optional[Callable[[Run, Example]], dict]], optional): + A function that maps the run and example to the variables in the + prompt. Defaults to None. 
If None, it is assumed that the prompt + only requires 'input', 'output', and 'expected'. + + Returns: + LLMEvaluator: An instance of LLMEvaluator. + """ + instance = cls.__new__(cls) + instance._initialize(prompt_template, score_config, map_variables, model) + return instance + + def _initialize( + self, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]], + chat_model: Any, + ): + """Shared initialization code for __init__ and from_model. + + Args: + prompt_template (Union[str, List[Tuple[str, str]]): The prompt template. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The score configuration. + map_variables (Optional[Callable[[Run, Example]], dict]]): + Function to map variables. + chat_model (BaseChatModel): The chat model instance. + """ + try: + from langchain_core.language_models.chat_models import BaseChatModel + from langchain_core.prompts import ChatPromptTemplate + except ImportError as e: + raise ImportError( + "LLMEvaluator requires langchain-core to be installed. " + "Please install langchain-core by running `pip install langchain-core`." + ) from e + + if not ( + isinstance(chat_model, BaseChatModel) + and hasattr(chat_model, "with_structured_output") + ): + raise ValueError( + "chat_model must be an instance of " + "BaseLanguageModel and support structured output." 
+ ) + + if isinstance(prompt_template, str): + self.prompt = ChatPromptTemplate.from_messages([("human", prompt_template)]) + else: + self.prompt = ChatPromptTemplate.from_messages(prompt_template) + + if set(self.prompt.input_variables) - {"input", "output", "expected"}: + if not map_variables: + raise ValueError( + "map_inputs must be provided if the prompt template contains " + "variables other than 'input', 'output', and 'expected'" + ) + self.map_variables = map_variables + + self.score_config = score_config + self.score_schema = _create_score_json_schema(self.score_config) + + chat_model = chat_model.with_structured_output(self.score_schema) + self.runnable = self.prompt | chat_model + + @warn_beta + def evaluate_run( + self, run: Run, example: Optional[Example] = None + ) -> Union[EvaluationResult, EvaluationResults]: + """Evaluate a run.""" + variables = self._prepare_variables(run, example) + output: dict = cast(dict, self.runnable.invoke(variables)) + return self._parse_output(output) + + @warn_beta + async def aevaluate_run( + self, run: Run, example: Optional[Example] = None + ) -> Union[EvaluationResult, EvaluationResults]: + """Asynchronously evaluate a run.""" + variables = self._prepare_variables(run, example) + output: dict = cast(dict, await self.runnable.ainvoke(variables)) + return self._parse_output(output) + + def _prepare_variables(self, run: Run, example: Optional[Example]) -> dict: + """Prepare variables for model invocation.""" + if self.map_variables: + return self.map_variables(run, example) + + variables = {} + if "input" in self.prompt.input_variables: + if len(run.inputs) == 0: + raise ValueError( + "No input keys are present in run.inputs but the prompt " + "requires 'input'." + ) + if len(run.inputs) != 1: + raise ValueError( + "Multiple input keys are present in run.inputs. Please provide " + "a map_variables function." 
+ ) + variables["input"] = list(run.inputs.values())[0] + + if "output" in self.prompt.input_variables: + if not run.outputs: + raise ValueError( + "No output keys are present in run.outputs but the prompt " + "requires 'output'." + ) + if len(run.outputs) == 0: + raise ValueError( + "No output keys are present in run.outputs but the prompt " + "requires 'output'." + ) + if len(run.outputs) != 1: + raise ValueError( + "Multiple output keys are present in run.outputs. Please " + "provide a map_variables function." + ) + variables["output"] = list(run.outputs.values())[0] + + if "expected" in self.prompt.input_variables: + if not example or not example.outputs: + raise ValueError( + "No example or example outputs is provided but the prompt " + "requires 'expected'." + ) + if len(example.outputs) == 0: + raise ValueError( + "No output keys are present in example.outputs but the prompt " + "requires 'expected'." + ) + if len(example.outputs) != 1: + raise ValueError( + "Multiple output keys are present in example.outputs. Please " + "provide a map_variables function." 
+ ) + variables["expected"] = list(example.outputs.values())[0] + + return variables + + def _parse_output(self, output: dict) -> Union[EvaluationResult, EvaluationResults]: + """Parse the model output into an evaluation result.""" + if isinstance(self.score_config, CategoricalScoreConfig): + value = output["score"] + explanation = output.get("explanation", None) + return EvaluationResult( + key=self.score_config.key, value=value, comment=explanation + ) + elif isinstance(self.score_config, ContinuousScoreConfig): + score = output["score"] + explanation = output.get("explanation", None) + return EvaluationResult( + key=self.score_config.key, score=score, comment=explanation + ) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 3d4753f67..6da368a06 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -16,15 +16,18 @@ TYPE_CHECKING, Any, AsyncGenerator, + AsyncIterator, Awaitable, Callable, Dict, Generator, Generic, + Iterator, List, Mapping, Optional, Protocol, + Sequence, Tuple, Type, TypedDict, @@ -43,6 +46,8 @@ from langsmith.env import _runtime_env if TYPE_CHECKING: + from types import TracebackType + from langchain_core.runnables import Runnable LOGGER = logging.getLogger(__name__) @@ -55,12 +60,14 @@ _TRACING_ENABLED = contextvars.ContextVar[Optional[bool]]( "_TRACING_ENABLED", default=None ) +_CLIENT = contextvars.ContextVar[Optional[ls_client.Client]]("_CLIENT", default=None) _CONTEXT_KEYS: Dict[str, contextvars.ContextVar] = { "parent": _PARENT_RUN_TREE, "project_name": _PROJECT_NAME, "tags": _TAGS, "metadata": _METADATA, "enabled": _TRACING_ENABLED, + "client": _CLIENT, } @@ -80,17 +87,11 @@ def get_tracing_context( "tags": _TAGS.get(), "metadata": _METADATA.get(), "enabled": _TRACING_ENABLED.get(), + "client": _CLIENT.get(), } return {k: context.get(v) for k, v in _CONTEXT_KEYS.items()} -def _set_tracing_context(context: Dict[str, Any]): - """Set the tracing context.""" - for k, v in 
context.items(): - var = _CONTEXT_KEYS[k] - var.set(v) - - @contextlib.contextmanager def tracing_context( *, @@ -99,6 +100,7 @@ def tracing_context( metadata: Optional[Dict[str, Any]] = None, parent: Optional[Union[run_trees.RunTree, Mapping, str]] = None, enabled: Optional[bool] = None, + client: Optional[ls_client.Client] = None, **kwargs: Any, ) -> Generator[None, None, None]: """Set the tracing context for a block of code. @@ -110,9 +112,11 @@ def tracing_context( parent: The parent run to use for the context. Can be a Run/RunTree object, request headers (for distributed tracing), or the dotted order string. Defaults to None. + client: The client to use for logging the run to LangSmith. Defaults to None, enabled: Whether tracing is enabled. Defaults to None, meaning it will use the current context value or environment variables. + """ if kwargs: # warn @@ -126,7 +130,6 @@ def tracing_context( tags = sorted(set(tags or []) | set(parent_run.tags or [])) metadata = {**parent_run.metadata, **(metadata or {})} enabled = enabled if enabled is not None else current_context.get("enabled") - _set_tracing_context( { "parent": parent_run, @@ -134,6 +137,7 @@ def tracing_context( "tags": tags, "metadata": metadata, "enabled": enabled, + "client": client, } ) try: @@ -157,11 +161,31 @@ def is_traceable_function( ) -def ensure_traceable(func: Callable[P, R]) -> SupportsLangsmithExtra[P, R]: +def ensure_traceable( + func: Callable[P, R], + *, + name: Optional[str] = None, + metadata: Optional[Mapping[str, Any]] = None, + tags: Optional[List[str]] = None, + client: Optional[ls_client.Client] = None, + reduce_fn: Optional[Callable[[Sequence], dict]] = None, + project_name: Optional[str] = None, + process_inputs: Optional[Callable[[dict], dict]] = None, + process_outputs: Optional[Callable[..., dict]] = None, +) -> SupportsLangsmithExtra[P, R]: """Ensure that a function is traceable.""" if is_traceable_function(func): return func - return traceable()(func) + return traceable( + 
name=name, + metadata=metadata, + tags=tags, + client=client, + reduce_fn=reduce_fn, + project_name=project_name, + process_inputs=process_inputs, + process_outputs=process_outputs, + )(func) def is_async(func: Callable) -> bool: @@ -174,16 +198,28 @@ def is_async(func: Callable) -> bool: class LangSmithExtra(TypedDict, total=False): """Any additional info to be injected into the run dynamically.""" + name: Optional[str] + """Optional name for the run.""" reference_example_id: Optional[ls_client.ID_TYPE] + """Optional ID of a reference example.""" run_extra: Optional[Dict] + """Optional additional run information.""" parent: Optional[Union[run_trees.RunTree, str, Mapping]] + """Optional parent run, can be a RunTree, string, or mapping.""" run_tree: Optional[run_trees.RunTree] # TODO: Deprecate + """Optional run tree (deprecated).""" project_name: Optional[str] + """Optional name of the project.""" metadata: Optional[Dict[str, Any]] + """Optional metadata for the run.""" tags: Optional[List[str]] + """Optional list of tags for the run.""" run_id: Optional[ls_client.ID_TYPE] + """Optional ID for the run.""" client: Optional[ls_client.Client] + """Optional LangSmith client.""" on_end: Optional[Callable[[run_trees.RunTree], Any]] + """Optional callback function to be called when the run ends.""" R = TypeVar("R", covariant=True) @@ -239,9 +275,10 @@ def traceable( metadata: Optional[Mapping[str, Any]] = None, tags: Optional[List[str]] = None, client: Optional[ls_client.Client] = None, - reduce_fn: Optional[Callable] = None, + reduce_fn: Optional[Callable[[Sequence], dict]] = None, project_name: Optional[str] = None, process_inputs: Optional[Callable[[dict], dict]] = None, + process_outputs: Optional[Callable[..., dict]] = None, _invocation_params_fn: Optional[Callable[[dict], dict]] = None, ) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ... @@ -262,13 +299,15 @@ def traceable( None, which will use the default client. 
reduce_fn: A function to reduce the output of the function if the function returns a generator. Defaults to None, which means the values will be - logged as a list. Note: if the iterator is never exhausted (e.g. - the function returns an infinite generator), this will never be - called, and the run itself will be stuck in a pending state. + logged as a list. Note: if the iterator is never exhausted (e.g. + the function returns an infinite generator), this will never be + called, and the run itself will be stuck in a pending state. project_name: The name of the project to log the run to. Defaults to None, which will use the default project. - process_inputs: A function to filter the inputs to the run. Defaults to None. - + process_inputs: Custom serialization / processing function for inputs. + Defaults to None. + process_outputs: Custom serialization / processing function for outputs. + Defaults to None. Returns: Union[Callable, Callable[[Callable], Callable]]: The decorated function. @@ -277,15 +316,10 @@ def traceable( - Requires that LANGSMITH_TRACING_V2 be set to 'true' in the environment. Examples: - .. code-block:: python - import httpx - import asyncio - - from typing import Iterable - from langsmith import traceable, Client + Basic usage: + .. code-block:: python - # Basic usage: @traceable def my_function(x: float, y: float) -> float: return x + y @@ -306,8 +340,10 @@ async def my_async_function(query_params: dict) -> dict: asyncio.run(my_async_function({"param": "value"})) + Streaming data with a generator: + + .. code-block:: python - # Streaming data with a generator: @traceable def my_generator(n: int) -> Iterable: for i in range(n): @@ -317,8 +353,10 @@ def my_generator(n: int) -> Iterable: for item in my_generator(5): print(item) + Async streaming data: + + .. 
code-block:: python - # Async streaming data @traceable async def my_async_generator(query_params: dict) -> Iterable: async with httpx.AsyncClient() as http_client: @@ -337,8 +375,10 @@ async def async_code(): asyncio.run(async_code()) + Specifying a run type and name: + + .. code-block:: python - # Specifying a run type and name: @traceable(name="CustomName", run_type="tool") def another_function(a: float, b: float) -> float: return a * b @@ -346,8 +386,10 @@ def another_function(a: float, b: float) -> float: another_function(5, 6) + Logging with custom metadata and tags: + + .. code-block:: python - # Logging with custom metadata and tags: @traceable( metadata={"version": "1.0", "author": "John Doe"}, tags=["beta", "test"] ) @@ -357,7 +399,10 @@ def tagged_function(x): tagged_function(5) - # Specifying a custom client and project name: + Specifying a custom client and project name: + + .. code-block:: python + custom_client = Client(api_key="your_api_key") @@ -368,15 +413,17 @@ def project_specific_function(data): project_specific_function({"data": "to process"}) + Manually passing langsmith_extra: + + .. 
code-block:: python - # Manually passing langsmith_extra: @traceable def manual_extra_function(x): return x**2 manual_extra_function(5, langsmith_extra={"metadata": {"version": "1.0"}}) - """ # noqa: E501 + """ run_type: ls_client.RUN_TYPE_T = ( args[0] if args and isinstance(args[0], str) @@ -412,6 +459,11 @@ def manual_extra_function(x): process_inputs=kwargs.pop("process_inputs", None), invocation_params_fn=kwargs.pop("_invocation_params_fn", None), ) + outputs_processor = kwargs.pop("process_outputs", None) + _on_run_end = functools.partial( + _handle_container_end, outputs_processor=outputs_processor + ) + if kwargs: warnings.warn( f"The following keyword arguments are not recognized and will be ignored: " @@ -431,7 +483,8 @@ async def async_wrapper( **kwargs: Any, ) -> Any: """Async version of wrapper function.""" - run_container = _setup_run( + run_container = await aitertools.aio_to_thread( + _setup_run, func, container_input=container_input, langsmith_extra=langsmith_extra, @@ -440,7 +493,7 @@ async def async_wrapper( ) try: - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if func_accepts_parent_run: kwargs["run_tree"] = run_container["new_run"] if not func_accepts_config: @@ -457,16 +510,22 @@ async def async_wrapper( ): function_result = await fr_coro except BaseException as e: - _container_end(run_container, error=e) + # shield from cancellation, given we're catching all exceptions + await asyncio.shield( + aitertools.aio_to_thread(_on_run_end, run_container, error=e) + ) raise e - _container_end(run_container, outputs=function_result) + await aitertools.aio_to_thread( + _on_run_end, run_container, outputs=function_result + ) return function_result @functools.wraps(func) async def async_generator_wrapper( *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any ) -> AsyncGenerator: - run_container = _setup_run( + run_container = await aitertools.aio_to_thread( 
+ _setup_run, func, container_input=container_input, langsmith_extra=langsmith_extra, @@ -484,7 +543,7 @@ async def async_generator_wrapper( kwargs.pop("config", None) async_gen_result = func(*args, **kwargs) # Can't iterate through if it's a coroutine - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if inspect.iscoroutine(async_gen_result): if accepts_context: async_gen_result = await asyncio.create_task( @@ -496,36 +555,23 @@ async def async_generator_wrapper( **get_tracing_context(run_container["context"]) ): async_gen_result = await async_gen_result - try: - while True: - if accepts_context: - item = await asyncio.create_task( # type: ignore[call-arg, var-annotated] - aitertools.py_anext(async_gen_result), # type: ignore[arg-type] - context=run_container["context"], - ) - else: - # Python < 3.11 - with tracing_context( - **get_tracing_context(run_container["context"]) - ): - item = await aitertools.py_anext(async_gen_result) - if run_type == "llm": - if run_container["new_run"]: - run_container["new_run"].add_event( - { - "name": "new_token", - "time": datetime.datetime.now( - datetime.timezone.utc - ).isoformat(), - "kwargs": {"token": item}, - } - ) - results.append(item) - yield item - except StopAsyncIteration: - pass + + async for item in _process_async_iterator( + generator=async_gen_result, + run_container=run_container, + is_llm_run=( + run_container["new_run"].run_type == "llm" + if run_container["new_run"] + else False + ), + accepts_context=accepts_context, + results=results, + ): + yield item except BaseException as e: - _container_end(run_container, error=e) + await asyncio.shield( + aitertools.aio_to_thread(_on_run_end, run_container, error=e) + ) raise e if results: if reduce_fn: @@ -538,7 +584,9 @@ async def async_generator_wrapper( function_result = results else: function_result = None - _container_end(run_container, outputs=function_result) + await 
aitertools.aio_to_thread( + _on_run_end, run_container, outputs=function_result + ) @functools.wraps(func) def wrapper( @@ -564,9 +612,9 @@ def wrapper( kwargs.pop("config", None) function_result = run_container["context"].run(func, *args, **kwargs) except BaseException as e: - _container_end(run_container, error=e) + _on_run_end(run_container, error=e) raise e - _container_end(run_container, outputs=function_result) + _on_run_end(run_container, outputs=function_result) return function_result @functools.wraps(func) @@ -584,40 +632,29 @@ def generator_wrapper( inspect.signature(func).parameters.get("run_tree", None) is not None ) results: List[Any] = [] + function_return: Any = None + try: if func_accepts_parent_run: kwargs["run_tree"] = run_container["new_run"] - # TODO: Nesting is ambiguous if a nested traceable function is only - # called mid-generation. Need to explicitly accept run_tree to get - # around this. if not func_accepts_config: kwargs.pop("config", None) generator_result = run_container["context"].run(func, *args, **kwargs) - try: - while True: - item = run_container["context"].run(next, generator_result) - if run_type == "llm": - if run_container["new_run"]: - run_container["new_run"].add_event( - { - "name": "new_token", - "time": datetime.datetime.now( - datetime.timezone.utc - ).isoformat(), - "kwargs": {"token": item}, - } - ) - results.append(item) - try: - yield item - except GeneratorExit: - break - except StopIteration: - pass + + function_return = yield from _process_iterator( + generator_result, + run_container, + is_llm_run=run_type == "llm", + results=results, + ) + + if function_return is not None: + results.append(function_return) except BaseException as e: - _container_end(run_container, error=e) + _on_run_end(run_container, error=e) raise e + if results: if reduce_fn: try: @@ -629,19 +666,91 @@ def generator_wrapper( function_result = results else: function_result = None - _container_end(run_container, outputs=function_result) + 
_on_run_end(run_container, outputs=function_result) + return function_return + + # "Stream" functions (used in methods like OpenAI/Anthropic's SDKs) + # are functions that return iterable responses and should not be + # considered complete until the streaming is completed + @functools.wraps(func) + def stream_wrapper( + *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any + ) -> Any: + trace_container = _setup_run( + func, + container_input=container_input, + langsmith_extra=langsmith_extra, + args=args, + kwargs=kwargs, + ) + + try: + if func_accepts_parent_run: + kwargs["run_tree"] = trace_container["new_run"] + if not func_accepts_config: + kwargs.pop("config", None) + stream = trace_container["context"].run(func, *args, **kwargs) + except Exception as e: + _on_run_end(trace_container, error=e) + raise + + if hasattr(stream, "__iter__"): + return _TracedStream(stream, trace_container, reduce_fn) + elif hasattr(stream, "__aiter__"): + # sync function -> async iterable (unexpected) + return _TracedAsyncStream(stream, trace_container, reduce_fn) + + # If it's not iterable, end the trace immediately + _on_run_end(trace_container, outputs=stream) + return stream + + @functools.wraps(func) + async def async_stream_wrapper( + *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any + ) -> Any: + trace_container = await aitertools.aio_to_thread( + _setup_run, + func, + container_input=container_input, + langsmith_extra=langsmith_extra, + args=args, + kwargs=kwargs, + ) + + try: + if func_accepts_parent_run: + kwargs["run_tree"] = trace_container["new_run"] + if not func_accepts_config: + kwargs.pop("config", None) + stream = await func(*args, **kwargs) + except Exception as e: + await aitertools.aio_to_thread(_on_run_end, trace_container, error=e) + raise + + if hasattr(stream, "__aiter__"): + return _TracedAsyncStream(stream, trace_container, reduce_fn) + elif hasattr(stream, "__iter__"): + # Async function -> sync iterable + 
return _TracedStream(stream, trace_container, reduce_fn) + + # If it's not iterable, end the trace immediately + await aitertools.aio_to_thread(_on_run_end, trace_container, outputs=stream) + return stream if inspect.isasyncgenfunction(func): selected_wrapper: Callable = async_generator_wrapper + elif inspect.isgeneratorfunction(func): + selected_wrapper = generator_wrapper elif is_async(func): if reduce_fn: - selected_wrapper = async_generator_wrapper + selected_wrapper = async_stream_wrapper else: selected_wrapper = async_wrapper - elif reduce_fn or inspect.isgeneratorfunction(func): - selected_wrapper = generator_wrapper else: - selected_wrapper = wrapper + if reduce_fn: + selected_wrapper = stream_wrapper + else: + selected_wrapper = wrapper setattr(selected_wrapper, "__langsmith_traceable__", True) sig = inspect.signature(selected_wrapper) if not sig.parameters.get("config"): @@ -673,91 +782,284 @@ def generator_wrapper( return decorator -@contextlib.contextmanager -def trace( - name: str, - run_type: ls_client.RUN_TYPE_T = "chain", - *, - inputs: Optional[Dict] = None, - extra: Optional[Dict] = None, - project_name: Optional[str] = None, - parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Mapping[str, Any]] = None, - client: Optional[ls_client.Client] = None, - run_id: Optional[ls_client.ID_TYPE] = None, - reference_example_id: Optional[ls_client.ID_TYPE] = None, - exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None, - **kwargs: Any, -) -> Generator[run_trees.RunTree, None, None]: - """Context manager for creating a run tree.""" - if kwargs: - # In case someone was passing an executor before. 
- warnings.warn( - "The `trace` context manager no longer supports the following kwargs: " - f"{sorted(kwargs.keys())}.", - DeprecationWarning, - ) - old_ctx = get_tracing_context() - outer_tags = _TAGS.get() - outer_metadata = _METADATA.get() - outer_project = _PROJECT_NAME.get() or utils.get_tracer_project() - parent_run_ = _get_parent_run( - {"parent": parent, "run_tree": kwargs.get("run_tree"), "client": client} - ) +class trace: + """Manage a LangSmith run in context. - # Merge and set context variables - tags_ = sorted(set((tags or []) + (outer_tags or []))) - _TAGS.set(tags_) - metadata = {**(metadata or {}), **(outer_metadata or {}), "ls_method": "trace"} - _METADATA.set(metadata) + This class can be used as both a synchronous and asynchronous context manager. - extra_outer = extra or {} - extra_outer["metadata"] = metadata + Args: + name (str): Name of the run. + run_type (ls_client.RUN_TYPE_T, optional): Type of run (e.g., "chain", "llm", "tool"). Defaults to "chain". + inputs (Optional[Dict], optional): Initial input data for the run. Defaults to None. + project_name (Optional[str], optional): Project name to associate the run with. Defaults to None. + parent (Optional[Union[run_trees.RunTree, str, Mapping]], optional): Parent run. Can be a RunTree, dotted order string, or tracing headers. Defaults to None. + tags (Optional[List[str]], optional): List of tags for the run. Defaults to None. + metadata (Optional[Mapping[str, Any]], optional): Additional metadata for the run. Defaults to None. + client (Optional[ls_client.Client], optional): LangSmith client for custom settings. Defaults to None. + run_id (Optional[ls_client.ID_TYPE], optional): Preset identifier for the run. Defaults to None. + reference_example_id (Optional[ls_client.ID_TYPE], optional): Associates run with a dataset example. Only for root runs in evaluation. Defaults to None. + exceptions_to_handle (Optional[Tuple[Type[BaseException], ...]], optional): Exception types to ignore. 
Defaults to None. + extra (Optional[Dict], optional): Extra data to send to LangSmith. Use 'metadata' instead. Defaults to None. - project_name_ = project_name or outer_project - if parent_run_ is not None: - new_run = parent_run_.create_child( - name=name, - run_id=run_id, - run_type=run_type, - extra=extra_outer, - inputs=inputs, - tags=tags_, - ) - else: - new_run = run_trees.RunTree( - name=name, - id=ls_client._ensure_uuid(run_id), - reference_example_id=ls_client._ensure_uuid( - reference_example_id, accept_null=True - ), - run_type=run_type, - extra=extra_outer, - project_name=project_name_, # type: ignore[arg-type] - inputs=inputs or {}, - tags=tags_, - client=client, # type: ignore[arg-type] + Examples: + Synchronous usage: + + .. code-block:: python + + >>> with trace("My Operation", run_type="tool", tags=["important"]) as run: + ... result = "foo" # Perform operation + ... run.metadata["some-key"] = "some-value" + ... run.end(outputs={"result": result}) + + Asynchronous usage: + + .. code-block:: python + + >>> async def main(): + ... async with trace("Async Operation", run_type="tool", tags=["async"]) as run: + ... result = "foo" # Await async operation + ... run.metadata["some-key"] = "some-value" + ... # "end" just adds the outputs and sets error to None + ... # The actual patching of the run happens when the context exits + ... run.end(outputs={"result": result}) + >>> asyncio.run(main()) + + Handling specific exceptions: + + .. code-block:: python + + >>> import pytest + >>> import sys + >>> with trace("Test", exceptions_to_handle=(pytest.skip.Exception,)): + ... if sys.platform == "win32": # Just an example + ... pytest.skip("Skipping test for windows") + ... 
result = "foo" # Perform test operation + """ + + def __init__( + self, + name: str, + run_type: ls_client.RUN_TYPE_T = "chain", + *, + inputs: Optional[Dict] = None, + extra: Optional[Dict] = None, + project_name: Optional[str] = None, + parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None, + tags: Optional[List[str]] = None, + metadata: Optional[Mapping[str, Any]] = None, + client: Optional[ls_client.Client] = None, + run_id: Optional[ls_client.ID_TYPE] = None, + reference_example_id: Optional[ls_client.ID_TYPE] = None, + exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None, + **kwargs: Any, + ): + """Initialize the trace context manager. + + Warns if unsupported kwargs are passed. + """ + if kwargs: + warnings.warn( + "The `trace` context manager no longer supports the following kwargs: " + f"{sorted(kwargs.keys())}.", + DeprecationWarning, + ) + self.name = name + self.run_type = run_type + self.inputs = inputs + self.extra = extra + self.project_name = project_name + self.parent = parent + # The run tree is deprecated. Keeping for backwards compat. + # Will fully merge within parent later. + self.run_tree = kwargs.get("run_tree") + self.tags = tags + self.metadata = metadata + self.client = client + self.run_id = run_id + self.reference_example_id = reference_example_id + self.exceptions_to_handle = exceptions_to_handle + self.new_run: Optional[run_trees.RunTree] = None + self.old_ctx: Optional[dict] = None + + def _setup(self) -> run_trees.RunTree: + """Set up the tracing context and create a new run. + + This method initializes the tracing context, merges tags and metadata, + creates a new run (either as a child of an existing run or as a new root run), + and sets up the necessary context variables. + + Returns: + run_trees.RunTree: The newly created run. 
+ """ + self.old_ctx = get_tracing_context() + enabled = utils.tracing_is_enabled(self.old_ctx) + + outer_tags = _TAGS.get() + outer_metadata = _METADATA.get() + client_ = self.client or self.old_ctx.get("client") + parent_run_ = _get_parent_run( + { + "parent": self.parent, + "run_tree": self.run_tree, + "client": client_, + } ) - new_run.post() - _PARENT_RUN_TREE.set(new_run) - _PROJECT_NAME.set(project_name_) - try: - yield new_run - except (Exception, KeyboardInterrupt, BaseException) as e: - if exceptions_to_handle and isinstance(e, exceptions_to_handle): - tb = None + tags_ = sorted(set((self.tags or []) + (outer_tags or []))) + metadata = { + **(self.metadata or {}), + **(outer_metadata or {}), + "ls_method": "trace", + } + + extra_outer = self.extra or {} + extra_outer["metadata"] = metadata + + project_name_ = _get_project_name(self.project_name) + + if parent_run_ is not None and enabled: + self.new_run = parent_run_.create_child( + name=self.name, + run_id=self.run_id, + run_type=self.run_type, + extra=extra_outer, + inputs=self.inputs, + tags=tags_, + ) else: - tb = utils._format_exc() - tb = f"{e.__class__.__name__}: {e}\n\n{tb}" - new_run.end(error=tb) - new_run.patch() - raise e - finally: - # Reset the old context - _set_tracing_context(old_ctx) - new_run.patch() + self.new_run = run_trees.RunTree( + name=self.name, + id=ls_client._ensure_uuid(self.run_id), + reference_example_id=ls_client._ensure_uuid( + self.reference_example_id, accept_null=True + ), + run_type=self.run_type, + extra=extra_outer, + project_name=project_name_ or "default", + inputs=self.inputs or {}, + tags=tags_, + client=client_, # type: ignore + ) + + if enabled: + self.new_run.post() + _TAGS.set(tags_) + _METADATA.set(metadata) + _PARENT_RUN_TREE.set(self.new_run) + _PROJECT_NAME.set(project_name_) + _CLIENT.set(client_) + + return self.new_run + + def _teardown( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: 
Optional[TracebackType], + ) -> None: + """Clean up the tracing context and finalize the run. + + This method handles exceptions, ends the run if necessary, + patches the run if it's not disabled, and resets the tracing context. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + if self.new_run is None: + return + if exc_type is not None: + if self.exceptions_to_handle and issubclass( + exc_type, self.exceptions_to_handle + ): + tb = None + else: + tb = utils._format_exc() + tb = f"{exc_type.__name__}: {exc_value}\n\n{tb}" + self.new_run.end(error=tb) + if self.old_ctx is not None: + enabled = utils.tracing_is_enabled(self.old_ctx) + if enabled: + self.new_run.patch() + + _set_tracing_context(self.old_ctx) + else: + warnings.warn("Tracing context was not set up properly.", RuntimeWarning) + + def __enter__(self) -> run_trees.RunTree: + """Enter the context manager synchronously. + + Returns: + run_trees.RunTree: The newly created run. + """ + return self._setup() + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + """Exit the context manager synchronously. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + self._teardown(exc_type, exc_value, traceback) + + async def __aenter__(self) -> run_trees.RunTree: + """Enter the context manager asynchronously. + + Returns: + run_trees.RunTree: The newly created run. 
+ """ + ctx = copy_context() + result = await aitertools.aio_to_thread(self._setup, __ctx=ctx) + # Set the context for the current thread + _set_tracing_context(get_tracing_context(ctx)) + return result + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + """Exit the context manager asynchronously. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + ctx = copy_context() + if exc_type is not None: + await asyncio.shield( + aitertools.aio_to_thread( + self._teardown, exc_type, exc_value, traceback, __ctx=ctx + ) + ) + else: + await aitertools.aio_to_thread( + self._teardown, exc_type, exc_value, traceback, __ctx=ctx + ) + _set_tracing_context(get_tracing_context(ctx)) + + +def _get_project_name(project_name: Optional[str]) -> Optional[str]: + prt = _PARENT_RUN_TREE.get() + return ( + # Maintain tree consistency first + _PROJECT_NAME.get() + or (prt.session_name if prt else None) + # Then check the passed in value + or project_name + # fallback to the default for the environment + or utils.get_tracer_project() + ) def as_runnable(traceable_fn: Callable) -> Runnable: @@ -876,7 +1178,6 @@ async def awrap_traceable(inputs: dict, config: RunnableConfig) -> Any: ## Private Methods and Objects - _VALID_RUN_TYPES = { "tool", "chain", @@ -919,7 +1220,7 @@ def _container_end( container: _TraceableContainer, outputs: Optional[Any] = None, error: Optional[BaseException] = None, -): +) -> None: """End the run.""" run_tree = container.get("new_run") if run_tree is None: @@ -932,11 +1233,6 @@ def _container_end( error_ = f"{repr(error)}\n\n{stacktrace}" run_tree.end(outputs=outputs_, error=error_) run_tree.patch() - if error: - try: - LOGGER.info(f"See trace: {run_tree.get_url()}") - except 
Exception: - pass on_end = container.get("on_end") if on_end is not None and callable(on_end): try: @@ -963,12 +1259,19 @@ def _get_parent_run( return parent if isinstance(parent, dict): return run_trees.RunTree.from_headers( - parent, client=langsmith_extra.get("client") + parent, + client=langsmith_extra.get("client"), + # Precedence: headers -> cvar -> explicit -> env var + project_name=_get_project_name(langsmith_extra.get("project_name")), ) if isinstance(parent, str): - return run_trees.RunTree.from_dotted_order( - parent, client=langsmith_extra.get("client") + dort = run_trees.RunTree.from_dotted_order( + parent, + client=langsmith_extra.get("client"), + # Precedence: cvar -> explicit -> env var + project_name=_get_project_name(langsmith_extra.get("project_name")), ) + return dort run_tree = langsmith_extra.get("run_tree") if run_tree: return run_tree @@ -1006,32 +1309,30 @@ def _setup_run( ) -> _TraceableContainer: """Create a new run or create_child() if run is passed in kwargs.""" extra_outer = container_input.get("extra_outer") or {} - name = container_input.get("name") metadata = container_input.get("metadata") tags = container_input.get("tags") client = container_input.get("client") run_type = container_input.get("run_type") or "chain" outer_project = _PROJECT_NAME.get() langsmith_extra = langsmith_extra or LangSmithExtra() - client_ = langsmith_extra.get("client", client) + name = langsmith_extra.get("name") or container_input.get("name") + client_ = langsmith_extra.get("client", client) or _CLIENT.get() parent_run_ = _get_parent_run( {**langsmith_extra, "client": client_}, kwargs.get("config") ) project_cv = _PROJECT_NAME.get() selected_project = ( project_cv # From parent trace + or ( + parent_run_.session_name if parent_run_ else None + ) # from parent run attempt 2 (not managed by traceable) or langsmith_extra.get("project_name") # at invocation time or container_input["project_name"] # at decorator time or utils.get_tracer_project() # default ) 
reference_example_id = langsmith_extra.get("reference_example_id") id_ = langsmith_extra.get("run_id") - if ( - not project_cv - and not reference_example_id - and not parent_run_ - and not utils.tracing_is_enabled() - ): + if not parent_run_ and not utils.tracing_is_enabled(): utils.log_once( logging.DEBUG, "LangSmith tracing is enabled, returning original function." ) @@ -1046,7 +1347,7 @@ def _setup_run( ) id_ = id_ or str(uuid.uuid4()) signature = inspect.signature(func) - name_ = name or func.__name__ + name_ = name or utils._get_function_name(func) docstring = func.__doc__ extra_inner = _collect_extra(extra_outer, langsmith_extra) outer_metadata = _METADATA.get() @@ -1131,6 +1432,21 @@ def _setup_run( return response_container +def _handle_container_end( + container: _TraceableContainer, + outputs: Optional[Any] = None, + error: Optional[BaseException] = None, + outputs_processor: Optional[Callable[..., dict]] = None, +) -> None: + """Handle the end of run.""" + try: + if outputs_processor is not None: + outputs = outputs_processor(outputs) + _container_end(container, outputs=outputs, error=error) + except BaseException as e: + LOGGER.warning(f"Unable to process trace outputs: {repr(e)}") + + def _is_traceable_function(func: Callable) -> bool: return getattr(func, "__langsmith_traceable__", False) @@ -1163,3 +1479,233 @@ def _get_inputs_safe( except BaseException as e: LOGGER.debug(f"Failed to get inputs for {signature}: {e}") return {"args": args, "kwargs": kwargs} + + +def _set_tracing_context(context: Dict[str, Any]): + """Set the tracing context.""" + for k, v in context.items(): + var = _CONTEXT_KEYS[k] + var.set(v) + + +def _process_iterator( + generator: Iterator[T], + run_container: _TraceableContainer, + is_llm_run: bool, + # Results is mutated + results: List[Any], +) -> Generator[T, None, Any]: + try: + while True: + item = run_container["context"].run(next, generator) + if is_llm_run and run_container["new_run"]: + 
run_container["new_run"].add_event( + { + "name": "new_token", + "time": datetime.datetime.now( + datetime.timezone.utc + ).isoformat(), + "kwargs": {"token": item}, + } + ) + results.append(item) + yield item + except StopIteration as e: + return e.value + + +async def _process_async_iterator( + generator: AsyncIterator[T], + run_container: _TraceableContainer, + *, + is_llm_run: bool, + accepts_context: bool, + results: List[Any], +) -> AsyncGenerator[T, None]: + try: + while True: + if accepts_context: + item = await asyncio.create_task( # type: ignore[call-arg, var-annotated] + aitertools.py_anext(generator), # type: ignore[arg-type] + context=run_container["context"], + ) + else: + # Python < 3.11 + with tracing_context(**get_tracing_context(run_container["context"])): + item = await aitertools.py_anext(generator) + if is_llm_run and run_container["new_run"]: + run_container["new_run"].add_event( + { + "name": "new_token", + "time": datetime.datetime.now( + datetime.timezone.utc + ).isoformat(), + "kwargs": {"token": item}, + } + ) + results.append(item) + yield item + except StopAsyncIteration: + pass + + +T = TypeVar("T") + + +class _TracedStreamBase(Generic[T]): + """Base class for traced stream objects.""" + + def __init__( + self, + stream: Union[Iterator[T], AsyncIterator[T]], + trace_container: _TraceableContainer, + reduce_fn: Optional[Callable] = None, + ): + self.__ls_stream__ = stream + self.__ls_trace_container__ = trace_container + self.__ls_completed__ = False + self.__ls_reduce_fn__ = reduce_fn + self.__ls_accumulated_output__: list[T] = [] + self.__is_llm_run__ = ( + trace_container["new_run"].run_type == "llm" + if trace_container["new_run"] + else False + ) + + def __getattr__(self, name: str): + return getattr(self.__ls_stream__, name) + + def __dir__(self): + return list(set(dir(self.__class__) + dir(self.__ls_stream__))) + + def __repr__(self): + return f"Traceable({self.__ls_stream__!r})" + + def __str__(self): + return 
str(self.__ls_stream__) + + def __del__(self): + try: + if not self.__ls_completed__: + self._end_trace() + except BaseException: + pass + try: + self.__ls_stream__.__del__() + except BaseException: + pass + + def _end_trace(self, error: Optional[BaseException] = None): + if self.__ls_completed__: + return + try: + if self.__ls_reduce_fn__: + reduced_output = self.__ls_reduce_fn__(self.__ls_accumulated_output__) + else: + reduced_output = self.__ls_accumulated_output__ + _container_end( + self.__ls_trace_container__, outputs=reduced_output, error=error + ) + finally: + self.__ls_completed__ = True + + +class _TracedStream(_TracedStreamBase, Generic[T]): + """A wrapper for synchronous stream objects that handles tracing.""" + + def __init__( + self, + stream: Iterator[T], + trace_container: _TraceableContainer, + reduce_fn: Optional[Callable] = None, + ): + super().__init__( + stream=stream, trace_container=trace_container, reduce_fn=reduce_fn + ) + self.__ls_stream__ = stream + self.__ls__gen__ = _process_iterator( + self.__ls_stream__, + self.__ls_trace_container__, + is_llm_run=self.__is_llm_run__, + results=self.__ls_accumulated_output__, + ) + + def __next__(self) -> T: + try: + return next(self.__ls__gen__) + except StopIteration: + self._end_trace() + raise + + def __iter__(self) -> Iterator[T]: + try: + yield from self.__ls__gen__ + except BaseException as e: + self._end_trace(error=e) + raise + else: + self._end_trace() + + def __enter__(self): + return self.__ls_stream__.__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + return self.__ls_stream__.__exit__(exc_type, exc_val, exc_tb) + finally: + self._end_trace(error=exc_val if exc_type else None) + + +class _TracedAsyncStream(_TracedStreamBase, Generic[T]): + """A wrapper for asynchronous stream objects that handles tracing.""" + + def __init__( + self, + stream: AsyncIterator[T], + trace_container: _TraceableContainer, + reduce_fn: Optional[Callable] = None, + ): + super().__init__( + 
stream=stream, trace_container=trace_container, reduce_fn=reduce_fn + ) + self.__ls_stream__ = stream + self.__ls_gen = _process_async_iterator( + generator=self.__ls_stream__, + run_container=self.__ls_trace_container__, + is_llm_run=self.__is_llm_run__, + accepts_context=aitertools.asyncio_accepts_context(), + results=self.__ls_accumulated_output__, + ) + + async def _aend_trace(self, error: Optional[BaseException] = None): + ctx = copy_context() + await asyncio.shield( + aitertools.aio_to_thread(self._end_trace, error, __ctx=ctx) + ) + _set_tracing_context(get_tracing_context(ctx)) + + async def __anext__(self) -> T: + try: + return cast(T, await aitertools.py_anext(self.__ls_gen)) + except StopAsyncIteration: + await self._aend_trace() + raise + + async def __aiter__(self) -> AsyncIterator[T]: + try: + async for item in self.__ls_gen: + yield item + except BaseException: + await self._aend_trace() + raise + else: + await self._aend_trace() + + async def __aenter__(self): + return await self.__ls_stream__.__aenter__() + + async def __aexit__(self, exc_type, exc_val, exc_tb): + try: + return await self.__ls_stream__.__aexit__(exc_type, exc_val, exc_tb) + finally: + await self._aend_trace() diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index ffe997c67..f52dda1c1 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -9,12 +9,11 @@ from uuid import UUID, uuid4 try: - from pydantic.v1 import Field, root_validator, validator # type: ignore[import] + from pydantic.v1 import Field, root_validator # type: ignore[import] except ImportError: from pydantic import ( # type: ignore[assignment, no-redef] Field, root_validator, - validator, ) import threading @@ -32,12 +31,15 @@ _LOCK = threading.Lock() -def _get_client() -> Client: +# Note, this is called directly by langchain. Do not remove. 
+ + +def get_cached_client(**init_kwargs: Any) -> Client: global _CLIENT if _CLIENT is None: with _LOCK: if _CLIENT is None: - _CLIENT = Client() + _CLIENT = Client(**init_kwargs) return _CLIENT @@ -59,7 +61,11 @@ class RunTree(ls_schemas.RunBase): ) session_id: Optional[UUID] = Field(default=None, alias="project_id") extra: Dict = Field(default_factory=dict) - client: Client = Field(default_factory=_get_client, exclude=True) + tags: Optional[List[str]] = Field(default_factory=list) + events: List[Dict] = Field(default_factory=list) + """List of events associated with the run, like + start and end events.""" + ls_client: Optional[Any] = Field(default=None, exclude=True) dotted_order: str = Field( default="", description="The order of the run in the tree." ) @@ -70,20 +76,24 @@ class Config: arbitrary_types_allowed = True allow_population_by_field_name = True - extra = "allow" - - @validator("client", pre=True) - def validate_client(cls, v: Optional[Client]) -> Client: - """Ensure the client is specified.""" - if v is None: - return _get_client() - return v + extra = "ignore" @root_validator(pre=True) def infer_defaults(cls, values: dict) -> dict: """Assign name to the run.""" - if "serialized" not in values: - values["serialized"] = {"name": values["name"]} + if values.get("name") is None and values.get("serialized") is not None: + if "name" in values["serialized"]: + values["name"] = values["serialized"]["name"] + elif "id" in values["serialized"]: + values["name"] = values["serialized"]["id"][-1] + if values.get("name") is None: + values["name"] = "Unnamed" + if "client" in values: # Handle user-constructed clients + values["ls_client"] = values.pop("client") + elif "_client" in values: + values["ls_client"] = values.pop("_client") + if not values.get("ls_client"): + values["ls_client"] = None if values.get("parent_run") is not None: values["parent_run_id"] = values["parent_run"].id if "id" not in values: @@ -98,6 +108,8 @@ def infer_defaults(cls, values: dict) 
-> dict: values["events"] = [] if values.get("tags") is None: values["tags"] = [] + if values.get("outputs") is None: + values["outputs"] = {} return values @root_validator(pre=False) @@ -117,6 +129,28 @@ def ensure_dotted_order(cls, values: dict) -> dict: values["dotted_order"] = current_dotted_order return values + @property + def client(self) -> Client: + """Return the client.""" + # Lazily load the client + # If you never use this for API calls, it will never be loaded + if self.ls_client is None: + self.ls_client = get_cached_client() + return self.ls_client + + @property + def _client(self) -> Optional[Client]: + # For backwards compat + return self.ls_client + + def __setattr__(self, name, value): + """Set the _client specially.""" + # For backwards compat + if name == "_client": + self.ls_client = value + else: + return super().__setattr__(name, value) + def add_tags(self, tags: Union[Sequence[str], str]) -> None: """Add tags to the run.""" if isinstance(tags, str): @@ -161,8 +195,7 @@ def add_event( events (Union[ls_schemas.RunEvent, Sequence[ls_schemas.RunEvent], Sequence[dict], dict, str]): The event(s) to be added. It can be a single event, a sequence - of events, - a sequence of dictionaries, a dictionary, or a string. + of events, a sequence of dictionaries, a dictionary, or a string. Returns: None @@ -234,7 +267,7 @@ def create_child( extra=extra or {}, parent_run=self, project_name=self.session_name, - client=self.client, + ls_client=self.ls_client, tags=tags, ) self.child_runs.append(run) @@ -266,6 +299,7 @@ def patch(self) -> None: if not self.end_time: self.end() self.client.update_run( + name=self.name, run_id=self.id, outputs=self.outputs.copy() if self.outputs else None, error=self.error, @@ -329,9 +363,13 @@ def from_runnable_config( "RunTree.from_runnable_config requires langchain-core to be installed. " "You can install it with `pip install langchain-core`." 
) from e - config_ = ensure_config( - cast(RunnableConfig, config) if isinstance(config, dict) else None - ) + if config is None: + config_ = ensure_config( + cast(RunnableConfig, config) if isinstance(config, dict) else None + ) + else: + config_ = cast(RunnableConfig, config) + if ( (cb := config_.get("callbacks")) and isinstance(cb, (CallbackManager, AsyncCallbackManager)) @@ -350,6 +388,11 @@ def from_runnable_config( kwargs["outputs"] = run.outputs kwargs["start_time"] = run.start_time kwargs["end_time"] = run.end_time + kwargs["tags"] = sorted(set(run.tags or [] + kwargs.get("tags", []))) + kwargs["name"] = run.name + extra_ = kwargs.setdefault("extra", {}) + metadata_ = extra_.setdefault("metadata", {}) + metadata_.update(run.metadata) elif hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map: dotted_order = tracer.order_map[cb.parent_run_id][1] else: @@ -404,6 +447,8 @@ def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTre init_args["extra"]["metadata"] = metadata tags = sorted(set(baggage.tags + init_args.get("tags", []))) init_args["tags"] = tags + if baggage.project_name: + init_args["project_name"] = baggage.project_name return RunTree(**init_args) @@ -415,6 +460,7 @@ def to_headers(self) -> Dict[str, str]: baggage = _Baggage( metadata=self.extra.get("metadata", {}), tags=self.tags, + project_name=self.session_name, ) headers["baggage"] = baggage.to_header() return headers @@ -427,10 +473,12 @@ def __init__( self, metadata: Optional[Dict[str, str]] = None, tags: Optional[List[str]] = None, + project_name: Optional[str] = None, ): """Initialize the Baggage object.""" self.metadata = metadata or {} self.tags = tags or [] + self.project_name = project_name @classmethod def from_header(cls, header_value: Optional[str]) -> _Baggage: @@ -439,6 +487,7 @@ def from_header(cls, header_value: Optional[str]) -> _Baggage: return cls() metadata = {} tags = [] + project_name = None try: for item in header_value.split(","): 
key, value = item.split("=", 1) @@ -446,10 +495,12 @@ def from_header(cls, header_value: Optional[str]) -> _Baggage: metadata = json.loads(urllib.parse.unquote(value)) elif key == f"{LANGSMITH_PREFIX}tags": tags = urllib.parse.unquote(value).split(",") + elif key == f"{LANGSMITH_PREFIX}project": + project_name = urllib.parse.unquote(value) except Exception as e: logger.warning(f"Error parsing baggage header: {e}") - return cls(metadata=metadata, tags=tags) + return cls(metadata=metadata, tags=tags, project_name=project_name) def to_header(self) -> str: """Return the Baggage object as a header value.""" @@ -464,6 +515,10 @@ def to_header(self) -> str: items.append( f"{LANGSMITH_PREFIX}tags={urllib.parse.quote(serialized_tags)}" ) + if self.project_name: + items.append( + f"{LANGSMITH_PREFIX}project={urllib.parse.quote(self.project_name)}" + ) return ",".join(items) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 453aa13de..33bb11c40 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -41,14 +41,14 @@ from typing_extensions import Literal SCORE_TYPE = Union[StrictBool, StrictInt, StrictFloat, None] -VALUE_TYPE = Union[Dict, StrictBool, StrictInt, StrictFloat, str, None] +VALUE_TYPE = Union[Dict, str, None] class ExampleBase(BaseModel): """Example base model.""" dataset_id: UUID - inputs: Dict[str, Any] + inputs: Dict[str, Any] = Field(default_factory=dict) outputs: Optional[Dict[str, Any]] = Field(default=None) metadata: Optional[Dict[str, Any]] = Field(default=None) @@ -70,7 +70,10 @@ class Example(ExampleBase): """Example model.""" id: UUID - created_at: datetime + created_at: datetime = Field( + default_factory=lambda: datetime.fromtimestamp(0, tz=timezone.utc) + ) + dataset_id: UUID = Field(default=UUID("00000000-0000-0000-0000-000000000000")) modified_at: Optional[datetime] = Field(default=None) runs: List[Run] = Field(default_factory=list) source_run_id: Optional[UUID] = None @@ -99,6 +102,12 @@ def 
url(self) -> Optional[str]: return None +class ExampleSearch(ExampleBase): + """Example returned via search.""" + + id: UUID + + class ExampleUpdate(BaseModel): """Update class for Example.""" @@ -135,13 +144,6 @@ class Config: frozen = True -class DatasetCreate(DatasetBase): - """Dataset create model.""" - - id: Optional[UUID] = None - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - - class Dataset(DatasetBase): """Dataset ORM model.""" @@ -151,6 +153,8 @@ class Dataset(DatasetBase): example_count: Optional[int] = None session_count: Optional[int] = None last_session_start_time: Optional[datetime] = None + inputs_schema: Optional[Dict[str, Any]] = None + outputs_schema: Optional[Dict[str, Any]] = None _host_url: Optional[str] = PrivateAttr(default=None) _tenant_id: Optional[UUID] = PrivateAttr(default=None) _public_path: Optional[str] = PrivateAttr(default=None) @@ -163,6 +167,12 @@ def __init__( **kwargs: Any, ) -> None: """Initialize a Dataset object.""" + if "inputs_schema_definition" in kwargs: + kwargs["inputs_schema"] = kwargs.pop("inputs_schema_definition") + + if "outputs_schema_definition" in kwargs: + kwargs["outputs_schema"] = kwargs.pop("outputs_schema_definition") + super().__init__(**kwargs) self._host_url = _host_url self._tenant_id = _tenant_id @@ -301,8 +311,8 @@ class Run(RunBase): sorted in the order it was executed. Example: - - Parent: 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8 - - Children: + - Parent: 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8 + - Children: - 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8.20230914T223155649Z809ed3a2-0172-4f4d-8a02-a64e9b7a0f8a - 20230915T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8.20230914T223155650Zc8d9f4c5-6c5a-4b2d-9b1c-3d9d7a7c5c7c """ # noqa: E501 @@ -382,15 +392,12 @@ class FeedbackSourceBase(BaseModel): This represents whether feedback is submitted from the API, model, human labeler, etc. 
- - Attributes: - type (str): The type of the feedback source. - metadata (Optional[Dict[str, Any]]): Additional metadata for the feedback - source. """ type: str + """The type of the feedback source.""" metadata: Optional[Dict[str, Any]] = Field(default_factory=dict) + """Additional metadata for the feedback source.""" class APIFeedbackSource(FeedbackSourceBase): @@ -456,25 +463,23 @@ class FeedbackCategory(TypedDict, total=False): """Specific value and label pair for feedback.""" value: float + """The numeric value associated with this feedback category.""" label: Optional[str] + """An optional label to interpret the value for this feedback category.""" class FeedbackConfig(TypedDict, total=False): - """Represents _how_ a feedback value ought to be interpreted. - - Attributes: - type (Literal["continuous", "categorical", "freeform"]): The type of feedback. - min (Optional[float]): The minimum value for continuous feedback. - max (Optional[float]): The maximum value for continuous feedback. - categories (Optional[List[FeedbackCategory]]): If feedback is categorical, - This defines the valid categories the server will accept. - Not applicable to continuosu or freeform feedback types. - """ + """Represents _how_ a feedback value ought to be interpreted.""" type: Literal["continuous", "categorical", "freeform"] + """The type of feedback.""" min: Optional[float] + """The minimum value for continuous feedback.""" max: Optional[float] + """The maximum value for continuous feedback.""" categories: Optional[List[FeedbackCategory]] + """If feedback is categorical, this defines the valid categories the server will accept. 
+ Not applicable to continuous or freeform feedback types.""" # noqa class FeedbackCreate(FeedbackBase): @@ -592,7 +597,9 @@ class BaseMessageLike(Protocol): """A protocol representing objects similar to BaseMessage.""" content: str - additional_kwargs: Dict + """The content of the message.""" + additional_kwargs: Dict[Any, Any] + """Additional keyword arguments associated with the message.""" @property def type(self) -> str: @@ -600,58 +607,46 @@ def type(self) -> str: class DatasetShareSchema(TypedDict, total=False): - """Represents the schema for a dataset share. - - Attributes: - dataset_id (UUID): The ID of the dataset. - share_token (UUID): The token for sharing the dataset. - url (str): The URL of the shared dataset. - """ + """Represents the schema for a dataset share.""" dataset_id: UUID + """The ID of the dataset.""" share_token: UUID + """The token for sharing the dataset.""" url: str + """The URL of the shared dataset.""" class AnnotationQueue(BaseModel): - """Represents an annotation queue. - - Attributes: - id (UUID): The ID of the annotation queue. - name (str): The name of the annotation queue. - description (Optional[str], optional): The description of the annotation queue. - Defaults to None. - created_at (datetime, optional): The creation timestamp of the annotation queue. - Defaults to the current UTC time. - updated_at (datetime, optional): The last update timestamp of the annotation - queue. Defaults to the current UTC time. - tenant_id (UUID): The ID of the tenant associated with the annotation queue. 
- """ + """Represents an annotation queue.""" id: UUID + """The unique identifier of the annotation queue.""" name: str + """The name of the annotation queue.""" description: Optional[str] = None + """An optional description of the annotation queue.""" created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + """The timestamp when the annotation queue was created.""" updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + """The timestamp when the annotation queue was last updated.""" tenant_id: UUID + """The ID of the tenant associated with the annotation queue.""" class BatchIngestConfig(TypedDict, total=False): - """Configuration for batch ingestion. - - Attributes: - scale_up_qsize_trigger (int): The queue size threshold that triggers scaling up. - scale_up_nthreads_limit (int): The maximum number of threads to scale up to. - scale_down_nempty_trigger (int): The number of empty threads that triggers - scaling down. - size_limit (int): The maximum size limit for the batch. - """ + """Configuration for batch ingestion.""" scale_up_qsize_trigger: int + """The queue size threshold that triggers scaling up.""" scale_up_nthreads_limit: int + """The maximum number of threads to scale up to.""" scale_down_nempty_trigger: int + """The number of empty threads that triggers scaling down.""" size_limit: int + """The maximum size limit for the batch.""" size_limit_bytes: Optional[int] + """The maximum size limit in bytes for the batch.""" class LangSmithInfo(BaseModel): @@ -667,18 +662,27 @@ class LangSmithInfo(BaseModel): Example.update_forward_refs() -class FeedbackIngestToken(BaseModel): - """Represents the schema for a feedback ingest token. +class LangSmithSettings(BaseModel): + """Settings for the LangSmith tenant.""" - Attributes: - id (UUID): The ID of the feedback ingest token. - token (str): The token for ingesting feedback. - expires_at (datetime): The expiration time of the token. 
- """ + id: str + """The ID of the tenant.""" + display_name: str + """The display name of the tenant.""" + created_at: datetime + """The creation time of the tenant.""" + tenant_handle: Optional[str] = None + + +class FeedbackIngestToken(BaseModel): + """Represents the schema for a feedback ingest token.""" id: UUID + """The ID of the feedback ingest token.""" url: str + """The URL to GET when logging the feedback.""" expires_at: datetime + """The expiration time of the token.""" class RunEvent(TypedDict, total=False): @@ -704,20 +708,14 @@ class TimeDeltaInput(TypedDict, total=False): class DatasetDiffInfo(BaseModel): - """Represents the difference information between two datasets. - - Attributes: - examples_modified (List[UUID]): A list of UUIDs representing - the modified examples. - examples_added (List[UUID]): A list of UUIDs representing - the added examples. - examples_removed (List[UUID]): A list of UUIDs representing - the removed examples. - """ + """Represents the difference information between two datasets.""" examples_modified: List[UUID] + """A list of UUIDs representing the modified examples.""" examples_added: List[UUID] + """A list of UUIDs representing the added examples.""" examples_removed: List[UUID] + """A list of UUIDs representing the removed examples.""" class ComparativeExperiment(BaseModel): @@ -728,15 +726,25 @@ class ComparativeExperiment(BaseModel): """ id: UUID + """The unique identifier for the comparative experiment.""" name: Optional[str] = None + """The optional name of the comparative experiment.""" description: Optional[str] = None + """An optional description of the comparative experiment.""" tenant_id: UUID + """The identifier of the tenant associated with this experiment.""" created_at: datetime + """The timestamp when the comparative experiment was created.""" modified_at: datetime + """The timestamp when the comparative experiment was last modified.""" reference_dataset_id: UUID + """The identifier of the reference dataset 
used in this experiment.""" extra: Optional[Dict[str, Any]] = None + """Optional additional information about the experiment.""" experiments_info: Optional[List[dict]] = None + """Optional list of dictionaries containing information about individual experiments.""" feedback_stats: Optional[Dict[str, Any]] = None + """Optional dictionary containing feedback statistics for the experiment.""" @property def metadata(self) -> dict[str, Any]: @@ -744,3 +752,132 @@ def metadata(self) -> dict[str, Any]: if self.extra is None or "metadata" not in self.extra: return {} return self.extra["metadata"] + + +class PromptCommit(BaseModel): + """Represents a Prompt with a manifest.""" + + owner: str + """The handle of the owner of the prompt.""" + repo: str + """The name of the prompt.""" + commit_hash: str + """The commit hash of the prompt.""" + manifest: Dict[str, Any] + """The manifest of the prompt.""" + examples: List[dict] + """The list of examples.""" + + +class ListedPromptCommit(BaseModel): + """Represents a listed prompt commit with associated metadata.""" + + id: UUID + """The unique identifier for the prompt commit.""" + + owner: str + """The owner of the prompt commit.""" + + repo: str + """The repository name of the prompt commit.""" + + manifest_id: Optional[UUID] = None + """The optional identifier for the manifest associated with this commit.""" + + repo_id: Optional[UUID] = None + """The optional identifier for the repository.""" + + parent_id: Optional[UUID] = None + """The optional identifier for the parent commit.""" + + commit_hash: Optional[str] = None + """The optional hash of the commit.""" + + created_at: Optional[datetime] = None + """The optional timestamp when the commit was created.""" + + updated_at: Optional[datetime] = None + """The optional timestamp when the commit was last updated.""" + + example_run_ids: Optional[List[UUID]] = Field(default_factory=list) + """A list of example run identifiers associated with this commit.""" + + num_downloads: 
Optional[int] = 0 + """The number of times this commit has been downloaded.""" + + num_views: Optional[int] = 0 + """The number of times this commit has been viewed.""" + + parent_commit_hash: Optional[str] = None + """The optional hash of the parent commit.""" + + +class Prompt(BaseModel): + """Represents a Prompt with metadata.""" + + repo_handle: str + """The name of the prompt.""" + description: Optional[str] = None + """The description of the prompt.""" + readme: Optional[str] = None + """The README of the prompt.""" + id: str + """The ID of the prompt.""" + tenant_id: str + """The tenant ID of the prompt owner.""" + created_at: datetime + """The creation time of the prompt.""" + updated_at: datetime + """The last update time of the prompt.""" + is_public: bool + """Whether the prompt is public.""" + is_archived: bool + """Whether the prompt is archived.""" + tags: List[str] + """The tags associated with the prompt.""" + original_repo_id: Optional[str] = None + """The ID of the original prompt, if forked.""" + upstream_repo_id: Optional[str] = None + """The ID of the upstream prompt, if forked.""" + owner: Optional[str] + """The handle of the owner of the prompt.""" + full_name: str + """The full name of the prompt. 
(owner + repo_handle)""" + num_likes: int + """The number of likes.""" + num_downloads: int + """The number of downloads.""" + num_views: int + """The number of views.""" + liked_by_auth_user: bool + """Whether the prompt is liked by the authenticated user.""" + last_commit_hash: Optional[str] = None + """The hash of the last commit.""" + num_commits: int + """The number of commits.""" + original_repo_full_name: Optional[str] = None + """The full name of the original prompt, if forked.""" + upstream_repo_full_name: Optional[str] = None + """The full name of the upstream prompt, if forked.""" + + +class ListPromptsResponse(BaseModel): + """A list of prompts with metadata.""" + + repos: List[Prompt] + """The list of prompts.""" + total: int + """The total number of prompts.""" + + +class PromptSortField(str, Enum): + """Enum for sorting fields for prompts.""" + + num_downloads = "num_downloads" + """Number of downloads.""" + num_views = "num_views" + """Number of views.""" + updated_at = "updated_at" + """Last updated time.""" + num_likes = "num_likes" + """Number of likes.""" diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 2c0152e0f..bf6068004 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -1,21 +1,28 @@ """Generic utility functions.""" +from __future__ import annotations + import contextlib +import contextvars import copy import enum import functools import logging import os import pathlib +import socket import subprocess import sys import threading import traceback +from concurrent.futures import Future, ThreadPoolExecutor from typing import ( Any, Callable, Dict, Generator, + Iterable, + Iterator, List, Mapping, Optional, @@ -23,9 +30,13 @@ Tuple, TypeVar, Union, + cast, ) +from urllib import parse as urllib_parse +import httpx import requests +from typing_extensions import ParamSpec from urllib3.util import Retry from langsmith import schemas as ls_schemas @@ -65,11 +76,22 @@ class 
LangSmithConnectionError(LangSmithError): """Couldn't connect to the LangSmith API.""" -def tracing_is_enabled() -> bool: +## Warning classes + + +class LangSmithWarning(UserWarning): + """Base class for warnings.""" + + +class LangSmithMissingAPIKeyWarning(LangSmithWarning): + """Warning for missing API key.""" + + +def tracing_is_enabled(ctx: Optional[dict] = None) -> bool: """Return True if tracing is enabled.""" from langsmith.run_helpers import get_current_run_tree, get_tracing_context - tc = get_tracing_context() + tc = ctx or get_tracing_context() # You can manually override the environment using context vars. # Check that first. # Doing this before checking the run tree lets us @@ -115,13 +137,18 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: return decorator -def raise_for_status_with_text(response: requests.Response) -> None: +def raise_for_status_with_text( + response: Union[requests.Response, httpx.Response], +) -> None: """Raise an error with the response text.""" try: response.raise_for_status() except requests.HTTPError as e: raise requests.HTTPError(str(e), response.text) from e # type: ignore[call-arg] + except httpx.HTTPError as e: + raise httpx.HTTPError(str(e), response.text) from e # type: ignore[call-arg] + def get_enum_value(enu: Union[enum.Enum, str]) -> str: """Get the value of a string enum.""" @@ -325,6 +352,7 @@ def is_base_message_like(obj: object) -> bool: ) +@functools.lru_cache(maxsize=100) def get_env_var( name: str, default: Optional[str] = None, @@ -352,6 +380,7 @@ def get_env_var( return default +@functools.lru_cache(maxsize=1) def get_tracer_project(return_default_value=True) -> Optional[str]: """Get the project name for a LangSmith tracer.""" return os.environ.get( @@ -561,3 +590,202 @@ def deepish_copy(val: T) -> T: # what we can _LOGGER.debug("Failed to deepcopy input: %s", repr(e)) return _middle_copy(val, memo) + + +def is_version_greater_or_equal(current_version: str, target_version: str) -> bool: + """Check if the 
current version is greater or equal to the target version.""" + from packaging import version + + current = version.parse(current_version) + target = version.parse(target_version) + return current >= target + + +def parse_prompt_identifier(identifier: str) -> Tuple[str, str, str]: + """Parse a string in the format of owner/name:hash, name:hash, owner/name, or name. + + Args: + identifier (str): The prompt identifier to parse. + + Returns: + Tuple[str, str, str]: A tuple containing (owner, name, hash). + + Raises: + ValueError: If the identifier doesn't match the expected formats. + """ + if ( + not identifier + or identifier.count("/") > 1 + or identifier.startswith("/") + or identifier.endswith("/") + ): + raise ValueError(f"Invalid identifier format: {identifier}") + + parts = identifier.split(":", 1) + owner_name = parts[0] + commit = parts[1] if len(parts) > 1 else "latest" + + if "/" in owner_name: + owner, name = owner_name.split("/", 1) + if not owner or not name: + raise ValueError(f"Invalid identifier format: {identifier}") + return owner, name, commit + else: + if not owner_name: + raise ValueError(f"Invalid identifier format: {identifier}") + return "-", owner_name, commit + + +P = ParamSpec("P") + + +class ContextThreadPoolExecutor(ThreadPoolExecutor): + """ThreadPoolExecutor that copies the context to the child thread.""" + + def submit( # type: ignore[override] + self, + func: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, + ) -> Future[T]: + """Submit a function to the executor. + + Args: + func (Callable[..., T]): The function to submit. + *args (Any): The positional arguments to the function. + **kwargs (Any): The keyword arguments to the function. + + Returns: + Future[T]: The future for the function. 
+ """ + return super().submit( + cast( + Callable[..., T], + functools.partial( + contextvars.copy_context().run, func, *args, **kwargs + ), + ) + ) + + def map( + self, + fn: Callable[..., T], + *iterables: Iterable[Any], + timeout: Optional[float] = None, + chunksize: int = 1, + ) -> Iterator[T]: + """Return an iterator equivalent to stdlib map. + + Each function will receive its own copy of the context from the parent thread. + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. 
+ """ + contexts = [contextvars.copy_context() for _ in range(len(iterables[0]))] # type: ignore[arg-type] + + def _wrapped_fn(*args: Any) -> T: + return contexts.pop().run(fn, *args) + + return super().map( + _wrapped_fn, + *iterables, + timeout=timeout, + chunksize=chunksize, + ) + + +def get_api_url(api_url: Optional[str]) -> str: + """Get the LangSmith API URL from the environment or the given value.""" + _api_url = api_url or cast( + str, + get_env_var( + "ENDPOINT", + default="https://api.smith.langchain.com", + ), + ) + if not _api_url.strip(): + raise LangSmithUserError("LangSmith API URL cannot be empty") + return _api_url.strip().strip('"').strip("'").rstrip("/") + + +def get_api_key(api_key: Optional[str]) -> Optional[str]: + """Get the API key from the environment or the given value.""" + api_key_ = api_key if api_key is not None else get_env_var("API_KEY", default=None) + if api_key_ is None or not api_key_.strip(): + return None + return api_key_.strip().strip('"').strip("'") + + +def _is_localhost(url: str) -> bool: + """Check if the URL is localhost. + + Parameters + ---------- + url : str + The URL to check. + + Returns: + ------- + bool + True if the URL is localhost, False otherwise. 
+ """ + try: + netloc = urllib_parse.urlsplit(url).netloc.split(":")[0] + ip = socket.gethostbyname(netloc) + return ip == "127.0.0.1" or ip.startswith("0.0.0.0") or ip.startswith("::") + except socket.gaierror: + return False + + +@functools.lru_cache(maxsize=2) +def get_host_url(web_url: Optional[str], api_url: str): + """Get the host URL based on the web URL or API URL.""" + if web_url: + return web_url + parsed_url = urllib_parse.urlparse(api_url) + if _is_localhost(api_url): + link = "http://localhost" + elif str(parsed_url.path).endswith("/api"): + new_path = str(parsed_url.path).rsplit("/api", 1)[0] + link = urllib_parse.urlunparse(parsed_url._replace(path=new_path)) + elif str(parsed_url.netloc).startswith("eu."): + link = "https://eu.smith.langchain.com" + elif str(parsed_url.netloc).startswith("dev."): + link = "https://dev.smith.langchain.com" + else: + link = "https://smith.langchain.com" + return link + + +def _get_function_name(fn: Callable, depth: int = 0) -> str: + if depth > 2 or not callable(fn): + return str(fn) + + if hasattr(fn, "__name__"): + return fn.__name__ + + if isinstance(fn, functools.partial): + return _get_function_name(fn.func, depth + 1) + + if hasattr(fn, "__call__"): + if hasattr(fn, "__class__") and hasattr(fn.__class__, "__name__"): + return fn.__class__.__name__ + return _get_function_name(fn.__call__, depth + 1) + + return str(fn) diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 5b6798e8d..014d364cd 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,11 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + fn_ = message["tool_calls"][index]["function"] + fn_["name"] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += 
chunk.function.arguments + fn_ = message["tool_calls"][index]["function"] + fn_["arguments"] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( @@ -195,11 +193,6 @@ async def acreate(*args, stream: bool = False, **kwargs): _invocation_params_fn=invocation_params_fn, **textra, ) - if stream: - # TODO: This slightly alters the output to be a generator instead of the - # stream object. We can probably fix this with a bit of simple changes - res = decorator(original_create)(*args, stream=stream, **kwargs) - return res return await decorator(original_create)(*args, stream=stream, **kwargs) return acreate if run_helpers.is_async(original_create) else create @@ -245,6 +238,6 @@ def wrap_openai( completions_name, _reduce_completions, tracing_extra=tracing_extra, - invocation_params_fn=functools.partial(_infer_invocation_params, "text"), + invocation_params_fn=functools.partial(_infer_invocation_params, "llm"), ) return client diff --git a/python/poetry.lock b/python/poetry.lock index 2d896248c..861e0f392 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.4.0" +version = "4.5.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.5.0-py3-none-any.whl", hash = "sha256:fdeb095b7cc5a5563175eedd926ec4ae55413bb4be5770c424af0ba46ccb4a78"}, + {file = "anyio-4.5.0.tar.gz", hash = "sha256:c5a275fe5ca0afd788001f58fca1e69e29ce706d746e317d660e21f70c530ef9"}, ] [package.dependencies] @@ -32,58 +32,58 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = 
"sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "black" -version = "24.4.2" +version = "24.8.0" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" files = [ - {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, - {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, - {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, - {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, - {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, - {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, - {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, - {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, - {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, - {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, - {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, - {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, - {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, - {file = 
"black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, - {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, - {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, - {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, - {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, - {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, - {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, - {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, - {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, + {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, + {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, + {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, + {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, + {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, + {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, + {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, + {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, + {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, + {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, + {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, + {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, + {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, + {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, + {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, + {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, + {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, + {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, + {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, + {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, + {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, + {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, ] [package.dependencies] @@ -103,13 +103,13 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2024.6.2" +version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, - {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] @@ -238,63 +238,83 @@ files = [ [[package]] name = "coverage" -version = "7.5.3" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, - {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, - {file = 
"coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, - {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, - {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, - {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, - {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, - {file = 
"coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, - {file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, - {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, - {file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, - {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", 
hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, - {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, - {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, - {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, - {file = "coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, - {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, - {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, - {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, - {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, - {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, - {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, - {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, - {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = 
"coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = 
"coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = 
"coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -331,13 +351,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.1" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, - {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = 
"sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] @@ -424,13 +444,13 @@ trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.27.2" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -445,18 +465,22 @@ brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" -version = "3.7" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" version = "2.0.0" @@ 
-468,15 +492,85 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jiter" +version = "0.5.0" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, + {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"}, + {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"}, + 
{file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"}, + {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"}, + {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"}, + {file = 
"jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"}, + {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"}, + {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"}, + {file = 
"jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"}, + {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"}, + {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"}, + {file = 
"jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"}, + {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"}, + {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"}, + {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"}, +] + [[package]] name = "marshmallow" -version = "3.21.3" +version = "3.22.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, - {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, ] [package.dependencies] @@ -484,148 +578,153 @@ packaging = ">=17.0" [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] tests = ["pytest", "pytz", "simplejson"] [[package]] name = "multidict" -version = "6.0.5" +version = "6.1.0" description = "multidict implementation" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, - {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, - {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, - {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, - {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, - {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, - {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"}, - {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash 
= "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"}, - {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"}, - {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"}, - {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"}, - {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"}, - {file = 
"multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, - {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, - {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, - {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"}, - {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"}, - {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"}, - {file = 
"multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"}, - {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"}, - {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, - {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, - {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, - {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, - {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, - {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, - {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, - {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, - {file = 
"multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, - {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, - {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, - {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, - {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, - {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = 
"multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = 
"sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + 
{file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + [[package]] name = "mypy" 
-version = "1.10.0" +version = "1.11.2" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, - {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, - {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, - {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, - {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, - {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, - {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, - {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, - {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, - {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, - {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, - {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, - {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, - {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, - {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, - {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, - {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, - {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, - {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, - {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, - {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, - {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, - {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, - {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, - {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, - {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, - {file = "mypy-1.10.0.tar.gz", hash = 
"sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, + {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, + {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, + {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, + {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, + {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, + {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, + {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, + {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, + {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, + {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, + {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, + {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, + {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, + {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, + {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, + {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, + {file = "mypy-1.11.2.tar.gz", 
hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -646,134 +745,146 @@ files = [ [[package]] name = "numpy" -version = "2.0.0" +version = "2.0.2" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, - {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, - {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, - {file = 
"numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, - {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, - {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = 
"sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, - {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, - {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, - {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, - {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, - {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"}, + {file = 
"numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"}, + {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"}, + {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"}, + {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = 
"sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"}, + {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"}, + {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"}, + {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"}, + {file = 
"numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"}, + {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"}, + {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"}, + {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, ] [[package]] name = "openai" -version = "1.35.3" +version = "1.50.0" description = "The official Python library for the openai 
API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.35.3-py3-none-any.whl", hash = "sha256:7b26544cef80f125431c073ffab3811d2421fbb9e30d3bd5c2436aba00b042d5"}, - {file = "openai-1.35.3.tar.gz", hash = "sha256:d6177087f150b381d49499be782d764213fdf638d391b29ca692b84dd675a389"}, + {file = "openai-1.50.0-py3-none-any.whl", hash = "sha256:8545b3e37aa28a39e5177adbb6142f3e2b2b9e2889ae002c0ba785d917e466e2"}, + {file = "openai-1.50.0.tar.gz", hash = "sha256:fc774e36ad96839b9fc14f1097093527b8abd1348ed824e25818309820afa344"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" -typing-extensions = ">=4.7,<5" +typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.5" +version = "3.10.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c"}, - {file = 
"orjson-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96"}, - {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b"}, - {file = "orjson-3.10.5-cp310-none-win32.whl", hash = "sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2"}, - {file = "orjson-3.10.5-cp310-none-win_amd64.whl", hash = "sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228"}, - {file = "orjson-3.10.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5"}, - {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f"}, - {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa"}, - {file = "orjson-3.10.5-cp311-none-win32.whl", hash = "sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04"}, - {file = "orjson-3.10.5-cp311-none-win_amd64.whl", hash = "sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c"}, - {file = 
"orjson-3.10.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b"}, - {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211"}, - {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3"}, - {file = "orjson-3.10.5-cp312-none-win32.whl", hash = "sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2"}, - {file = "orjson-3.10.5-cp312-none-win_amd64.whl", hash = "sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5"}, - {file = "orjson-3.10.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6"}, - {file = 
"orjson-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e"}, - {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9"}, - {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b"}, - {file = "orjson-3.10.5-cp38-none-win32.whl", hash = "sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4"}, - {file = "orjson-3.10.5-cp38-none-win_amd64.whl", hash = "sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09"}, - {file = "orjson-3.10.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214"}, - {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595"}, - {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86"}, - {file = "orjson-3.10.5-cp39-none-win32.whl", hash = "sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47"}, - {file = "orjson-3.10.5-cp39-none-win_amd64.whl", hash = "sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7"}, - {file = "orjson-3.10.5.tar.gz", hash = "sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d"}, + {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, + {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, + {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = 
"sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, + {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, + {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, + {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, + {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, + {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, + {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, + {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, + {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, + {file = 
"orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, + {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, + {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, + {file = "orjson-3.10.7-cp38-none-win32.whl", hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, + {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, + {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, + {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, + {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, + {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, ] [[package]] @@ -815,19 +926,19 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" @@ -872,116 +983,163 @@ files = [ [package.extras] test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +[[package]] +name = "py-spy" +version = "0.3.14" +description = "Sampling profiler for Python programs" +optional = false +python-versions = "*" +files = [ + {file = "py_spy-0.3.14-py2.py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5b342cc5feb8d160d57a7ff308de153f6be68dcf506ad02b4d67065f2bae7f45"}, + {file = "py_spy-0.3.14-py2.py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:fe7efe6c91f723442259d428bf1f9ddb9c1679828866b353d539345ca40d9dd2"}, + {file = "py_spy-0.3.14-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590905447241d789d9de36cff9f52067b6f18d8b5e9fb399242041568d414461"}, + {file = "py_spy-0.3.14-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:fd6211fe7f587b3532ba9d300784326d9a6f2b890af7bf6fff21a029ebbc812b"}, + {file = "py_spy-0.3.14-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e8e48032e71c94c3dd51694c39e762e4bbfec250df5bf514adcdd64e79371e0"}, + {file = "py_spy-0.3.14-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f59b0b52e56ba9566305236375e6fc68888261d0d36b5addbe3cf85affbefc0e"}, + {file = "py_spy-0.3.14-py2.py3-none-win_amd64.whl", hash = "sha256:8f5b311d09f3a8e33dbd0d44fc6e37b715e8e0c7efefafcda8bfd63b31ab5a31"}, +] + [[package]] name = "pydantic" -version = "2.7.4" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, - {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.18.4" -typing-extensions = ">=4.6.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.18.4" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, - {file = 
"pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, - {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, - {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, - {file = 
"pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, - {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, - {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, - {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, - {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, - {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, - {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = 
"sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, - {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, - {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, - 
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, - {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, - {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, - {file = 
"pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, - {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = 
"pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = 
"pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = 
"pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = 
"pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = 
"pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash 
= "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + 
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pyperf" +version = "2.7.0" +description = "Python module to run and analyze benchmarks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyperf-2.7.0-py3-none-any.whl", hash = "sha256:dce63053b916b73d8736a77404309328f938851b5c2c5e8493cde910ce37e362"}, + {file = "pyperf-2.7.0.tar.gz", hash = "sha256:4201c6601032f374e9c900c6d2544a2f5891abedc1a96eec0e7b2338a6247589"}, +] + +[package.dependencies] +psutil = ">=5.9.0" + +[package.extras] +dev = 
["importlib-metadata", "tox"] + [[package]] name = "pytest" version = "7.4.4" @@ -1135,51 +1293,64 @@ six = ">=1.5" [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - 
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = 
"PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = 
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file 
= "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = 
"sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -1282,13 +1453,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.4" +version = "4.66.5" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, - {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, ] [package.dependencies] @@ -1313,24 +1484,24 @@ files = [ [[package]] name = "types-pytz" -version = "2024.1.0.20240417" +version = "2024.2.0.20240913" description = "Typing stubs for pytz" optional = false python-versions = ">=3.8" files = [ - {file = "types-pytz-2024.1.0.20240417.tar.gz", hash = "sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981"}, - {file = "types_pytz-2024.1.0.20240417-py3-none-any.whl", hash = "sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659"}, + {file = "types-pytz-2024.2.0.20240913.tar.gz", hash = "sha256:4433b5df4a6fc587bbed41716d86a5ba5d832b4378e506f40d34bc9c81df2c24"}, + {file = "types_pytz-2024.2.0.20240913-py3-none-any.whl", hash = "sha256:a1eebf57ebc6e127a99d2fa2ba0a88d2b173784ef9b3defcc2004ab6855a44df"}, ] [[package]] name = "types-pyyaml" -version = "6.0.12.20240311" +version = "6.0.12.20240917" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ 
- {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, + {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, + {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, ] [[package]] @@ -1349,13 +1520,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.32.0.20240602" +version = "2.32.0.20240914" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240602.tar.gz", hash = "sha256:3f98d7bbd0dd94ebd10ff43a7fbe20c3b8528acace6d8efafef0b6a184793f06"}, - {file = "types_requests-2.32.0.20240602-py3-none-any.whl", hash = "sha256:ed3946063ea9fbc6b5fc0c44fa279188bae42d582cb63760be6cb4b9d06c3de8"}, + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, ] [package.dependencies] @@ -1411,13 +1582,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "1.26.19" +version = "1.26.20" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, - {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, + {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, + {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, ] [package.extras] @@ -1427,13 +1598,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -1482,43 +1653,46 @@ tests = ["Werkzeug (==2.0.3)", "aiohttp", "boto3", "httplib2", "httpx", "pytest" [[package]] name = "watchdog" -version = "4.0.1" +version = "4.0.2" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, - {file = 
"watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, - {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, - {file = 
"watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, - {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, - {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, + 
{file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, + {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, + {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, + {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, + {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, ] [package.extras] @@ -1605,101 +1779,103 @@ files = [ [[package]] name = "yarl" -version = "1.9.4" +version = "1.13.0" description = "Yet another URL library" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, - {file = 
"yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, - {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, - {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, - {file = 
"yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, - {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, - {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, - {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, - {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, - {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = 
"sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, - {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, - {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, - {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, - {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, - {file = 
"yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, - {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, - {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, - {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, - {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, + {file = "yarl-1.13.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:66c028066be36d54e7a0a38e832302b23222e75db7e65ed862dc94effc8ef062"}, + {file = "yarl-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:517f9d90ca0224bb7002266eba6e70d8fcc8b1d0c9321de2407e41344413ed46"}, + {file = "yarl-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5378cb60f4209505f6aa60423c174336bd7b22e0d8beb87a2a99ad50787f1341"}, + {file = "yarl-1.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0675a9cf65176e11692b20a516d5f744849251aa24024f422582d2d1bf7c8c82"}, + {file 
= "yarl-1.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419c22b419034b4ee3ba1c27cbbfef01ca8d646f9292f614f008093143334cdc"}, + {file = "yarl-1.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf10e525e461f43831d82149d904f35929d89f3ccd65beaf7422aecd500dd39"}, + {file = "yarl-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d78ebad57152d301284761b03a708aeac99c946a64ba967d47cbcc040e36688b"}, + {file = "yarl-1.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e480a12cec58009eeaeee7f48728dc8f629f8e0f280d84957d42c361969d84da"}, + {file = "yarl-1.13.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e5462756fb34c884ca9d4875b6d2ec80957a767123151c467c97a9b423617048"}, + {file = "yarl-1.13.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:bff0d468664cdf7b2a6bfd5e17d4a7025edb52df12e0e6e17223387b421d425c"}, + {file = "yarl-1.13.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ffd8a9758b5df7401a49d50e76491f4c582cf7350365439563062cdff45bf16"}, + {file = "yarl-1.13.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ca71238af0d247d07747cb7202a9359e6e1d6d9e277041e1ad2d9f36b3a111a6"}, + {file = "yarl-1.13.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fda4404bbb6f91e327827f4483d52fe24f02f92de91217954cf51b1cb9ee9c41"}, + {file = "yarl-1.13.0-cp310-cp310-win32.whl", hash = "sha256:e557e2681b47a0ecfdfbea44743b3184d94d31d5ce0e4b13ff64ce227a40f86e"}, + {file = "yarl-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:3590ed9c7477059aea067a58ec87b433bbd47a2ceb67703b1098cca1ba075f0d"}, + {file = "yarl-1.13.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8986fa2be78193dc8b8c27bd0d3667fe612f7232844872714c4200499d5225ca"}, + {file = "yarl-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0db15ce35dfd100bc9ab40173f143fbea26c84d7458d63206934fe5548fae25d"}, + {file = 
"yarl-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49bee8c99586482a238a7b2ec0ef94e5f186bfdbb8204d14a3dd31867b3875ce"}, + {file = "yarl-1.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c73e0f8375b75806b8771890580566a2e6135e6785250840c4f6c45b69eb72d"}, + {file = "yarl-1.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ab16c9e94726fdfcbf5b37a641c9d9d0b35cc31f286a2c3b9cad6451cb53b2b"}, + {file = "yarl-1.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:784d6e50ea96b3bbb078eb7b40d8c0e3674c2f12da4f0061f889b2cfdbab8f37"}, + {file = "yarl-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:580fdb2ea48a40bcaa709ee0dc71f64e7a8f23b44356cc18cd9ce55dc3bc3212"}, + {file = "yarl-1.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d2845f1a37438a8e11e4fcbbf6ffd64bc94dc9cb8c815f72d0eb6f6c622deb0"}, + {file = "yarl-1.13.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bcb374db7a609484941c01380c1450728ec84d9c3e68cd9a5feaecb52626c4be"}, + {file = "yarl-1.13.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:561a5f6c054927cf5a993dd7b032aeebc10644419e65db7dd6bdc0b848806e65"}, + {file = "yarl-1.13.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b536c2ac042add7f276d4e5857b08364fc32f28e02add153f6f214de50f12d07"}, + {file = "yarl-1.13.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:52b7bb09bb48f7855d574480e2efc0c30d31cab4e6ffc6203e2f7ffbf2e4496a"}, + {file = "yarl-1.13.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e4dddf99a853b3f60f3ce6363fb1ad94606113743cf653f116a38edd839a4461"}, + {file = "yarl-1.13.0-cp311-cp311-win32.whl", hash = "sha256:0b489858642e4e92203941a8fdeeb6373c0535aa986200b22f84d4b39cd602ba"}, + {file = "yarl-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:31748bee7078db26008bf94d39693c682a26b5c3a80a67194a4c9c8fe3b5cf47"}, + {file = 
"yarl-1.13.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3a9b2650425b2ab9cc68865978963601b3c2414e1d94ef04f193dd5865e1bd79"}, + {file = "yarl-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:73777f145cd591e1377bf8d8a97e5f8e39c9742ad0f100c898bba1f963aef662"}, + {file = "yarl-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:144b9e9164f21da81731c970dbda52245b343c0f67f3609d71013dd4d0db9ebf"}, + {file = "yarl-1.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3628e4e572b1db95285a72c4be102356f2dfc6214d9f126de975fd51b517ae55"}, + {file = "yarl-1.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bd3caf554a52da78ec08415ebedeb6b9636436ca2afda9b5b9ff4a533478940"}, + {file = "yarl-1.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d7a44ae252efb0fcd79ac0997416721a44345f53e5aec4a24f489d983aa00e3"}, + {file = "yarl-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24b78a1f57780eeeb17f5e1be851ab9fa951b98811e1bb4b5a53f74eec3e2666"}, + {file = "yarl-1.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79de5f8432b53d1261d92761f71dfab5fc7e1c75faa12a3535c27e681dacfa9d"}, + {file = "yarl-1.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f603216d62e9680bfac7fb168ef9673fd98abbb50c43e73d97615dfa1afebf57"}, + {file = "yarl-1.13.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:acf27399c94270103d68f86118a183008d601e4c2c3a7e98dcde0e3b0163132f"}, + {file = "yarl-1.13.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:08037790f973367431b9406a7b9d940e872cca12e081bce3b7cea068daf81f0a"}, + {file = "yarl-1.13.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:33e2f5ef965e69a1f2a1b0071a70c4616157da5a5478f3c2f6e185e06c56a322"}, + {file = "yarl-1.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:38a3b742c923fe2cab7d2e2c67220d17da8d0433e8bfe038356167e697ef5524"}, + {file = "yarl-1.13.0-cp312-cp312-win32.whl", hash = "sha256:ab3ee57b25ce15f79ade27b7dfb5e678af26e4b93be5a4e22655acd9d40b81ba"}, + {file = "yarl-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:26214b0a9b8f4b7b04e67eee94a82c9b4e5c721f4d1ce7e8c87c78f0809b7684"}, + {file = "yarl-1.13.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:91251614cca1ba4ab0507f1ba5f5a44e17a5e9a4c7f0308ea441a994bdac3fc7"}, + {file = "yarl-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe6946c3cbcfbed67c5e50dae49baff82ad054aaa10ff7a4db8dfac646b7b479"}, + {file = "yarl-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:de97ee57e00a82ebb8c378fc73c5d9a773e4c2cec8079ff34ebfef61c8ba5b11"}, + {file = "yarl-1.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1129737da2291c9952a93c015e73583dd66054f3ae991c8674f6e39c46d95dd3"}, + {file = "yarl-1.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:37049eb26d637a5b2f00562f65aad679f5d231c4c044edcd88320542ad66a2d9"}, + {file = "yarl-1.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d15aff3477fecb7a469d1fdf5939a686fbc5a16858022897d3e9fc99301f19"}, + {file = "yarl-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa187a8599e0425f26b25987d884a8b67deb5565f1c450c3a6e8d3de2cdc8715"}, + {file = "yarl-1.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d95fcc9508390db73a0f1c7e78d9a1b1a3532a3f34ceff97c0b3b04140fbe6e4"}, + {file = "yarl-1.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d04ea92a3643a9bb28aa6954fff718342caab2cc3d25d0160fe16e26c4a9acb7"}, + {file = "yarl-1.13.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2842a89b697d8ca3dda6a25b4e4d835d14afe25a315c8a79dbdf5f70edfd0960"}, + {file = "yarl-1.13.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash 
= "sha256:db463fce425f935eee04a9182c74fdf9ed90d3bd2079d4a17f8fb7a2d7c11009"}, + {file = "yarl-1.13.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:3ff602aa84420b301c083ae7f07df858ae8e371bf3be294397bda3e0b27c6290"}, + {file = "yarl-1.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a9a1a600e8449f3a24bc7dca513be8d69db173fe842e8332a7318b5b8757a6af"}, + {file = "yarl-1.13.0-cp313-cp313-win32.whl", hash = "sha256:5540b4896b244a6539f22b613b32b5d1b737e08011aa4ed56644cb0519d687df"}, + {file = "yarl-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:08a3b0b8d10092dade46424fe775f2c9bc32e5a985fdd6afe410fe28598db6b2"}, + {file = "yarl-1.13.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:be828e92ae67a21d6a252aecd65668dddbf3bb5d5278660be607647335001119"}, + {file = "yarl-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e3b4293f02129cc2f5068f3687ef294846a79c9d19fabaa9bfdfeeebae11c001"}, + {file = "yarl-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2cec7b52903dcf9008311167036775346dcb093bb15ed7ec876debc3095e7dab"}, + {file = "yarl-1.13.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612bd8d2267558bea36347e4e6e3a96f436bdc5c011f1437824be4f2e3abc5e1"}, + {file = "yarl-1.13.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92a26956d268ad52bd2329c2c674890fe9e8669b41d83ed136e7037b1a29808e"}, + {file = "yarl-1.13.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:01953b5686e5868fd0d8eaea4e484482c158597b8ddb9d9d4d048303fa3334c7"}, + {file = "yarl-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01d3941d416e71ce65f33393beb50e93c1c9e8e516971b6653c96df6eb599a2c"}, + {file = "yarl-1.13.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:801fb5dfc05910cd5ef4806726e2129d8c9a16cdfa26a8166697da0861e59dfc"}, + {file = "yarl-1.13.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:cdcdd49136d423ee5234c9360eae7063d3120a429ee984d7d9da821c012da4d7"}, + {file = "yarl-1.13.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6072ff51eeb7938ecac35bf24fc465be00e75217eaa1ffad3cc7620accc0f6f4"}, + {file = "yarl-1.13.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d42227711a4180d0c22cec30fd81d263d7bb378389d8e70b5f4c597e8abae202"}, + {file = "yarl-1.13.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:ebb2236f8098205f59774a28e25a84601a4beb3e974157d418ee6c470d73e0dc"}, + {file = "yarl-1.13.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f997004ff530b5381290e82b212a93bd420fefe5a605872dc16fc7e4a7f4251e"}, + {file = "yarl-1.13.0-cp38-cp38-win32.whl", hash = "sha256:b9648e5ae280babcac867b16e845ce51ed21f8c43bced2ca40cff7eee983d6d4"}, + {file = "yarl-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:f3ef76df654f3547dcb76ba550f9ca59826588eecc6bd7df16937c620df32060"}, + {file = "yarl-1.13.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:92abbe37e3fb08935e0e95ac5f83f7b286a6f2575f542225ec7afde405ed1fa1"}, + {file = "yarl-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1932c7bfa537f89ad5ca3d1e7e05de3388bb9e893230a384159fb974f6e9f90c"}, + {file = "yarl-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4483680e129b2a4250be20947b554cd5f7140fa9e5a1e4f1f42717cf91f8676a"}, + {file = "yarl-1.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f6f4a352d0beea5dd39315ab16fc26f0122d13457a7e65ad4f06c7961dcf87a"}, + {file = "yarl-1.13.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a67f20e97462dee8a89e9b997a78932959d2ed991e8f709514cb4160143e7b1"}, + {file = "yarl-1.13.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf4f3a87bd52f8f33b0155cd0f6f22bdf2092d88c6c6acbb1aee3bc206ecbe35"}, + {file = "yarl-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deb70c006be548076d628630aad9a3ef3a1b2c28aaa14b395cf0939b9124252e"}, + 
{file = "yarl-1.13.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf7a9b31729b97985d4a796808859dfd0e37b55f1ca948d46a568e56e51dd8fb"}, + {file = "yarl-1.13.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d807417ceebafb7ce18085a1205d28e8fcb1435a43197d7aa3fab98f5bfec5ef"}, + {file = "yarl-1.13.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:9671d0d65f86e0a0eee59c5b05e381c44e3d15c36c2a67da247d5d82875b4e4e"}, + {file = "yarl-1.13.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:13a9cd39e47ca4dc25139d3c63fe0dc6acf1b24f9d94d3b5197ac578fbfd84bf"}, + {file = "yarl-1.13.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:acf8c219a59df22609cfaff4a7158a0946f273e3b03a5385f1fdd502496f0cff"}, + {file = "yarl-1.13.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:12c92576633027f297c26e52aba89f6363b460f483d85cf7c14216eb55d06d02"}, + {file = "yarl-1.13.0-cp39-cp39-win32.whl", hash = "sha256:c2518660bd8166e770b76ce92514b491b8720ae7e7f5f975cd888b1592894d2c"}, + {file = "yarl-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:db90702060b1cdb7c7609d04df5f68a12fd5581d013ad379e58e0c2e651d92b8"}, + {file = "yarl-1.13.0-py3-none-any.whl", hash = "sha256:c7d35ff2a5a51bc6d40112cdb4ca3fd9636482ce8c6ceeeee2301e34f7ed7556"}, + {file = "yarl-1.13.0.tar.gz", hash = "sha256:02f117a63d11c8c2ada229029f8bb444a811e62e5041da962de548f26ac2c40f"}, ] [package.dependencies] @@ -1712,4 +1888,4 @@ vcr = [] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "e062da3051244f0d59796d6659149eee4e2f46d9332714d57edd459c80b7d8cd" +content-hash = "efc464f40b1618531c35a40a249abccadcbd52c081f8f36ea06a6abd796ecfd9" diff --git a/python/pyproject.toml b/python/pyproject.toml index 64656d057..0a47ed049 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.81" +version = "0.1.129" description = "Client library to connect to the LangSmith LLM 
Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" @@ -26,9 +26,13 @@ langsmith = "langsmith.cli.main:main" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -pydantic = [{version = ">=1,<3", python = "<3.12.4"}, {version = "^2.7.4", python=">=3.12.4"}] +pydantic = [ + { version = ">=1,<3", python = "<3.12.4" }, + { version = "^2.7.4", python = ">=3.12.4" }, +] requests = "^2" orjson = "^3.9.14" +httpx = ">=0.23.0,<1" [tool.poetry.group.dev.dependencies] pytest = "^7.3.1" @@ -53,6 +57,8 @@ fastapi = "^0.110.1" uvicorn = "^0.29.0" pytest-rerunfailures = "^14.0" pytest-socket = "^0.7.0" +pyperf = "^2.7.0" +py-spy = "^0.3.14" [tool.poetry.group.lint.dependencies] openai = "^1.10" @@ -75,8 +81,12 @@ lint.select = [ "I", # isort "D", # pydocstyle "D401", # First line should be in imperative mood + "T201", + "UP", ] lint.ignore = [ + "UP006", + "UP007", # Relax the convention by _not_ requiring documentation for every function parameter. "D417", ] @@ -84,20 +94,29 @@ lint.ignore = [ convention = "google" [tool.ruff.lint.per-file-ignores] -"tests/*" = ["D"] -"langsmith/cli/*" = ["D"] +"langsmith/run_helpers.py" = ["E501"] +"docs/conf.py" = ["E501"] +"langsmith/cli/*" = ["T201", "D", "UP"] +"docs/create_api_rst.py" = ["D101", "D103"] +"docs/scripts/custom_formatter.py" = ["D100"] +"langsmith/anonymizer.py" = ["E501"] +"langsmith/async_client.py" = ["E501"] +"langsmith/client.py" = ["E501"] +"langsmith/schemas.py" = ["E501"] +"tests/evaluation/__init__.py" = ["E501"] +"tests/*" = ["D", "UP"] +"bench/*" = ["D", "UP", "T"] +"docs/*" = ["T", "D"] [tool.ruff.format] docstring-code-format = true docstring-code-line-length = 80 [tool.mypy] -plugins = [ - "pydantic.v1.mypy", - "pydantic.mypy", -] +plugins = ["pydantic.v1.mypy", "pydantic.mypy"] ignore_missing_imports = "True" disallow_untyped_defs = "True" [tool.pytest.ini_options] asyncio_mode = "auto" +markers = ["slow: long-running tests"] diff --git a/python/tests/evaluation/__init__.py 
b/python/tests/evaluation/__init__.py index e69de29bb..f2f869cab 100644 --- a/python/tests/evaluation/__init__.py +++ b/python/tests/evaluation/__init__.py @@ -0,0 +1,31 @@ +"""LangSmith Evaluations. + +This module provides a comprehensive suite of tools for evaluating language models and their outputs using LangSmith. + +Key Features: +- Robust evaluation framework for assessing model performance across diverse tasks +- Flexible configuration options for customizing evaluation criteria and metrics +- Seamless integration with LangSmith's platform for end-to-end evaluation workflows +- Advanced analytics and reporting capabilities for actionable insights + +Usage: +1. Import the necessary components from this module +2. Configure your evaluation parameters and criteria +3. Run your language model through the evaluation pipeline +4. Analyze the results using our built-in tools or export for further processing + +Example: + from langsmith.evaluation import RunEvaluator, MetricCalculator + + evaluator = RunEvaluator(model="gpt-3.5-turbo", dataset_name="customer_support") + results = evaluator.run() + metrics = MetricCalculator(results).calculate() + + print(metrics.summary()) + +For detailed API documentation and advanced usage scenarios, visit: +https://docs.langsmith.com/evaluation + +Note: This module is designed to work seamlessly with the LangSmith platform. +Ensure you have the necessary credentials and permissions set up before use. 
+""" diff --git a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index ecb371806..62eb0551c 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -1,15 +1,40 @@ import asyncio -from typing import Sequence +import time +from typing import Callable, Sequence, Tuple, TypeVar import pytest from langsmith import Client, aevaluate, evaluate, expect, test from langsmith.schemas import Example, Run +T = TypeVar("T") + + +def wait_for( + condition: Callable[[], Tuple[T, bool]], + max_sleep_time: int = 120, + sleep_time: int = 3, +) -> T: + """Wait for a condition to be true.""" + start_time = time.time() + last_e = None + while time.time() - start_time < max_sleep_time: + try: + res, cond = condition() + if cond: + return res + except Exception as e: + last_e = e + time.sleep(sleep_time) + total_time = time.time() - start_time + if last_e is not None: + raise last_e + raise ValueError(f"Callable did not return within {total_time}") + def test_evaluate(): client = Client() - client.clone_public_dataset( + _ = client.clone_public_dataset( "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" ) dataset_name = "Evaluate Examples" @@ -41,16 +66,46 @@ def predict(inputs: dict) -> dict: }, num_repetitions=3, ) - results.wait() assert len(results) == 30 examples = client.list_examples(dataset_name=dataset_name) for example in examples: assert len([r for r in results if r["example"].id == example.id]) == 3 + # Run it again with the existing project + results2 = evaluate( + predict, + data=dataset_name, + evaluators=[accuracy], + summary_evaluators=[precision], + experiment=results.experiment_name, + ) + assert len(results2) == 10 + + # ... 
and again with the object + experiment = client.read_project(project_name=results.experiment_name) + results3 = evaluate( + predict, + data=dataset_name, + evaluators=[accuracy], + summary_evaluators=[precision], + experiment=experiment, + ) + assert len(results3) == 10 + + # ... and again with the ID + results4 = evaluate( + predict, + data=dataset_name, + evaluators=[accuracy], + summary_evaluators=[precision], + experiment=str(experiment.id), + ) + assert len(results4) == 10 + async def test_aevaluate(): client = Client() - client.clone_public_dataset( + _ = client.clone_public_dataset( "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" ) dataset_name = "Evaluate Examples" @@ -60,6 +115,12 @@ def accuracy(run: Run, example: Example): expected = example.outputs["answer"] # type: ignore return {"score": expected.lower() == pred.lower()} + async def slow_accuracy(run: Run, example: Example): + pred = run.outputs["output"] # type: ignore + expected = example.outputs["answer"] # type: ignore + await asyncio.sleep(5) + return {"score": expected.lower() == pred.lower()} + def precision(runs: Sequence[Run], examples: Sequence[Example]): predictions = [run.outputs["output"].lower() for run in runs] # type: ignore expected = [example.outputs["answer"].lower() for example in examples] # type: ignore @@ -74,7 +135,7 @@ async def apredict(inputs: dict) -> dict: results = await aevaluate( apredict, data=dataset_name, - evaluators=[accuracy], + evaluators=[accuracy, slow_accuracy], summary_evaluators=[precision], experiment_prefix="My Experiment", description="My Experiment Description", @@ -87,12 +148,61 @@ async def apredict(inputs: dict) -> dict: assert len(results) == 20 examples = client.list_examples(dataset_name=dataset_name) all_results = [r async for r in results] + all_examples = [] for example in examples: count = 0 for r in all_results: if r["run"].reference_example_id == example.id: count += 1 assert count == 2 + 
all_examples.append(example) + + # Wait for there to be 2x runs vs. examples + def check_run_count(): + current_runs = list( + client.list_runs(project_name=results.experiment_name, is_root=True) + ) + for r in current_runs: + assert "accuracy" in r.feedback_stats + assert "slow_accuracy" in r.feedback_stats + return current_runs, len(current_runs) == 2 * len(all_examples) + + final_runs = wait_for(check_run_count, max_sleep_time=60, sleep_time=2) + + assert len(final_runs) == 2 * len( + all_examples + ), f"Expected {2 * len(all_examples)} runs, but got {len(final_runs)}" + + # Run it again with the existing project + results2 = await aevaluate( + apredict, + data=dataset_name, + evaluators=[accuracy], + summary_evaluators=[precision], + experiment=results.experiment_name, + ) + assert len(results2) == 10 + + # ... and again with the object + experiment = client.read_project(project_name=results.experiment_name) + results3 = await aevaluate( + apredict, + data=dataset_name, + evaluators=[accuracy], + summary_evaluators=[precision], + experiment=experiment, + ) + assert len(results3) == 10 + + # ... 
and again with the ID + results4 = await aevaluate( + apredict, + data=dataset_name, + evaluators=[accuracy], + summary_evaluators=[precision], + experiment=str(experiment.id), + ) + assert len(results4) == 10 @test diff --git a/python/tests/integration_tests/conftest.py b/python/tests/integration_tests/conftest.py new file mode 100644 index 000000000..8ad66c3d2 --- /dev/null +++ b/python/tests/integration_tests/conftest.py @@ -0,0 +1,17 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) diff --git a/python/tests/integration_tests/fake_server.py b/python/tests/integration_tests/fake_server.py index 93850d9da..f42f328f2 100644 --- a/python/tests/integration_tests/fake_server.py +++ b/python/tests/integration_tests/fake_server.py @@ -14,6 +14,7 @@ def fake_function(): assert parent_run is not None assert "did-propagate" in span.tags or [] assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -25,6 +26,7 @@ def fake_function_two(foo: str): assert parent_run is not None assert "did-propagate" in (span.tags or []) assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -36,6 +38,7 @@ def fake_function_three(foo: str): assert parent_run is not None assert "did-propagate" in (span.tags or []) assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -47,8 +50,16 @@ async def fake_route(request: Request): parent=request.headers, ): fake_function() - 
fake_function_two("foo", langsmith_extra={"parent": request.headers}) + fake_function_two( + "foo", + langsmith_extra={ + "parent": request.headers, + "project_name": "Definitely-not-your-grandpas-project", + }, + ) - with tracing_context(parent=request.headers): + with tracing_context( + parent=request.headers, project_name="Definitely-not-your-grandpas-project" + ): fake_function_three("foo") return {"message": "Fake route response"} diff --git a/python/tests/integration_tests/test_async_client.py b/python/tests/integration_tests/test_async_client.py new file mode 100644 index 000000000..6338ec2ae --- /dev/null +++ b/python/tests/integration_tests/test_async_client.py @@ -0,0 +1,278 @@ +import asyncio +import datetime +import uuid + +import pytest +from pydantic import BaseModel + +from langsmith import utils as ls_utils +from langsmith.async_client import AsyncClient +from langsmith.schemas import DataType, Run + + +@pytest.mark.asyncio +async def test_indexed_datasets(): + class InputsSchema(BaseModel): + name: str # type: ignore[annotation-unchecked] + age: int # type: ignore[annotation-unchecked] + + async with AsyncClient() as client: + # Create a new dataset + try: + dataset = await client.create_dataset( + "test_dataset_for_integration_tests_" + uuid.uuid4().hex, + inputs_schema_definition=InputsSchema.model_json_schema(), + ) + + example = await client.create_example( + inputs={"name": "Alice", "age": 30}, + outputs={"hi": "hello"}, + dataset_id=dataset.id, + ) + + await client.index_dataset(dataset_id=dataset.id) + + async def check_similar_examples(): + examples = await client.similar_examples( + {"name": "Alice", "age": 30}, dataset_id=dataset.id, limit=1 + ) + return len(examples) == 1 + + await wait_for(check_similar_examples, timeout=20) + examples = await client.similar_examples( + {"name": "Alice", "age": 30}, dataset_id=dataset.id, limit=1 + ) + assert examples[0].id == example.id + finally: + await client.delete_dataset(dataset_id=dataset.id) + 
+ +# Helper function to wait for a condition +async def wait_for(condition, timeout=10): + start_time = asyncio.get_event_loop().time() + while True: + try: + if await condition(): + return + except Exception: + if asyncio.get_event_loop().time() - start_time > timeout: + raise TimeoutError("Condition not met within the timeout period") + await asyncio.sleep(0.1) + + +@pytest.fixture +async def async_client(): + ls_utils.get_env_var.cache_clear() + client = AsyncClient(api_url="https://api.smith.langchain.com") + yield client + await client.aclose() + + +@pytest.mark.asyncio +async def test_create_run(async_client: AsyncClient): + project_name = "__test_create_run" + uuid.uuid4().hex[:8] + run_id = uuid.uuid4() + + await async_client.create_run( + name="test_run", + inputs={"input": "hello"}, + run_type="llm", + project_name=project_name, + id=run_id, + start_time=datetime.datetime.now(datetime.timezone.utc), + ) + + async def check_run(): + try: + run = await async_client.read_run(run_id) + return run.name == "test_run" + except ls_utils.LangSmithError: + return False + + await wait_for(check_run) + run = await async_client.read_run(run_id) + assert run.name == "test_run" + assert run.inputs == {"input": "hello"} + + +@pytest.mark.asyncio +async def test_list_runs(async_client: AsyncClient): + project_name = "__test_list_runs" + run_ids = [uuid.uuid4() for _ in range(3)] + meta_uid = str(uuid.uuid4()) + + for i, run_id in enumerate(run_ids): + await async_client.create_run( + name=f"test_run_{i}", + inputs={"input": f"hello_{i}"}, + run_type="llm", + project_name=project_name, + id=run_id, + start_time=datetime.datetime.now(datetime.timezone.utc), + end_time=datetime.datetime.now(datetime.timezone.utc), + extra={"metadata": {"uid": meta_uid}}, + ) + + filter_ = f'and(eq(metadata_key, "uid"), eq(metadata_value, "{meta_uid}"))' + + async def check_runs(): + runs = [ + run + async for run in async_client.list_runs( + project_name=project_name, filter=filter_ + ) + ] 
+ return len(runs) == 3 + + await wait_for(check_runs) + + runs = [ + run + async for run in async_client.list_runs( + project_name=project_name, filter=filter_ + ) + ] + assert len(runs) == 3 + assert all(isinstance(run, Run) for run in runs) + + +@pytest.mark.asyncio +async def test_create_dataset(async_client: AsyncClient): + dataset_name = "__test_create_dataset" + uuid.uuid4().hex[:8] + + dataset = await async_client.create_dataset(dataset_name, data_type=DataType.kv) + + assert dataset.name == dataset_name + assert dataset.data_type == DataType.kv + + await async_client.delete_dataset(dataset_id=dataset.id) + + +@pytest.mark.asyncio +async def test_create_example(async_client: AsyncClient): + dataset_name = "__test_create_example" + uuid.uuid4().hex[:8] + dataset = await async_client.create_dataset(dataset_name) + + example = await async_client.create_example( + inputs={"input": "hello"}, outputs={"output": "world"}, dataset_id=dataset.id + ) + + assert example.inputs == {"input": "hello"} + assert example.outputs == {"output": "world"} + + await async_client.delete_dataset(dataset_id=dataset.id) + + +@pytest.mark.asyncio +async def test_list_examples(async_client: AsyncClient): + dataset_name = "__test_list_examples" + uuid.uuid4().hex[:8] + dataset = await async_client.create_dataset(dataset_name) + + for i in range(3): + await async_client.create_example( + inputs={"input": f"hello_{i}"}, + outputs={"output": f"world_{i}"}, + dataset_id=dataset.id, + ) + + examples = [ + example async for example in async_client.list_examples(dataset_id=dataset.id) + ] + assert len(examples) == 3 + + await async_client.delete_dataset(dataset_id=dataset.id) + + +@pytest.mark.asyncio +async def test_create_feedback(async_client: AsyncClient): + project_name = "__test_create_feedback" + uuid.uuid4().hex[:8] + run_id = uuid.uuid4() + + await async_client.create_run( + name="test_run", + inputs={"input": "hello"}, + run_type="llm", + project_name=project_name, + id=run_id, + 
start_time=datetime.datetime.now(datetime.timezone.utc), + ) + + feedback = await async_client.create_feedback( + run_id=run_id, + key="test_key", + score=0.9, + value="test_value", + comment="test_comment", + ) + + assert feedback.run_id == run_id + assert feedback.key == "test_key" + assert feedback.score == 0.9 + assert feedback.value == "test_value" + assert feedback.comment == "test_comment" + + token = await async_client.create_presigned_feedback_token( + run_id=run_id, feedback_key="test_presigned_key" + ) + await async_client.create_feedback_from_token( + token.id, score=0.8, value="presigned_value", comment="presigned_comment" + ) + await async_client.create_feedback_from_token( + str(token.url), score=0.9, value="presigned_value", comment="presigned_comment" + ) + + async def check_feedback(): + feedbacks = [ + feedback async for feedback in async_client.list_feedback(run_ids=[run_id]) + ] + return sum(feedback.key == "test_presigned_key" for feedback in feedbacks) == 2 + + await wait_for(check_feedback, timeout=10) + feedbacks = [ + feedback async for feedback in async_client.list_feedback(run_ids=[run_id]) + ] + presigned_feedbacks = [f for f in feedbacks if f.key == "test_presigned_key"] + assert len(presigned_feedbacks) == 2 + assert all(f.value == "presigned_value" for f in presigned_feedbacks) + assert len(presigned_feedbacks) == 2 + for feedback in presigned_feedbacks: + assert feedback.value == "presigned_value" + assert feedback.comment == "presigned_comment" + assert feedback.score in {0.8, 0.9} + assert set(f.score for f in presigned_feedbacks) == {0.8, 0.9} + + shared_run_url = await async_client.share_run(run_id) + run_is_shared = await async_client.run_is_shared(run_id) + assert run_is_shared, f"Run isn't shared; failed link: {shared_run_url}" + + +@pytest.mark.asyncio +async def test_list_feedback(async_client: AsyncClient): + project_name = "__test_list_feedback" + run_id = uuid.uuid4() + + await async_client.create_run( + name="test_run", 
+ inputs={"input": "hello"}, + run_type="llm", + project_name=project_name, + id=run_id, + start_time=datetime.datetime.now(datetime.timezone.utc), + ) + + for i in range(3): + await async_client.create_feedback( + run_id=run_id, + key=f"test_key_{i}", + score=0.9, + value=f"test_value_{i}", + comment=f"test_comment_{i}", + ) + + async def check_feedbacks(): + feedbacks = [ + feedback async for feedback in async_client.list_feedback(run_ids=[run_id]) + ] + return len(feedbacks) == 3 + + await wait_for(check_feedbacks, timeout=10) diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index c4d59e8c4..87c2c6f94 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -13,10 +13,15 @@ import pytest from freezegun import freeze_time +from pydantic import BaseModel from langsmith.client import ID_TYPE, Client from langsmith.schemas import DataType -from langsmith.utils import LangSmithConnectionError, LangSmithError +from langsmith.utils import ( + LangSmithConnectionError, + LangSmithError, + get_env_var, +) def wait_for( @@ -36,7 +41,8 @@ def wait_for( @pytest.fixture def langchain_client() -> Client: - return Client() + get_env_var.cache_clear() + return Client(api_url="https://api.smith.langchain.com") def test_datasets(langchain_client: Client) -> None: @@ -98,11 +104,45 @@ def test_datasets(langchain_client: Client) -> None: assert updated_example_value.outputs["col2"] == "updatedExampleCol2" assert (updated_example_value.metadata or {}).get("foo") == "bar" + new_example = langchain_client.create_example( + inputs={"col1": "newAddedExampleCol1"}, + outputs={"col2": "newAddedExampleCol2"}, + dataset_id=new_dataset.id, + ) + example_value = langchain_client.read_example(new_example.id) + assert example_value.inputs is not None + assert example_value.inputs["col1"] == "newAddedExampleCol1" + assert example_value.outputs is not None + assert 
example_value.outputs["col2"] == "newAddedExampleCol2" + + langchain_client.update_examples( + example_ids=[new_example.id, example.id], + inputs=[{"col1": "newUpdatedExampleCol1"}, {"col1": "newNewUpdatedExampleCol"}], + outputs=[ + {"col2": "newUpdatedExampleCol2"}, + {"col2": "newNewUpdatedExampleCol2"}, + ], + metadata=[{"foo": "baz"}, {"foo": "qux"}], + ) + updated_example = langchain_client.read_example(new_example.id) + assert updated_example.id == new_example.id + assert updated_example.inputs["col1"] == "newUpdatedExampleCol1" + assert updated_example.outputs is not None + assert updated_example.outputs["col2"] == "newUpdatedExampleCol2" + assert (updated_example.metadata or {}).get("foo") == "baz" + + updated_example = langchain_client.read_example(example.id) + assert updated_example.id == example.id + assert updated_example.inputs["col1"] == "newNewUpdatedExampleCol" + assert updated_example.outputs is not None + assert updated_example.outputs["col2"] == "newNewUpdatedExampleCol2" + assert (updated_example.metadata or {}).get("foo") == "qux" + langchain_client.delete_example(example.id) examples2 = list( langchain_client.list_examples(dataset_id=new_dataset.id) # type: ignore ) - assert len(examples2) == 1 + assert len(examples2) == 2 langchain_client.delete_dataset(dataset_id=dataset_id) @@ -128,6 +168,14 @@ def test_list_examples(langchain_client: Client) -> None: example_list = list(langchain_client.list_examples(dataset_id=dataset.id)) assert len(example_list) == len(examples) + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, offset=1, limit=2) + ) + assert len(example_list) == 2 + + example_list = list(langchain_client.list_examples(dataset_id=dataset.id, offset=1)) + assert len(example_list) == len(examples) - 1 + example_list = list( langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) ) @@ -202,6 +250,71 @@ def test_list_examples(langchain_client: Client) -> None: ) assert len(example_list) == 0 + 
example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='exists(metadata, "baz")' + ) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='has("metadata", \'{"foo": "bar"}\')' + ) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='exists(metadata, "bazzz")' + ) + ) + assert len(example_list) == 0 + + langchain_client.delete_dataset(dataset_id=dataset.id) + + +@pytest.mark.slow +def test_similar_examples(langchain_client: Client) -> None: + inputs = [{"text": "how are you"}, {"text": "good bye"}, {"text": "see ya later"}] + outputs = [ + {"response": "good how are you"}, + {"response": "ta ta"}, + {"response": "tootles"}, + ] + dataset_name = "__test_similar_examples" + uuid4().hex[:4] + dataset = langchain_client.create_dataset( + dataset_name=dataset_name, + inputs_schema={ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "text": {"type": "string"}, + }, + "required": ["text"], + "additionalProperties": False, + }, + outputs_schema={ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "response": {"type": "string"}, + }, + "required": ["response"], + "additionalProperties": False, + }, + ) + langchain_client.create_examples( + inputs=inputs, outputs=outputs, dataset_id=dataset.id + ) + langchain_client.index_dataset(dataset_id=dataset.id) + # Need to wait for indexing to finish. 
+ time.sleep(5) + similar_list = langchain_client.similar_examples( + {"text": "howdy"}, limit=2, dataset_id=dataset.id + ) + assert len(similar_list) == 2 + langchain_client.delete_dataset(dataset_id=dataset.id) @@ -228,9 +341,11 @@ def test_persist_update_run(langchain_client: Client) -> None: langchain_client.create_run(**run) run["outputs"] = {"output": ["Hi"]} run["extra"]["foo"] = "bar" + run["name"] = "test_run_updated" langchain_client.update_run(run["id"], **run) wait_for(lambda: langchain_client.read_run(run["id"]).end_time is not None) stored_run = langchain_client.read_run(run["id"]) + assert stored_run.name == run["name"] assert stored_run.id == run["id"] assert stored_run.outputs == run["outputs"] assert stored_run.start_time == run["start_time"] @@ -241,6 +356,7 @@ def test_persist_update_run(langchain_client: Client) -> None: @pytest.mark.parametrize("uri", ["http://localhost:1981", "http://api.langchain.minus"]) def test_error_surfaced_invalid_uri(monkeypatch: pytest.MonkeyPatch, uri: str) -> None: + get_env_var.cache_clear() monkeypatch.setenv("LANGCHAIN_ENDPOINT", uri) monkeypatch.setenv("LANGCHAIN_API_KEY", "test") client = Client() @@ -249,11 +365,7 @@ def test_error_surfaced_invalid_uri(monkeypatch: pytest.MonkeyPatch, uri: str) - client.create_run("My Run", inputs={"text": "hello world"}, run_type="llm") -def test_create_dataset( - monkeypatch: pytest.MonkeyPatch, langchain_client: Client -) -> None: - """Test persisting runs and adding feedback.""" - monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://dev.api.smith.langchain.com") +def test_create_dataset(langchain_client: Client) -> None: dataset_name = "__test_create_dataset" + uuid4().hex[:4] if langchain_client.has_dataset(dataset_name=dataset_name): langchain_client.delete_dataset(dataset_name=dataset_name) @@ -297,12 +409,67 @@ def test_create_dataset( langchain_client.delete_dataset(dataset_id=dataset.id) +def test_dataset_schema_validation(langchain_client: Client) -> None: + 
dataset_name = "__test_create_dataset" + uuid4().hex[:4] + if langchain_client.has_dataset(dataset_name=dataset_name): + langchain_client.delete_dataset(dataset_name=dataset_name) + + class InputSchema(BaseModel): + input: str + + class OutputSchema(BaseModel): + output: str + + dataset = langchain_client.create_dataset( + dataset_name, + data_type=DataType.kv, + inputs_schema=InputSchema.model_json_schema(), + outputs_schema=OutputSchema.model_json_schema(), + ) + + # confirm we store the schema from the create request + assert dataset.inputs_schema == InputSchema.model_json_schema() + assert dataset.outputs_schema == OutputSchema.model_json_schema() + + # create an example that matches the schema, which should succeed + langchain_client.create_example( + inputs={"input": "hello world"}, + outputs={"output": "hello"}, + dataset_id=dataset.id, + ) + + # create an example that does not match the input schema + with pytest.raises(LangSmithError): + langchain_client.create_example( + inputs={"john": 1}, + outputs={"output": "hello"}, + dataset_id=dataset.id, + ) + + # create an example that does not match the output schema + with pytest.raises(LangSmithError): + langchain_client.create_example( + inputs={"input": "hello world"}, + outputs={"john": 1}, + dataset_id=dataset.id, + ) + + # assert read API includes the schema definition + read_dataset = langchain_client.read_dataset(dataset_id=dataset.id) + assert read_dataset.inputs_schema == InputSchema.model_json_schema() + assert read_dataset.outputs_schema == OutputSchema.model_json_schema() + + langchain_client.delete_dataset(dataset_id=dataset.id) + + @freeze_time("2023-01-01") def test_list_datasets(langchain_client: Client) -> None: ds1n = "__test_list_datasets1" + uuid4().hex[:4] ds2n = "__test_list_datasets2" + uuid4().hex[:4] try: - dataset1 = langchain_client.create_dataset(ds1n, data_type=DataType.llm) + dataset1 = langchain_client.create_dataset( + ds1n, data_type=DataType.llm, metadata={"foo": "barqux"} + ) 
dataset2 = langchain_client.create_dataset(ds2n, data_type=DataType.kv) assert dataset1.url is not None assert dataset2.url is not None @@ -325,6 +492,13 @@ def test_list_datasets(langchain_client: Client) -> None: ) ) assert len(datasets) == 1 + # Sub-filter on metadata + datasets = list( + langchain_client.list_datasets( + dataset_ids=[dataset1.id, dataset2.id], metadata={"foo": "barqux"} + ) + ) + assert len(datasets) == 1 finally: # Delete datasets for name in [ds1n, ds2n]: @@ -533,14 +707,6 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: assert run3.inputs == {"input1": 1, "input2": 2} assert run3.error == "error" - # read the project - result = langchain_client.read_project(project_name=_session) - assert result.error_rate > 0 - assert result.first_token_p50 is None - assert result.first_token_p99 is None - - langchain_client.delete_project(project_name=_session) - @freeze_time("2023-01-01") def test_get_info() -> None: @@ -643,3 +809,10 @@ def test_surrogates(): run_type="llm", end_time=datetime.datetime.now(datetime.timezone.utc), ) + + +def test_runs_stats(): + langchain_client = Client() + # We always have stuff in the "default" project... 
+ stats = langchain_client.get_run_stats(project_names=["default"], run_type="llm") + assert stats diff --git a/python/tests/integration_tests/test_context_propagation.py b/python/tests/integration_tests/test_context_propagation.py index 32cd1f74d..096f8bb5d 100644 --- a/python/tests/integration_tests/test_context_propagation.py +++ b/python/tests/integration_tests/test_context_propagation.py @@ -54,6 +54,7 @@ async def test_tracing_fake_server(fake_server): langsmith_extra={ "metadata": {"some-cool-value": 42}, "tags": ["did-propagate"], + "project_name": "distributed-tracing", }, ) assert result["message"] == "Fake route response" diff --git a/python/tests/integration_tests/test_experiment_manager.py b/python/tests/integration_tests/test_experiment_manager.py new file mode 100644 index 000000000..93f35a709 --- /dev/null +++ b/python/tests/integration_tests/test_experiment_manager.py @@ -0,0 +1,25 @@ +import uuid + +from langsmith.client import Client +from langsmith.evaluation._runner import _ExperimentManager + + +def test_experiment_manager_existing_name(): + client = Client() + dataset_name = f"Test Dups: {str(uuid.uuid4())}" + ds = client.create_dataset(dataset_name) + client.create_example(inputs={"un": "important"}, dataset_id=ds.id) + prefix = "Some Test Prefix" + try: + manager = _ExperimentManager(dataset_name, experiment=prefix, client=client) + assert manager is not None + original_name = manager._experiment_name + assert original_name.startswith(prefix) + client.create_project(original_name, reference_dataset_id=ds.id) + manager.start() + new_name = manager._experiment_name + assert new_name.startswith(prefix) + assert new_name != original_name + + finally: + client.delete_dataset(dataset_id=ds.id) diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py new file mode 100644 index 000000000..28b742096 --- /dev/null +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -0,0 +1,209 
@@ +import pytest + +from langsmith import Client, aevaluate, evaluate +from langsmith.evaluation.llm_evaluator import ( + CategoricalScoreConfig, + ContinuousScoreConfig, + LLMEvaluator, +) + + +def test_llm_evaluator_init() -> None: + evaluator = LLMEvaluator( + prompt_template="Is the response vague? Y/N\n{input}", + score_config=CategoricalScoreConfig( + key="vagueness", + choices=["Y", "N"], + description="Whether the response is vague. Y for yes, N for no.", + include_explanation=True, + ), + ) + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "vagueness", + "description": "Whether the response is vague. Y for yes, N for no.", + "type": "object", + "properties": { + "score": { + "type": "string", + "enum": ["Y", "N"], + "description": "The score for the evaluation, one of Y, N.", + }, + "explanation": { + "type": "string", + "description": "The explanation for the score.", + }, + }, + "required": ["score", "explanation"], + } + + # Try a continuous score + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "rating", + "description": "The rating of the response, from 0 to 1.", + "type": "object", + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "The score for the evaluation, " + "between 0 and 1, inclusive.", + }, + }, + "required": ["score"], + } + + # Test invalid model + with pytest.raises(ValueError): + LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + 
include_explanation=False, + ), + model_provider="invalid", + ) + + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {expected}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + assert evaluator is not None + assert set(evaluator.prompt.input_variables) == {"input", "output", "expected"} + + with pytest.raises(ValueError): + # Test invalid input variable without map_variables + LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + map_variables=lambda run, example: {"hello": "world"}, + ) + assert evaluator is not None + assert set(evaluator.prompt.input_variables) == {"input", "output", "hello"} + + +def test_from_model() -> None: + from langchain_openai import ChatOpenAI + + evaluator = LLMEvaluator.from_model( + ChatOpenAI(), + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "rating", + "description": "The rating of the response, from 0 to 1.", + "type": "object", + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "The score for the evaluation, " + "between 0 and 1, inclusive.", + }, + }, + "required": ["score"], + } + + +async def 
test_evaluate() -> None: + client = Client() + client.clone_public_dataset( + "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" + ) + dataset_name = "Evaluate Examples" + + def predict(inputs: dict) -> dict: + return {"answer": "Yes"} + + async def apredict(inputs: dict) -> dict: + return {"answer": "Yes"} + + reference_accuracy = LLMEvaluator( + prompt_template="Is the output accurate with respect to the expected output? " + "Y/N\nOutput: {output}\nExpected: {expected}", + score_config=CategoricalScoreConfig( + key="reference_accuracy", + choices=["Y", "N"], + description="Whether the output is accurate with respect to " + "the expected output.", + include_explanation=False, + ), + ) + + accuracy = LLMEvaluator( + prompt_template=[ + ( + "system", + "Is the output accurate with respect to the context and " + "question? Y/N", + ), + ("human", "Context: {context}\nQuestion: {question}\nOutput: {output}"), + ], + score_config=CategoricalScoreConfig( + key="accuracy", + choices=["Y", "N"], + description="Whether the output is accurate with respect to " + "the context and question.", + include_explanation=True, + ), + map_variables=lambda run, example: { + "context": example.inputs.get("context", "") if example else "", + "question": example.inputs.get("question", "") if example else "", + "output": run.outputs.get("output", "") if run.outputs else "", + }, + model_provider="anthropic", + model_name="claude-3-haiku-20240307", + ) + results = evaluate( + predict, + data=dataset_name, + evaluators=[reference_accuracy, accuracy], + experiment_prefix=__name__ + "::test_evaluate.evaluate", + ) + results.wait() + + await aevaluate( + apredict, + data=dataset_name, + evaluators=[reference_accuracy, accuracy], + experiment_prefix=__name__ + "::test_evaluate.aevaluate", + ) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py new file mode 100644 index 000000000..0bef1ba57 --- /dev/null +++ 
b/python/tests/integration_tests/test_prompts.py @@ -0,0 +1,561 @@ +from typing import Literal +from uuid import uuid4 + +import pytest +from langchain_core.prompts import ( + BasePromptTemplate, + ChatPromptTemplate, + PromptTemplate, +) +from langchain_core.runnables.base import RunnableSequence + +import langsmith.schemas as ls_schemas +import langsmith.utils as ls_utils +from langsmith.client import ( + Client, + convert_prompt_to_anthropic_format, + convert_prompt_to_openai_format, +) + + +@pytest.fixture +def langsmith_client() -> Client: + return Client(timeout_ms=(50_000, 90_000)) + + +@pytest.fixture +def prompt_template_1() -> ChatPromptTemplate: + return ChatPromptTemplate.from_template("tell me a joke about {topic}") + + +@pytest.fixture +def prompt_template_2() -> ChatPromptTemplate: + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant."), + ("human", "{question}"), + ] + ) + + +@pytest.fixture +def prompt_template_3() -> PromptTemplate: + return PromptTemplate.from_template("Summarize the following text: {text}") + + +@pytest.fixture +def prompt_with_model() -> dict: + return { + "id": ["langsmith", "playground", "PromptPlayground"], + "lc": 1, + "type": "constructor", + "kwargs": { + "last": { + "id": ["langchain", "schema", "runnable", "RunnableBinding"], + "lc": 1, + "type": "constructor", + "kwargs": { + "bound": { + "id": ["langchain", "chat_models", "openai", "ChatOpenAI"], + "lc": 1, + "type": "constructor", + "kwargs": { + "openai_api_key": { + "id": ["OPENAI_API_KEY"], + "lc": 1, + "type": "secret", + } + }, + }, + "kwargs": {}, + }, + }, + "first": { + "id": ["langchain", "prompts", "chat", "ChatPromptTemplate"], + "lc": 1, + "type": "constructor", + "kwargs": { + "messages": [ + { + "id": [ + "langchain", + "prompts", + "chat", + "SystemMessagePromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "prompt": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate", + ], + 
"lc": 1, + "type": "constructor", + "kwargs": { + "template": "You are a chatbot.", + "input_variables": [], + "template_format": "f-string", + }, + } + }, + }, + { + "id": [ + "langchain", + "prompts", + "chat", + "HumanMessagePromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "prompt": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "template": "{question}", + "input_variables": ["question"], + "template_format": "f-string", + }, + } + }, + }, + ], + "input_variables": ["question"], + }, + }, + }, + } + + +@pytest.fixture +def chat_prompt_template(): + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a chatbot"), + ("user", "{question}"), + ] + ) + + +def test_current_tenant_is_owner(langsmith_client: Client): + settings = langsmith_client._get_settings() + assert langsmith_client._current_tenant_is_owner(settings.tenant_handle or "-") + assert langsmith_client._current_tenant_is_owner("-") + assert not langsmith_client._current_tenant_is_owner("non_existent_owner") + + +def test_list_prompts(langsmith_client: Client): + response = langsmith_client.list_prompts(limit=10, offset=0) + assert isinstance(response, ls_schemas.ListPromptsResponse) + assert len(response.repos) <= 10 + + +def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + url = langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + assert isinstance(url, str) + assert langsmith_client._prompt_exists(prompt_name) + + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.repo_handle == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): + non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" + assert not 
langsmith_client._prompt_exists(non_existent_prompt) + + existent_prompt = f"existent_{uuid4().hex[:8]}" + assert langsmith_client.push_prompt(existent_prompt, object=prompt_template_2) + assert langsmith_client._prompt_exists(existent_prompt) + + langsmith_client.delete_prompt(existent_prompt) + + +def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + updated_data = langsmith_client.update_prompt( + prompt_name, + description="Updated description", + is_public=True, + tags=["test", "update"], + ) + assert isinstance(updated_data, dict) + + updated_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(updated_prompt, ls_schemas.Prompt) + assert updated_prompt.description == "Updated description" + assert updated_prompt.is_public + assert set(updated_prompt.tags) == set(["test", "update"]) + + langsmith_client.delete_prompt(prompt_name) + + +def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + assert langsmith_client._prompt_exists(prompt_name) + langsmith_client.delete_prompt(prompt_name) + assert not langsmith_client._prompt_exists(prompt_name) + + +def test_pull_prompt_object( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + manifest = langsmith_client.pull_prompt_commit(prompt_name) + assert isinstance(manifest, ls_schemas.PromptCommit) + assert manifest.repo == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + 
langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + # test pulling with just prompt name + pulled_prompt = langsmith_client.pull_prompt(prompt_name) + assert isinstance(pulled_prompt, ChatPromptTemplate) + assert ( + pulled_prompt.metadata and pulled_prompt.metadata["lc_hub_repo"] == prompt_name + ) + + # test pulling with private owner (-) and name + pulled_prompt_2 = langsmith_client.pull_prompt(f"-/{prompt_name}") + assert pulled_prompt == pulled_prompt_2 + + # test pulling with tenant handle and name + tenant_handle = langsmith_client._get_settings().tenant_handle + pulled_prompt_3 = langsmith_client.pull_prompt(f"{tenant_handle}/{prompt_name}") + assert pulled_prompt.metadata and pulled_prompt_3.metadata + assert ( + pulled_prompt.metadata["lc_hub_commit_hash"] + == pulled_prompt_3.metadata["lc_hub_commit_hash"] + ) + assert pulled_prompt_3.metadata["lc_hub_owner"] == tenant_handle + + # test pulling with handle, name and commit hash + tenant_handle = langsmith_client._get_settings().tenant_handle + pulled_prompt_4 = langsmith_client.pull_prompt( + f"{tenant_handle}/{prompt_name}:latest" + ) + assert pulled_prompt_3 == pulled_prompt_4 + + # test pulling without handle, with commit hash + assert pulled_prompt_4.metadata + pulled_prompt_5 = langsmith_client.pull_prompt( + f"{prompt_name}:{pulled_prompt_4.metadata['lc_hub_commit_hash']}" + ) + assert pulled_prompt_5.metadata + assert ( + pulled_prompt_4.metadata["lc_hub_commit_hash"] + == pulled_prompt_5.metadata["lc_hub_commit_hash"] + ) + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_and_pull_prompt( + langsmith_client: Client, prompt_template_2: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + + push_result = langsmith_client.push_prompt(prompt_name, object=prompt_template_2) + assert isinstance(push_result, str) + + pulled_prompt = langsmith_client.pull_prompt(prompt_name) + assert isinstance(pulled_prompt, ChatPromptTemplate) + + 
langsmith_client.delete_prompt(prompt_name) + + # should fail + with pytest.raises(ls_utils.LangSmithUserError): + langsmith_client.push_prompt( + f"random_handle/{prompt_name}", object=prompt_template_2 + ) + + +def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): + prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_with_model) + + pulled_prompt = langsmith_client.pull_prompt(prompt_name, include_model=True) + assert isinstance(pulled_prompt, RunnableSequence) + if getattr(pulled_prompt, "first", None): + first = getattr(pulled_prompt, "first") + assert isinstance(first, BasePromptTemplate) + assert first.metadata and first.metadata["lc_hub_repo"] == prompt_name + else: + assert False, "pulled_prompt.first should exist, incorrect prompt format" + + langsmith_client.delete_prompt(prompt_name) + + +def test_like_unlike_prompt( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + langsmith_client.like_prompt(prompt_name) + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.num_likes == 1 + + langsmith_client.unlike_prompt(prompt_name) + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.num_likes == 0 + + langsmith_client.delete_prompt(prompt_name) + + +def test_get_latest_commit_hash( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") + assert isinstance(commit_hash, str) + assert len(commit_hash) > 0 + + langsmith_client.delete_prompt(prompt_name) + + +def test_create_prompt(langsmith_client: 
Client): + prompt_name = f"test_create_prompt_{uuid4().hex[:8]}" + created_prompt = langsmith_client.create_prompt( + prompt_name, + description="Test description", + readme="Test readme", + tags=["test", "create"], + is_public=False, + ) + assert isinstance(created_prompt, ls_schemas.Prompt) + assert created_prompt.repo_handle == prompt_name + assert created_prompt.description == "Test description" + assert created_prompt.readme == "Test readme" + assert set(created_prompt.tags) == set(["test", "create"]) + assert not created_prompt.is_public + + langsmith_client.delete_prompt(prompt_name) + + +def test_create_commit( + langsmith_client: Client, + prompt_template_2: ChatPromptTemplate, + prompt_template_3: PromptTemplate, +): + prompt_name = f"test_create_commit_{uuid4().hex[:8]}" + try: + # this should fail because the prompt does not exist + commit_url = langsmith_client.create_commit( + prompt_name, object=prompt_template_2 + ) + pytest.fail("Expected LangSmithNotFoundError was not raised") + except ls_utils.LangSmithNotFoundError as e: + assert str(e) == "Prompt does not exist, you must create it first." 
+ except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + langsmith_client.push_prompt(prompt_name, object=prompt_template_3) + commit_url = langsmith_client.create_commit(prompt_name, object=prompt_template_2) + assert isinstance(commit_url, str) + assert prompt_name in commit_url + + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.num_commits == 2 + + # try submitting different types of unaccepted manifests + try: + # this should fail + commit_url = langsmith_client.create_commit(prompt_name, object={"hi": "hello"}) + except ls_utils.LangSmithError as e: + err = str(e) + assert "Manifest must have an id field" in err + assert "400 Client Error" in err + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + try: + # this should fail + commit_url = langsmith_client.create_commit(prompt_name, object={"id": ["hi"]}) + except ls_utils.LangSmithError as e: + err = str(e) + assert "Manifest type hi is not supported" in err + assert "400 Client Error" in err + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_prompt( + langsmith_client: Client, + prompt_template_3: PromptTemplate, + prompt_template_2: ChatPromptTemplate, +): + prompt_name = f"test_push_new_{uuid4().hex[:8]}" + url = langsmith_client.push_prompt( + prompt_name, + object=prompt_template_3, + is_public=True, + description="New prompt", + tags=["new", "test"], + ) + + assert isinstance(url, str) + assert prompt_name in url + + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) + assert prompt.is_public + assert prompt.description == "New prompt" + assert "new" in prompt.tags + assert "test" in prompt.tags + assert prompt.num_commits == 1 + + # test updating prompt metadata but not manifest + url = langsmith_client.push_prompt( + prompt_name, + is_public=False, + 
description="Updated prompt", + ) + + updated_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(updated_prompt, ls_schemas.Prompt) + assert updated_prompt.description == "Updated prompt" + assert not updated_prompt.is_public + assert updated_prompt.num_commits == 1 + + # test updating prompt manifest but not metadata + url = langsmith_client.push_prompt( + prompt_name, + object=prompt_template_2, + ) + assert isinstance(url, str) + + langsmith_client.delete_prompt(prompt_name) + + +@pytest.mark.parametrize("is_public,expected_count", [(True, 1), (False, 1)]) +def test_list_prompts_filter( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + is_public: bool, + expected_count: int, +): + prompt_name = f"test_list_filter_{uuid4().hex[:8]}" + langsmith_client.push_prompt( + prompt_name, object=prompt_template_1, is_public=is_public + ) + + response = langsmith_client.list_prompts(is_public=is_public, query=prompt_name) + + assert response.total == expected_count + if expected_count > 0: + assert response.repos[0].repo_handle == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_update_prompt_archive( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_archive_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + + langsmith_client.update_prompt(prompt_name, is_archived=True) + archived_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(archived_prompt, ls_schemas.Prompt) + assert archived_prompt.is_archived + + langsmith_client.update_prompt(prompt_name, is_archived=False) + unarchived_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(unarchived_prompt, ls_schemas.Prompt) + assert not unarchived_prompt.is_archived + + langsmith_client.delete_prompt(prompt_name) + + +@pytest.mark.parametrize( + "sort_field, sort_direction", + [ + (ls_schemas.PromptSortField.updated_at, "desc"), + ], +) +def 
test_list_prompts_sorting( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + sort_field: ls_schemas.PromptSortField, + sort_direction: Literal["asc", "desc"], +): + prompt_names = [f"test_sort_{i}_{uuid4().hex[:8]}" for i in range(3)] + for name in prompt_names: + langsmith_client.push_prompt(name, object=prompt_template_1) + + response = langsmith_client.list_prompts( + sort_field=sort_field, sort_direction=sort_direction, limit=10 + ) + + assert len(response.repos) >= 3 + sorted_names = [ + repo.repo_handle for repo in response.repos if repo.repo_handle in prompt_names + ] + assert sorted_names == sorted(sorted_names, reverse=(sort_direction == "desc")) + + for name in prompt_names: + langsmith_client.delete_prompt(name) + + +def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_prompt_to_openai_format( + invoked, + ) + expected = { + "messages": [ + {"content": "You are a chatbot", "role": "system"}, + {"content": "What is the meaning of life?", "role": "user"}, + ], + "model": "gpt-3.5-turbo", + "stream": False, + "n": 1, + "temperature": 0.7, + } + assert {k: res[k] for k in expected.keys()} == expected + + +def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_prompt_to_anthropic_format(invoked, {"model_name": "claude-2"}) + + assert res == { + "model": "claude-2", + "max_tokens": 1024, + "messages": [{"role": "user", "content": "What is the meaning of life?"}], + "system": "You are a chatbot", + } diff --git a/python/tests/integration_tests/test_runs.py b/python/tests/integration_tests/test_runs.py index 165a0cf6f..c9b62661e 100644 --- a/python/tests/integration_tests/test_runs.py +++ b/python/tests/integration_tests/test_runs.py @@ -1,8 +1,9 @@ import asyncio import time +import uuid 
from collections import defaultdict from concurrent.futures import ThreadPoolExecutor -from typing import AsyncGenerator, Generator, Optional +from typing import AsyncGenerator, Generator, Optional, Sequence import pytest # type: ignore @@ -24,11 +25,14 @@ def poll_runs_until_count( max_retries: int = 10, sleep_time: int = 2, require_success: bool = True, + filter_: Optional[str] = None, ): retries = 0 while retries < max_retries: try: - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list( + langchain_client.list_runs(project_name=project_name, filter=filter_) + ) if len(runs) == count: if not require_success or all( [run.status == "success" for run in runs] @@ -45,8 +49,7 @@ def test_nested_runs( langchain_client: Client, ): project_name = "__My Tracer Project - test_nested_runs" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") def my_run(text: str): @@ -61,10 +64,20 @@ def my_llm_run(text: str): def my_chain_run(text: str): return my_run(text) - my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + my_chain_run( + "foo", + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) for _ in range(15): try: - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list( + langchain_client.list_runs( + project_name=project_name, + filter=f"and(eq(metadata_key,'test_run'),eq(metadata_value,'{run_meta}'))", + ) + ) assert len(runs) == 3 break except (ls_utils.LangSmithError, AssertionError): @@ -81,10 +94,6 @@ def my_chain_run(text: str): assert runs_dict["my_llm_run"].parent_run_id == runs_dict["my_run"].id assert runs_dict["my_llm_run"].run_type == "llm" assert runs_dict["my_llm_run"].inputs == {"text": "foo"} - try: - langchain_client.delete_project(project_name=project_name) - except Exception: - pass async def 
test_list_runs_multi_project(langchain_client: Client): @@ -92,35 +101,36 @@ async def test_list_runs_multi_project(langchain_client: Client): "__My Tracer Project - test_list_runs_multi_project", "__My Tracer Project - test_list_runs_multi_project2", ] - try: - for project_name in project_names: - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) - - @traceable(run_type="chain") - async def my_run(text: str): - return "Completed: " + text - - for project_name in project_names: - await my_run("foo", langsmith_extra=dict(project_name=project_name)) - poll_runs_until_count(langchain_client, project_names[0], 1) - poll_runs_until_count(langchain_client, project_names[1], 1) - runs = list(langchain_client.list_runs(project_name=project_names)) - assert len(runs) == 2 - assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore - assert runs[0].session_id != runs[1].session_id - - finally: - for project_name in project_names: - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + + @traceable(run_type="chain") + async def my_run(text: str): + return "Completed: " + text + + run_meta = uuid.uuid4().hex + for project_name in project_names: + await my_run( + "foo", + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + + poll_runs_until_count(langchain_client, project_names[0], 1, filter_=filter_) + runs = list( + langchain_client.list_runs( + project_name=project_names, + filter=filter_, + ) + ) + assert len(runs) == 2 + assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore + assert runs[0].session_id != runs[1].session_id async def test_nested_async_runs(langchain_client: Client): """Test nested runs with a mix of async and sync functions.""" project_name = "__My Tracer 
Project - test_nested_async_runs" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) executor = ThreadPoolExecutor(max_workers=1) @traceable(run_type="chain") @@ -143,10 +153,15 @@ def my_sync_tool(text: str, *, my_arg: int = 10): async def my_chain_run(text: str): return await my_run(text) - await my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + meta = uuid.uuid4().hex + await my_chain_run( + "foo", + langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}), + ) executor.shutdown(wait=True) - poll_runs_until_count(langchain_client, project_name, 4) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 4, filter_=_filter) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) assert len(runs) == 4 runs_dict = {run.name: run for run in runs} assert runs_dict["my_chain_run"].parent_run_id is None @@ -162,14 +177,11 @@ async def my_chain_run(text: str): "text": "foo", "my_arg": 20, } - langchain_client.delete_project(project_name=project_name) async def test_nested_async_runs_with_threadpool(langchain_client: Client): """Test nested runs with a mix of async and sync functions.""" project_name = "__My Tracer Project - test_nested_async_runs_with_threadpol" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) @traceable(run_type="llm") async def async_llm(text: str): @@ -191,7 +203,12 @@ def my_run(text: str, *, run_tree: Optional[RunTree] = None): thread_pool = ThreadPoolExecutor(max_workers=1) for i in range(3): thread_pool.submit( - my_tool_run, f"Child Tool {i}", langsmith_extra={"run_tree": run_tree} + my_tool_run, + f"Child Tool {i}", + langsmith_extra={ + "run_tree": run_tree, + "metadata": getattr(run_tree, "metadata", {}), + }, ) 
thread_pool.shutdown(wait=True) return llm_run_result @@ -203,16 +220,27 @@ async def my_chain_run(text: str, run_tree: RunTree): thread_pool = ThreadPoolExecutor(max_workers=3) for i in range(2): thread_pool.submit( - my_run, f"Child {i}", langsmith_extra=dict(run_tree=run_tree) + my_run, + f"Child {i}", + langsmith_extra=dict(run_tree=run_tree, metadata=run_tree.metadata), ) thread_pool.shutdown(wait=True) return text - await my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + meta = uuid.uuid4().hex + await my_chain_run( + "foo", + langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}), + ) executor.shutdown(wait=True) - poll_runs_until_count(langchain_client, project_name, 17) - runs = list(langchain_client.list_runs(project_name=project_name)) - trace_runs = list(langchain_client.list_runs(trace_id=runs[0].trace_id)) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 17, filter_=filter_) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) + trace_runs = list( + langchain_client.list_runs( + trace_id=runs[0].trace_id, project_name=project_name, filter=filter_ + ) + ) assert len(trace_runs) == 17 assert len(runs) == 17 assert sum([run.run_type == "llm" for run in runs]) == 8 @@ -244,14 +272,15 @@ async def my_chain_run(text: str, run_tree: RunTree): async def test_context_manager(langchain_client: Client) -> None: project_name = "__My Tracer Project - test_context_manager" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) @traceable(run_type="llm") async def my_llm(prompt: str) -> str: return f"LLM {prompt}" - with trace("my_context", "chain", project_name=project_name) as run_tree: + meta = uuid.uuid4().hex + with trace( + "my_context", "chain", project_name=project_name, metadata={"test_run": meta} + ) as run_tree: await my_llm("foo") with 
trace("my_context2", "chain", run_tree=run_tree) as run_tree2: runs = [my_llm("baz"), my_llm("qux")] @@ -260,25 +289,35 @@ async def my_llm(prompt: str) -> str: await my_llm("corge") await asyncio.gather(*runs) run_tree.end(outputs={"End val": "my_context2"}) - poll_runs_until_count(langchain_client, project_name, 8) - runs_ = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 8, filter_=_filter) + runs_ = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) assert len(runs_) == 8 -async def test_sync_generator(langchain_client: Client): +def test_sync_generator(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") def my_generator(num: int) -> Generator[str, None, None]: for i in range(num): yield f"Yielded {i}" - results = list(my_generator(5, langsmith_extra=dict(project_name=project_name))) + results = list( + my_generator( + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + ) assert results == ["Yielded 0", "Yielded 1", "Yielded 2", "Yielded 3", "Yielded 4"] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=_filter + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_generator" @@ -287,12 +326,11 @@ def my_generator(num: int) -> Generator[str, None, None]: } -async def 
test_sync_generator_reduce_fn(langchain_client: Client): +def test_sync_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator_reduce_fn" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex - def reduce_fn(outputs: list) -> dict: + def reduce_fn(outputs: Sequence) -> dict: return {"my_output": " ".join(outputs)} @traceable(run_type="chain", reduce_fn=reduce_fn) @@ -300,10 +338,20 @@ def my_generator(num: int) -> Generator[str, None, None]: for i in range(num): yield f"Yielded {i}" - results = list(my_generator(5, langsmith_extra=dict(project_name=project_name))) + results = list( + my_generator( + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + ) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' assert results == ["Yielded 0", "Yielded 1", "Yielded 2", "Yielded 3", "Yielded 4"] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=filter_ + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_generator" @@ -316,8 +364,7 @@ def my_generator(num: int) -> Generator[str, None, None]: async def test_async_generator(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") async def my_async_generator(num: int) -> AsyncGenerator[str, None]: @@ -328,7 +375,10 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: results = [ item async for item in my_async_generator( - 
5, langsmith_extra=dict(project_name=project_name) + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), ) ] assert results == [ @@ -338,8 +388,11 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: "Async yielded 3", "Async yielded 4", ] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=_filter + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_async_generator" @@ -356,10 +409,9 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: async def test_async_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator_reduce_fn" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex - def reduce_fn(outputs: list) -> dict: + def reduce_fn(outputs: Sequence) -> dict: return {"my_output": " ".join(outputs)} @traceable(run_type="chain", reduce_fn=reduce_fn) @@ -371,7 +423,10 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: results = [ item async for item in my_async_generator( - 5, langsmith_extra=dict(project_name=project_name) + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), ) ] assert results == [ @@ -381,11 +436,11 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: "Async yielded 3", "Async yielded 4", ] - + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' poll_runs_until_count( - langchain_client, project_name, 1, max_retries=20, sleep_time=5 + langchain_client, project_name, 1, 
max_retries=20, sleep_time=5, filter_=filter_ ) - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_async_generator" diff --git a/python/tests/integration_tests/wrappers/test_openai.py b/python/tests/integration_tests/wrappers/test_openai.py index d12e77da6..32dcd85c2 100644 --- a/python/tests/integration_tests/wrappers/test_openai.py +++ b/python/tests/integration_tests/wrappers/test_openai.py @@ -114,6 +114,8 @@ def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): patched_chunks = list(patched) assert len(original_chunks) == len(patched_chunks) assert [o.choices == p.choices for o, p in zip(original_chunks, patched_chunks)] + assert original.response + assert patched.response else: assert type(original) == type(patched) assert original.choices == patched.choices @@ -165,6 +167,8 @@ async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool) patched_chunks.append(chunk) assert len(original_chunks) == len(patched_chunks) assert [o.choices == p.choices for o, p in zip(original_chunks, patched_chunks)] + assert original.response + assert patched.response else: assert type(original) == type(patched) assert original.choices == patched.choices diff --git a/python/tests/unit_tests/caching/.test_tracing_fake_server.yaml b/python/tests/unit_tests/caching/.test_tracing_fake_server.yaml new file mode 100644 index 000000000..4b56a25e7 --- /dev/null +++ b/python/tests/unit_tests/caching/.test_tracing_fake_server.yaml @@ -0,0 +1,38 @@ +interactions: +- request: + body: '{"val": 8, "should_err": 0}' + headers: {} + method: POST + uri: http://localhost:8257/fake-route + response: + body: + string: '{"STATUS":"SUCCESS"}' + headers: + content-length: + - '20' + content-type: + - application/json + status: + code: 200 + message: OK +- request: + body: '{"val": 8, 
"should_err": 0}' + headers: {} + method: POST + uri: http://localhost:8257/fake-route + response: + body: + string: '{"STATUS":"SUCCESS"}' + headers: + Content-Length: + - '20' + Content-Type: + - application/json + Date: + - Thu, 23 May 2024 05:39:12 GMT + Server: + - uvicorn + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/unit_tests/evaluation/test_evaluator.py b/python/tests/unit_tests/evaluation/test_evaluator.py index a66f89cc3..c3b907701 100644 --- a/python/tests/unit_tests/evaluation/test_evaluator.py +++ b/python/tests/unit_tests/evaluation/test_evaluator.py @@ -1,10 +1,16 @@ import asyncio -from typing import Optional +import logging +import uuid +from typing import Any, Optional +from unittest import mock from unittest.mock import MagicMock import pytest +from langsmith import schemas from langsmith.evaluation.evaluator import ( + ComparisonEvaluationResult, + DynamicComparisonRunEvaluator, DynamicRunEvaluator, EvaluationResult, EvaluationResults, @@ -12,6 +18,7 @@ Run, run_evaluator, ) +from langsmith.evaluation.integrations._langchain import LangChainStringEvaluator from langsmith.run_helpers import tracing_context @@ -44,6 +51,24 @@ def sample_evaluator(run: Run, example: Optional[Example]) -> EvaluationResult: assert result.score == 1.0 +async def test_dynamie_comparison_run_evaluator(): + def foo(runs: list, example): + return ComparisonEvaluationResult(key="bar", scores={uuid.uuid4(): 3.1}) + + async def afoo(runs: list, example): + return ComparisonEvaluationResult(key="bar", scores={uuid.uuid4(): 3.1}) + + evaluators = [ + DynamicComparisonRunEvaluator(foo), + DynamicComparisonRunEvaluator(afoo), + DynamicComparisonRunEvaluator(foo, afoo), + ] + for e in evaluators: + res = await e.acompare_runs([], None) + assert res.key == "bar" + repr(e) + + def test_run_evaluator_decorator_dict(run_1: Run, example_1: Example): @run_evaluator def sample_evaluator(run: Run, example: Optional[Example]) -> dict: @@ -294,3 +319,80 @@ 
async def sample_evaluator( assert result["results"][0].score == 1.0 assert result["results"][1].key == "test2" assert result["results"][1].score == 2.0 + + +@pytest.mark.parametrize("response", [None, {}, {"accuracy": 5}]) +async def test_evaluator_raises_for_null_ouput(response: Any): + @run_evaluator # type: ignore + def bad_evaluator(run: schemas.Run, example: schemas.Example): + return response + + @run_evaluator # type: ignore + async def abad_evaluator(run: schemas.Run, example: schemas.Example): + return response + + fake_run = MagicMock() + fake_example = MagicMock() + + with pytest.raises(ValueError, match="Expected an EvaluationResult "): + bad_evaluator.evaluate_run(fake_run, fake_example) + + with pytest.raises(ValueError, match="Expected an EvaluationResult "): + await bad_evaluator.aevaluate_run(fake_run, fake_example) + + with pytest.raises(ValueError, match="Expected an EvaluationResult "): + await abad_evaluator.aevaluate_run(fake_run, fake_example) + + +def test_check_value_non_numeric(caplog): + # Test when score is None and value is numeric + with caplog.at_level(logging.WARNING): + EvaluationResult(key="test", value=5) + + assert ( + "Numeric values should be provided in the 'score' field, not 'value'. Got: 5" + in caplog.text + ) + + # Test when score is provided and value is numeric (should not log) + with caplog.at_level(logging.WARNING): + caplog.clear() + EvaluationResult(key="test", score=5, value="non-numeric") + + assert ( + "Numeric values should be provided in the 'score' field, not 'value'." + not in caplog.text + ) + + # Test when both score and value are None (should not log) + with caplog.at_level(logging.WARNING): + caplog.clear() + EvaluationResult(key="test") + + assert ( + "Numeric values should be provided in the 'score' field, not 'value'." 
+ not in caplog.text + ) + + # Test when value is non-numeric (should not log) + with caplog.at_level(logging.WARNING): + caplog.clear() + EvaluationResult(key="test", value="non-numeric") + + assert ( + "Numeric values should be provided in the 'score' field, not 'value'." + not in caplog.text + ) + + +def test_langchain_run_evaluator_native_async(): + try: + from langchain.evaluation import load_evaluator # noqa + except ImportError: + pytest.skip("Skipping test that requires langchain") + + with mock.patch.dict("os.environ", {"OPENAI_API_KEY": "fake_api_key"}): + res = LangChainStringEvaluator(evaluator="qa") + run_evaluator = res.as_run_evaluator() + assert hasattr(run_evaluator, "afunc") + assert hasattr(run_evaluator, "func") diff --git a/python/tests/unit_tests/evaluation/test_runner.py b/python/tests/unit_tests/evaluation/test_runner.py new file mode 100644 index 000000000..1229590c9 --- /dev/null +++ b/python/tests/unit_tests/evaluation/test_runner.py @@ -0,0 +1,382 @@ +"""Test the eval runner.""" + +import asyncio +import itertools +import json +import random +import sys +import time +import uuid +from datetime import datetime, timezone +from threading import Lock +from typing import Callable, List +from unittest import mock +from unittest.mock import MagicMock + +import pytest + +from langsmith import evaluate +from langsmith import schemas as ls_schemas +from langsmith.client import Client +from langsmith.evaluation._arunner import aevaluate, aevaluate_existing +from langsmith.evaluation._runner import evaluate_existing + + +class FakeRequest: + def __init__(self, ds_id, ds_name, ds_examples, tenant_id): + self.created_session = None + self.runs = {} + self.should_fail = False + self.ds_id = ds_id + self.ds_name = ds_name + self.ds_examples = ds_examples + self.tenant_id = tenant_id + + def request(self, verb: str, endpoint: str, *args, **kwargs): + if verb == "GET": + if endpoint == "http://localhost:1984/datasets": + res = MagicMock() + 
res.json.return_value = { + "id": self.ds_id, + "created_at": "2021-09-01T00:00:00Z", + "name": self.ds_name, + } + return res + elif endpoint == "http://localhost:1984/examples": + res = MagicMock() + res.json.return_value = [e.dict() for e in self.ds_examples] + return res + elif endpoint == "http://localhost:1984/sessions": + res = {} # type: ignore + if kwargs["params"]["name"] == self.created_session["name"]: # type: ignore + res = self.created_session # type: ignore + response = MagicMock() + response.json.return_value = res + return response + + else: + self.should_fail = True + raise ValueError(f"Unknown endpoint: {endpoint}") + elif verb == "POST": + if endpoint == "http://localhost:1984/sessions": + self.created_session = json.loads(kwargs["data"]) | { + "tenant_id": self.tenant_id + } + response = MagicMock() + response.json.return_value = self.created_session + return response + elif endpoint == "http://localhost:1984/runs/batch": + loaded_runs = json.loads(kwargs["data"]) + posted = loaded_runs.get("post", []) + patched = loaded_runs.get("patch", []) + for p in posted: + self.runs[p["id"]] = p + for p in patched: + self.runs[p["id"]].update(p) + response = MagicMock() + return response + elif endpoint == "http://localhost:1984/runs/query": + res = MagicMock() + res.json.return_value = { + "runs": [ + r for r in self.runs.values() if "reference_example_id" in r + ] + } + return res + elif endpoint == "http://localhost:1984/feedback": + response = MagicMock() + response.json.return_value = {} + return response + + else: + raise ValueError(f"Unknown endpoint: {endpoint}") + elif verb == "PATCH": + if ( + endpoint + == f"http://localhost:1984/sessions/{self.created_session['id']}" + ): # type: ignore + updates = json.loads(kwargs["data"]) + self.created_session.update({k: v for k, v in updates.items() if v}) # type: ignore + response = MagicMock() + response.json.return_value = self.created_session + return response + else: + self.should_fail = True + 
raise ValueError(f"Unknown endpoint: {endpoint}") + else: + self.should_fail = True + raise ValueError(f"Unknown verb: {verb}, {endpoint}") + + +def _wait_until(condition: Callable, timeout: int = 8): + start = time.time() + while time.time() - start < timeout: + if condition(): + return + time.sleep(0.1) + raise TimeoutError("Condition not met") + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher") +@pytest.mark.parametrize("blocking", [False, True]) +def test_evaluate_results(blocking: bool) -> None: + session = mock.Mock() + ds_name = "my-dataset" + ds_id = "00886375-eb2a-4038-9032-efff60309896" + + def _create_example(idx: int) -> ls_schemas.Example: + return ls_schemas.Example( + id=uuid.uuid4(), + inputs={"in": idx}, + outputs={"answer": idx + 1}, + dataset_id=ds_id, + created_at=datetime.now(timezone.utc), + ) + + SPLIT_SIZE = 3 + NUM_REPETITIONS = 4 + ds_examples = [_create_example(i) for i in range(10)] + dev_split = random.sample(ds_examples, SPLIT_SIZE) + tenant_id = str(uuid.uuid4()) + fake_request = FakeRequest(ds_id, ds_name, ds_examples, tenant_id) + session.request = fake_request.request + client = Client( + api_url="http://localhost:1984", + api_key="123", + session=session, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + client._tenant_id = tenant_id # type: ignore + + ordering_of_stuff: List[str] = [] + locked = False + + lock = Lock() + slow_index = None + + def predict(inputs: dict) -> dict: + nonlocal locked + nonlocal slow_index + if len(ordering_of_stuff) > 2 and not locked: + with lock: + if len(ordering_of_stuff) > 2 and not locked: + locked = True + time.sleep(3) + slow_index = len(ordering_of_stuff) + ordering_of_stuff.append("predict") + else: + ordering_of_stuff.append("predict") + 
+ else: + ordering_of_stuff.append("predict") + return {"output": inputs["in"] + 1} + + def score_value_first(run, example): + ordering_of_stuff.append("evaluate") + return {"score": 0.3} + + results = evaluate( + predict, + client=client, + data=dev_split, + evaluators=[score_value_first], + num_repetitions=NUM_REPETITIONS, + blocking=blocking, + ) + if not blocking: + deltas = [] + last = None + start = time.time() + now = start + for _ in results: + now = time.time() + deltas.append((now - last) if last is not None else 0) # type: ignore + last = now + assert now - start > 1.5 + # Essentially we want to check that 1 delay is > 1.5s and the rest are < 0.1s + assert len(deltas) == SPLIT_SIZE * NUM_REPETITIONS + assert slow_index is not None + + total_quick = sum([d < 0.5 for d in deltas]) + total_slow = sum([d > 0.5 for d in deltas]) + tolerance = 3 + assert total_slow < tolerance + assert total_quick > (SPLIT_SIZE * NUM_REPETITIONS - 1) - tolerance + + for r in results: + assert r["run"].outputs["output"] == r["example"].inputs["in"] + 1 # type: ignore + assert set(r["run"].outputs.keys()) == {"output"} # type: ignore + + assert fake_request.created_session + _wait_until(lambda: fake_request.runs) + N_PREDS = SPLIT_SIZE * NUM_REPETITIONS + _wait_until(lambda: len(ordering_of_stuff) == N_PREDS * 2) + _wait_until(lambda: slow_index is not None) + # Want it to be interleaved + assert ordering_of_stuff != ["predict"] * N_PREDS + ["evaluate"] * N_PREDS + + # It's delayed, so it'll be the penultimate event + # Will run all other preds and evals, then this, then the last eval + assert slow_index == (N_PREDS * 2) - 2 + + def score_value(run, example): + return {"score": 0.7} + + ex_results = evaluate_existing( + fake_request.created_session["name"], evaluators=[score_value], client=client + ) + second_item = next(itertools.islice(iter(ex_results), 1, 2)) + first_list = list(ex_results) + second_list = list(ex_results) + second_item_after = 
next(itertools.islice(iter(ex_results), 1, 2)) + assert len(first_list) == len(second_list) == SPLIT_SIZE * NUM_REPETITIONS + assert first_list == second_list + assert second_item == second_item_after + dev_xample_ids = [e.id for e in dev_split] + for r in ex_results: + assert r["example"].id in dev_xample_ids + assert r["evaluation_results"]["results"][0].score == 0.7 + assert r["run"].reference_example_id in dev_xample_ids + assert not fake_request.should_fail + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher") +@pytest.mark.parametrize("blocking", [False, True]) +async def test_aevaluate_results(blocking: bool) -> None: + session = mock.Mock() + ds_name = "my-dataset" + ds_id = "00886375-eb2a-4038-9032-efff60309896" + + def _create_example(idx: int) -> ls_schemas.Example: + return ls_schemas.Example( + id=uuid.uuid4(), + inputs={"in": idx}, + outputs={"answer": idx + 1}, + dataset_id=ds_id, + created_at=datetime.now(timezone.utc), + ) + + SPLIT_SIZE = 3 + NUM_REPETITIONS = 4 + ds_examples = [_create_example(i) for i in range(10)] + dev_split = random.sample(ds_examples, SPLIT_SIZE) + tenant_id = str(uuid.uuid4()) + fake_request = FakeRequest(ds_id, ds_name, ds_examples, tenant_id) + session.request = fake_request.request + client = Client( + api_url="http://localhost:1984", + api_key="123", + session=session, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + client._tenant_id = tenant_id # type: ignore + + ordering_of_stuff: List[str] = [] + locked = False + + lock = asyncio.Lock() + slow_index = None + + async def predict(inputs: dict) -> dict: + nonlocal locked + nonlocal slow_index + + if len(ordering_of_stuff) > 2 and not locked: + async with lock: + if len(ordering_of_stuff) > 2 and not locked: + locked 
= True + await asyncio.sleep(3) + slow_index = len(ordering_of_stuff) + ordering_of_stuff.append("predict") + else: + ordering_of_stuff.append("predict") + + else: + ordering_of_stuff.append("predict") + return {"output": inputs["in"] + 1} + + async def score_value_first(run, example): + ordering_of_stuff.append("evaluate") + return {"score": 0.3} + + results = await aevaluate( + predict, + client=client, + data=dev_split, + evaluators=[score_value_first], + num_repetitions=NUM_REPETITIONS, + blocking=blocking, + ) + if not blocking: + deltas = [] + last = None + start = time.time() + now = None + async for _ in results: + now = time.time() + if last is None: + elapsed = now - start + assert elapsed < 3 + deltas.append((now - last) if last is not None else 0) # type: ignore + last = now + total = now - start # type: ignore + assert total > 1.5 + + # Essentially we want to check that 1 delay is > 1.5s and the rest are < 0.1s + assert len(deltas) == SPLIT_SIZE * NUM_REPETITIONS + + total_quick = sum([d < 0.5 for d in deltas]) + total_slow = sum([d > 0.5 for d in deltas]) + tolerance = 3 + assert total_slow < tolerance + assert total_quick > (SPLIT_SIZE * NUM_REPETITIONS - 1) - tolerance + assert any([d > 1 for d in deltas]) + + async for r in results: + assert r["run"].outputs["output"] == r["example"].inputs["in"] + 1 # type: ignore + assert set(r["run"].outputs.keys()) == {"output"} # type: ignore + + assert fake_request.created_session + _wait_until(lambda: fake_request.runs) + N_PREDS = SPLIT_SIZE * NUM_REPETITIONS + _wait_until(lambda: len(ordering_of_stuff) == N_PREDS * 2) + _wait_until(lambda: slow_index is not None) + # Want it to be interleaved + assert ordering_of_stuff != ["predict"] * N_PREDS + ["evaluate"] * N_PREDS + assert slow_index is not None + # It's delayed, so it'll be the penultimate event + # Will run all other preds and evals, then this, then the last eval + assert slow_index == (N_PREDS * 2) - 2 + + assert fake_request.created_session["name"] 
+ + async def score_value(run, example): + return {"score": 0.7} + + ex_results = await aevaluate_existing( + fake_request.created_session["name"], evaluators=[score_value], client=client + ) + all_results = [r async for r in ex_results] + assert len(all_results) == SPLIT_SIZE * NUM_REPETITIONS + dev_xample_ids = [e.id for e in dev_split] + async for r in ex_results: + assert r["example"].id in dev_xample_ids + assert r["evaluation_results"]["results"][0].score == 0.7 + assert r["run"].reference_example_id in dev_xample_ids + assert not fake_request.should_fail diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index a653cf704..0e648ffc4 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -10,11 +10,12 @@ import threading import time import uuid +import warnings import weakref from datetime import datetime, timezone from enum import Enum from io import BytesIO -from typing import Any, NamedTuple, Optional +from typing import Any, NamedTuple, Optional, Type, Union from unittest import mock from unittest.mock import MagicMock, patch @@ -28,15 +29,12 @@ import langsmith.env as ls_env import langsmith.utils as ls_utils -from langsmith import run_trees +from langsmith import AsyncClient, EvaluationResult, run_trees from langsmith import schemas as ls_schemas from langsmith.client import ( Client, _dumps_json, - _get_api_key, - _get_api_url, _is_langchain_hosted, - _is_localhost, _serialize_json, ) @@ -44,10 +42,10 @@ def test_is_localhost() -> None: - assert _is_localhost("http://localhost:1984") - assert _is_localhost("http://localhost:1984") - assert _is_localhost("http://0.0.0.0:1984") - assert not _is_localhost("http://example.com:1984") + assert ls_utils._is_localhost("http://localhost:1984") + assert ls_utils._is_localhost("http://localhost:1984") + assert ls_utils._is_localhost("http://0.0.0.0:1984") + assert not ls_utils._is_localhost("http://example.com:1984") def 
test__is_langchain_hosted() -> None: @@ -56,19 +54,14 @@ def test__is_langchain_hosted() -> None: assert _is_langchain_hosted("https://dev.api.smith.langchain.com") -def test_validate_api_key_if_hosted(monkeypatch: pytest.MonkeyPatch) -> None: - monkeypatch.delenv("LANGCHAIN_API_KEY", raising=False) - monkeypatch.delenv("LANGSMITH_API_KEY", raising=False) - with pytest.raises(ls_utils.LangSmithUserError, match="API key must be provided"): - Client(api_url="https://api.smith.langchain.com") - client = Client(api_url="http://localhost:1984") - assert client.api_url == "http://localhost:1984" - assert client.api_key is None +def _clear_env_cache(): + ls_utils.get_env_var.cache_clear() def test_validate_api_url(monkeypatch: pytest.MonkeyPatch) -> None: # Scenario 1: Both LANGCHAIN_ENDPOINT and LANGSMITH_ENDPOINT # are set, but api_url is not + _clear_env_cache() monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com") monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com") @@ -77,6 +70,7 @@ def test_validate_api_url(monkeypatch: pytest.MonkeyPatch) -> None: # Scenario 2: Both LANGCHAIN_ENDPOINT and LANGSMITH_ENDPOINT # are set, and api_url is set + _clear_env_cache() monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com") monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com") @@ -84,6 +78,7 @@ def test_validate_api_url(monkeypatch: pytest.MonkeyPatch) -> None: assert client.api_url == "https://api.smith.langchain.com" # Scenario 3: LANGCHAIN_ENDPOINT is set, but LANGSMITH_ENDPOINT is not + _clear_env_cache() monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com") monkeypatch.delenv("LANGSMITH_ENDPOINT", raising=False) @@ -91,6 +86,7 @@ def test_validate_api_url(monkeypatch: pytest.MonkeyPatch) -> None: assert client.api_url == "https://api.smith.langchain-endpoint.com" # Scenario 4: LANGCHAIN_ENDPOINT is not set, but 
LANGSMITH_ENDPOINT is set + _clear_env_cache() monkeypatch.delenv("LANGCHAIN_ENDPOINT", raising=False) monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com") @@ -101,6 +97,7 @@ def test_validate_api_url(monkeypatch: pytest.MonkeyPatch) -> None: def test_validate_api_key(monkeypatch: pytest.MonkeyPatch) -> None: # Scenario 1: Both LANGCHAIN_API_KEY and LANGSMITH_API_KEY are set, # but api_key is not + _clear_env_cache() monkeypatch.setenv("LANGCHAIN_API_KEY", "env_langchain_api_key") monkeypatch.setenv("LANGSMITH_API_KEY", "env_langsmith_api_key") @@ -109,6 +106,7 @@ def test_validate_api_key(monkeypatch: pytest.MonkeyPatch) -> None: # Scenario 2: Both LANGCHAIN_API_KEY and LANGSMITH_API_KEY are set, # and api_key is set + _clear_env_cache() monkeypatch.setenv("LANGCHAIN_API_KEY", "env_langchain_api_key") monkeypatch.setenv("LANGSMITH_API_KEY", "env_langsmith_api_key") @@ -123,6 +121,7 @@ def test_validate_api_key(monkeypatch: pytest.MonkeyPatch) -> None: assert client.api_key == "env_langchain_api_key" # Scenario 4: LANGCHAIN_API_KEY is not set, but LANGSMITH_API_KEY is set + _clear_env_cache() monkeypatch.delenv("LANGCHAIN_API_KEY", raising=False) monkeypatch.setenv("LANGSMITH_API_KEY", "env_langsmith_api_key") @@ -131,6 +130,7 @@ def test_validate_api_key(monkeypatch: pytest.MonkeyPatch) -> None: def test_validate_multiple_urls(monkeypatch: pytest.MonkeyPatch) -> None: + _clear_env_cache() monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain-endpoint.com") monkeypatch.setenv("LANGSMITH_ENDPOINT", "https://api.smith.langsmith-endpoint.com") monkeypatch.setenv("LANGSMITH_RUNS_ENDPOINTS", "{}") @@ -161,6 +161,7 @@ def test_validate_multiple_urls(monkeypatch: pytest.MonkeyPatch) -> None: def test_headers(monkeypatch: pytest.MonkeyPatch) -> None: + _clear_env_cache() monkeypatch.delenv("LANGCHAIN_API_KEY", raising=False) with patch.dict("os.environ", {}, clear=True): client = Client(api_url="http://localhost:1984", 
api_key="123") @@ -173,6 +174,7 @@ def test_headers(monkeypatch: pytest.MonkeyPatch) -> None: @mock.patch("langsmith.client.requests.Session") def test_upload_csv(mock_session_cls: mock.Mock) -> None: + _clear_env_cache() dataset_id = str(uuid.uuid4()) example_1 = ls_schemas.Example( id=str(uuid.uuid4()), @@ -260,40 +262,6 @@ def test_async_methods() -> None: ) -def test_get_api_key() -> None: - assert _get_api_key("provided_api_key") == "provided_api_key" - assert _get_api_key("'provided_api_key'") == "provided_api_key" - assert _get_api_key('"_provided_api_key"') == "_provided_api_key" - - with patch.dict("os.environ", {"LANGCHAIN_API_KEY": "env_api_key"}, clear=True): - assert _get_api_key(None) == "env_api_key" - - with patch.dict("os.environ", {}, clear=True): - assert _get_api_key(None) is None - - assert _get_api_key("") is None - assert _get_api_key(" ") is None - - -def test_get_api_url() -> None: - assert _get_api_url("http://provided.url") == "http://provided.url" - - with patch.dict("os.environ", {"LANGCHAIN_ENDPOINT": "http://env.url"}): - assert _get_api_url(None) == "http://env.url" - - with patch.dict("os.environ", {}, clear=True): - assert _get_api_url(None) == "https://api.smith.langchain.com" - - with patch.dict("os.environ", {}, clear=True): - assert _get_api_url(None) == "https://api.smith.langchain.com" - - with patch.dict("os.environ", {"LANGCHAIN_ENDPOINT": "http://env.url"}): - assert _get_api_url(None) == "http://env.url" - - with pytest.raises(ls_utils.LangSmithUserError): - _get_api_url(" ") - - def test_create_run_unicode() -> None: inputs = { "foo": "これは私の友達です", @@ -898,6 +866,9 @@ def test_host_url(_: MagicMock) -> None: client = Client(api_url="http://localhost:8000", api_key="API_KEY") assert client._host_url == "http://localhost" + client = Client(api_url="https://eu.api.smith.langchain.com", api_key="API_KEY") + assert client._host_url == "https://eu.smith.langchain.com" + client = 
Client(api_url="https://dev.api.smith.langchain.com", api_key="API_KEY") assert client._host_url == "https://dev.smith.langchain.com" @@ -1074,3 +1045,56 @@ def test_batch_ingest_run_splits_large_batches(payload_size: int): # Check that no duplicate run_ids are present in the request bodies assert len(request_bodies) == len(set([body["id"] for body in request_bodies])) + + +def test_select_eval_results(): + expected = EvaluationResult( + key="foo", + value="bar", + score=7899082, + metadata={"a": "b"}, + comment="hi", + feedback_config={"c": "d"}, + ) + client = Client(api_key="test") + for count, input_ in [ + (1, expected), + (1, expected.dict()), + (1, {"results": [expected]}), + (1, {"results": [expected.dict()]}), + (2, {"results": [expected.dict(), expected.dict()]}), + (2, {"results": [expected, expected]}), + ]: + op = client._select_eval_results(input_) + assert len(op) == count + assert op == [expected] * count + + expected2 = EvaluationResult( + key="foo", + metadata={"a": "b"}, + comment="this is a comment", + feedback_config={"c": "d"}, + ) + + as_reasoning = { + "reasoning": expected2.comment, + **expected2.dict(exclude={"comment"}), + } + for input_ in [as_reasoning, {"results": [as_reasoning]}, {"results": [expected2]}]: + assert client._select_eval_results(input_) == [ + expected2, + ] + + +@pytest.mark.parametrize("client_cls", [Client, AsyncClient]) +def test_validate_api_key_if_hosted( + monkeypatch: pytest.MonkeyPatch, client_cls: Union[Type[Client], Type[AsyncClient]] +) -> None: + monkeypatch.delenv("LANGCHAIN_API_KEY", raising=False) + monkeypatch.delenv("LANGSMITH_API_KEY", raising=False) + with pytest.warns(ls_utils.LangSmithMissingAPIKeyWarning): + client_cls(api_url="https://api.smith.langchain.com") + with warnings.catch_warnings(): + # Check no warning is raised here. 
+ warnings.simplefilter("error") + client_cls(api_url="http://localhost:1984") diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 4ea0d564e..2f48dbff7 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -2,6 +2,7 @@ import functools import inspect import json +import os import sys import time import uuid @@ -13,6 +14,9 @@ import langsmith from langsmith import Client +from langsmith import schemas as ls_schemas +from langsmith import utils as ls_utils +from langsmith._internal import _aiter as aitertools from langsmith.run_helpers import ( _get_inputs, as_runnable, @@ -29,7 +33,7 @@ def _get_calls( mock_client: Any, minimum: Optional[int] = 0, verbs: Set[str] = {"POST"}, - attempts: int = 5, + attempts: int = 10, ) -> list: calls = [] for _ in range(attempts): @@ -197,29 +201,38 @@ def mock_client() -> Client: @pytest.mark.parametrize("use_next", [True, False]) -def test_traceable_iterator(use_next: bool, mock_client: Client) -> None: +@pytest.mark.parametrize("return_val", [None, "foo"]) +def test_traceable_iterator( + use_next: bool, return_val: Optional[str], mock_client: Client +) -> None: with tracing_context(enabled=True): @traceable(client=mock_client) - def my_iterator_fn(a, b, d, **kwargs): + def my_iterator_fn(a, b, d, **kwargs) -> Any: assert kwargs == {"e": 5} for i in range(a + b + d): yield i + return return_val expected = [0, 1, 2, 3, 4, 5] + if return_val is not None: + expected.append(return_val) genout = my_iterator_fn(1, 2, 3, e=5) if use_next: results = [] while True: try: results.append(next(genout)) - except StopIteration: + except StopIteration as e: + assert e.value == return_val + if e.value is not None: + results.append(e.value) break else: results = list(genout) + if return_val is not None: + results.append(return_val) assert results == expected - # Wait for batcher - # check the mock_calls mock_calls = 
_get_calls(mock_client, minimum=1) assert 1 <= len(mock_calls) <= 2 @@ -232,6 +245,109 @@ def my_iterator_fn(a, b, d, **kwargs): assert body["post"][0]["outputs"]["output"] == expected +class MyStreamObject: + def __init__(self, some_values: list): + self.vals = some_values + self._iter = iter(self.vals) + + def __next__(self): + return next(self._iter) + + def __iter__(self): + yield from self.vals + + +class MyAsyncStreamObject: + def __init__(self, some_values: list): + self.vals = some_values + + async def iter(): + for val in some_values: + yield val + + self._iter = iter() + + async def __anext__(self): + return await aitertools.py_anext(self._iter) + + async def __aiter__(self): + async for val in self._iter: + yield val + + +@pytest.mark.parametrize("use_next", [True, False]) +@pytest.mark.parametrize("response_type", ["async", "async"]) +async def test_traceable_stream( + use_next: bool, response_type: str, mock_client: Client +) -> None: + def reduce_fn(results: list): + return {"my_output": results} + + @traceable(client=mock_client, reduce_fn=reduce_fn) + def my_stream_fn(a, b, d, **kwargs): + assert kwargs == {"e": 5} + vals = [0, 1, 2, 3, 4, 5] + if response_type == "sync": + return MyStreamObject(vals) + else: + return MyAsyncStreamObject(vals) + + with tracing_context(enabled=True): + expected = [0, 1, 2, 3, 4, 5] + genout = my_stream_fn(1, 2, 3, e=5) + # assert getattr(genout, "vals") == expected + if use_next: + results = [] + if response_type == "sync": + while True: + try: + results.append(next(genout)) + except StopIteration: + break + else: + while True: + try: + results.append(await aitertools.py_anext(genout)) + except StopAsyncIteration: + break + + else: + if response_type == "sync": + results = list(genout) + else: + results = [r async for r in genout] + assert results == expected + # check the mock_calls + mock_calls = _get_calls(mock_client, minimum=1) + assert 1 <= len(mock_calls) <= 2 + + call = mock_calls[0] + assert call.args[0] == 
"POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + call_data = [json.loads(mock_call.kwargs["data"]) for mock_call in mock_calls] + body = call_data[0] + assert body["post"] + assert body["post"][0]["name"] == "my_stream_fn" + if body["post"][0]["outputs"]: + assert body["post"][0]["outputs"] == {"my_output": expected} + else: + first_patch = next((d for d in call_data if d.get("patch")), None) + attempt = 0 + while first_patch is None: + time.sleep(0.2) + if attempt > 2: + assert False, "Could not get patch" + mock_calls = _get_calls(mock_client, minimum=1) + call_data = [ + json.loads(mock_call.kwargs["data"]) for mock_call in mock_calls + ] + first_patch = next((d for d in call_data if d.get("patch")), None) + attempt += 1 + + assert first_patch["name"] == "my_stream_fn" + assert first_patch[0]["outputs"] == {"my_output": expected} + + @pytest.mark.parametrize("use_next", [True, False]) async def test_traceable_async_iterator(use_next: bool, mock_client: Client) -> None: with tracing_context(enabled=True): @@ -263,9 +379,17 @@ async def my_iterator_fn(a, b, d, **kwargs): assert call.args[1].startswith("https://api.smith.langchain.com") body = json.loads(call.kwargs["data"]) assert body["post"] - assert body["post"][0]["outputs"]["output"] == expected - # Assert the inputs are filtered as expected assert body["post"][0]["inputs"] == {"a": "FOOOOOO", "b": 2, "d": 3} + outputs_ = body["post"][0]["outputs"] + if "output" in outputs_: + assert outputs_["output"] == expected + # Assert the inputs are filtered as expected + else: + # It was put in the second batch + assert len(mock_calls) == 2 + body_2 = json.loads(mock_calls[1].kwargs["data"]) + assert body_2["patch"] + assert body_2["patch"][0]["outputs"]["output"] == expected @patch("langsmith.run_trees.Client", autospec=True) @@ -726,7 +850,12 @@ def _get_run(r: RunTree) -> None: with tracing_context(enabled=True): chunks = my_answer( - "some_query", langsmith_extra={"on_end": _get_run, 
"client": mock_client_} + "some_query", + langsmith_extra={ + "name": "test_overridding_name", + "on_end": _get_run, + "client": mock_client_, + }, ) all_chunks = [] for chunk in chunks: @@ -741,7 +870,7 @@ def _get_run(r: RunTree) -> None: ] assert run is not None run = cast(RunTree, run) - assert run.name == "expand_and_answer_questions" + assert run.name == "test_overridding_name" child_runs = run.child_runs assert child_runs and len(child_runs) == 5 names = [run.name for run in child_runs] @@ -947,6 +1076,65 @@ def _get_run(r: RunTree) -> None: assert child_runs[0].inputs == {"a": 1, "b": 2} +async def test_traceable_to_atrace(): + @traceable + async def great_grandchild_fn(a: int, b: int) -> int: + return a + b + + @traceable + async def parent_fn(a: int, b: int) -> int: + async with langsmith.trace( + name="child_fn", inputs={"a": a, "b": b} + ) as run_tree: + async with langsmith.trace( + "grandchild_fn", inputs={"a": a, "b": b, "c": "oh my"} + ) as run_tree_gc: + try: + async with langsmith.trace("expect_error", inputs={}): + raise ValueError("oh no") + except ValueError: + pass + result = await great_grandchild_fn(a, b) + run_tree_gc.end(outputs={"result": result}) + run_tree.end(outputs={"result": result}) + return result + + run: Optional[RunTree] = None # type: ignore + + def _get_run(r: RunTree) -> None: + nonlocal run + run = r + + with tracing_context(enabled=True): + result = await parent_fn( + 1, 2, langsmith_extra={"on_end": _get_run, "client": _get_mock_client()} + ) + + assert result == 3 + assert run is not None + run = cast(RunTree, run) + assert run.name == "parent_fn" + assert run.outputs == {"output": 3} + assert run.inputs == {"a": 1, "b": 2} + child_runs = run.child_runs + assert child_runs + assert len(child_runs) == 1 + child = child_runs[0] + assert child.name == "child_fn" + assert child.inputs == {"a": 1, "b": 2} + assert len(child.child_runs) == 1 + grandchild = child.child_runs[0] + assert grandchild.name == "grandchild_fn" + 
assert grandchild.inputs == {"a": 1, "b": 2, "c": "oh my"} + assert len(grandchild.child_runs) == 2 + ggcerror = grandchild.child_runs[0] + assert ggcerror.name == "expect_error" + assert "oh no" in str(ggcerror.error) + ggc = grandchild.child_runs[1] + assert ggc.name == "great_grandchild_fn" + assert ggc.inputs == {"a": 1, "b": 2} + + def test_trace_to_traceable(): @traceable def child_fn(a: int, b: int) -> int: @@ -998,11 +1186,11 @@ def test_client_passed_when_trace_parent(): mock_client = _get_mock_client() rt = RunTree(name="foo", client=mock_client) headers = rt.to_headers() - - with trace( - name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client - ) as rt: - rt.outputs["bar"] = "baz" + with tracing_context(enabled=True): + with trace( + name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client + ) as rt: + rt.outputs["bar"] = "baz" calls = _get_calls(mock_client) assert len(calls) == 1 call = calls[0] @@ -1031,6 +1219,7 @@ def my_grandchild_tool(text: str, callbacks: Any = None) -> str: run = lct.run_map[str(gc_run_id)] assert run.name == "my_grandchild_tool" assert run.run_type == "tool" + assert lct.project_name == "foo" parent_run = lct.run_map[str(run.parent_run_id)] assert parent_run assert parent_run.name == "my_traceable" @@ -1049,6 +1238,7 @@ def my_traceable(text: str) -> str: assert rt.parent_run_id assert rt.parent_run assert rt.parent_run.run_type == "tool" + assert rt.session_name == "foo" return my_grandchild_tool.invoke({"text": text}, {"run_id": gc_run_id}) @tool @@ -1057,14 +1247,16 @@ def my_tool(text: str) -> str: return my_traceable(text) mock_client = _get_mock_client() - tracer = LangChainTracer(client=mock_client) + tracer = LangChainTracer(client=mock_client, project_name="foo") my_tool.invoke({"text": "hello"}, {"callbacks": [tracer]}) def test_io_interops(): try: - from langchain.callbacks.tracers import LangChainTracer - from langchain.schema.runnable import RunnableLambda + from 
langchain_core.language_models import FakeListChatModel + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnableLambda + from langchain_core.tracers import LangChainTracer except ImportError: pytest.skip("Skipping test that requires langchain") tracer = LangChainTracer(client=_get_mock_client(auto_batch_tracing=False)) @@ -1075,12 +1267,18 @@ def test_io_interops(): "parent_output": {"parent_output": "parent_output_value"}, } + llm = FakeListChatModel(responses=["bar"]) + prompt = ChatPromptTemplate.from_messages([("system", "Hi {name}")]) + some_chain = prompt | llm + @RunnableLambda def child(inputs: dict) -> dict: + res = some_chain.invoke({"name": "foo"}) + assert res.content == "bar" return {**stage_added["child_output"], **inputs} @RunnableLambda - def parent(inputs: dict) -> dict: + def the_parent(inputs: dict) -> dict: return { **stage_added["parent_output"], **child.invoke({**stage_added["child_input"], **inputs}), @@ -1091,28 +1289,340 @@ def parent(inputs: dict) -> dict: for stage in stage_added: current = {**current, **stage_added[stage]} expected_at_stage[stage] = current - parent_result = parent.invoke(stage_added["parent_input"], {"callbacks": [tracer]}) + parent_result = the_parent.invoke( + stage_added["parent_input"], {"callbacks": [tracer]} + ) assert parent_result == expected_at_stage["parent_output"] - mock_posts = _get_calls(tracer.client, minimum=2) - assert len(mock_posts) == 2 + mock_posts = _get_calls(tracer.client, minimum=5) + assert len(mock_posts) == 5 datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_posts] - assert datas[0]["name"] == "parent" + names = [ + "the_parent", + "child", + "RunnableSequence", + "ChatPromptTemplate", + "FakeListChatModel", + ] + contains_serialized = {"ChatPromptTemplate", "FakeListChatModel"} + ids_contains_serialized = set() + for n, d in zip(names, datas): + assert n == d["name"] + if n in contains_serialized: + assert d["serialized"] + 
assert "graph" not in d["serialized"] + ids_contains_serialized.add(d["id"]) + else: + assert d.get("serialized") is None + + assert datas[0]["name"] == "the_parent" assert datas[0]["inputs"] == expected_at_stage["parent_input"] assert not datas[0]["outputs"] assert datas[1]["name"] == "child" assert datas[1]["inputs"] == expected_at_stage["child_input"] assert not datas[1]["outputs"] - parent_uid = datas[0]["id"] - child_uid = datas[1]["id"] + ids = {d["name"]: d["id"] for d in datas} # Check the patch requests - mock_patches = _get_calls(tracer.client, verbs={"PATCH"}, minimum=2) - assert len(mock_patches) == 2 - child_patch = json.loads(mock_patches[0].kwargs["data"]) - assert child_patch["id"] == child_uid + mock_patches = _get_calls(tracer.client, verbs={"PATCH"}, minimum=5) + assert len(mock_patches) == 5 + patches_datas = [ + json.loads(mock_patch.kwargs["data"]) for mock_patch in mock_patches + ] + patches_dict = {d["id"]: d for d in patches_datas} + child_patch = patches_dict[ids["child"]] assert child_patch["outputs"] == expected_at_stage["child_output"] assert child_patch["inputs"] == expected_at_stage["child_input"] - parent_patch = json.loads(mock_patches[1].kwargs["data"]) - assert parent_patch["id"] == parent_uid + assert child_patch["name"] == "child" + parent_patch = patches_dict[ids["the_parent"]] assert parent_patch["outputs"] == expected_at_stage["parent_output"] assert parent_patch["inputs"] == expected_at_stage["parent_input"] + assert parent_patch["name"] == "the_parent" + for d in patches_datas: + if d["id"] in ids_contains_serialized: + assert "serialized" not in d or d.get("serialized") + else: + assert d.get("serialized") is None + + +def test_trace_respects_tracing_context(): + mock_client = _get_mock_client() + with tracing_context(enabled=False): + with trace(name="foo", inputs={"a": 1}, client=mock_client): + pass + + mock_calls = _get_calls(mock_client) + assert not mock_calls + + +def test_trace_nested_enable_disable(): + # Test 
that you can disable then re-enable tracing + # and the trace connects as expected + mock_client = _get_mock_client() + with tracing_context(enabled=True): + with trace(name="foo", inputs={"a": 1}, client=mock_client) as run: + with tracing_context(enabled=False): + with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2: + with tracing_context(enabled=True): + with trace( + name="baz", inputs={"c": 3}, client=mock_client + ) as run3: + run3.end(outputs={"c": 3}) + run2.end(outputs={"b": 2}) + run.end(outputs={"a": 1}) + + # Now we need to ensure that there are 2 runs created (2 posts and 2 patches), + # run -> run3 + # with run2 being invisible + mock_calls = _get_calls(mock_client, verbs={"POST", "PATCH"}) + datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_calls] + assert "post" in datas[0] + posted = datas[0]["post"] + assert len(posted) == 2 + assert posted[0]["name"] == "foo" + assert posted[1]["name"] == "baz" + dotted_parts = posted[1]["dotted_order"].split(".") + assert len(dotted_parts) == 2 + parent_dotted = posted[0]["dotted_order"] + assert parent_dotted == dotted_parts[0] + + +def test_tracing_disabled_project_name_set(): + mock_client = _get_mock_client() + + @traceable + def foo(a: int) -> int: + return a + + with tracing_context(enabled=False): + with trace( + name="foo", inputs={"a": 1}, client=mock_client, project_name="my_project" + ): + pass + foo(1, langsmith_extra={"client": mock_client, "project_name": "my_project"}) + + mock_calls = _get_calls(mock_client) + assert not mock_calls + + +@pytest.mark.parametrize("auto_batch_tracing", [True, False]) +async def test_traceable_async_exception(auto_batch_tracing: bool): + mock_client = _get_mock_client( + auto_batch_tracing=auto_batch_tracing, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + 
scale_down_nempty_trigger=4, + ) + ), + ) + + @traceable + async def my_function(a: int) -> int: + raise ValueError("foo") + + with tracing_context(enabled=True): + with pytest.raises(ValueError, match="foo"): + await my_function(1, langsmith_extra={"client": mock_client}) + + # Get ALL the call args for the mock_client + num_calls = 1 if auto_batch_tracing else 2 + mock_calls = _get_calls( + mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls + ) + assert len(mock_calls) >= num_calls + + +@pytest.mark.parametrize("auto_batch_tracing", [True, False]) +async def test_traceable_async_gen_exception(auto_batch_tracing: bool): + mock_client = _get_mock_client( + auto_batch_tracing=auto_batch_tracing, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + + @traceable + async def my_function(a: int) -> AsyncGenerator[int, None]: + for i in range(5): + yield i + raise ValueError("foo") + + with tracing_context(enabled=True): + with pytest.raises(ValueError, match="foo"): + async for _ in my_function(1, langsmith_extra={"client": mock_client}): + pass + + # Get ALL the call args for the mock_client + num_calls = 1 if auto_batch_tracing else 2 + mock_calls = _get_calls( + mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls + ) + assert len(mock_calls) == num_calls + + +@pytest.mark.parametrize("env_var", [True, False]) +@pytest.mark.parametrize("context", [True, False, None]) +async def test_trace_respects_env_var(env_var: bool, context: Optional[bool]): + ls_utils.get_env_var.cache_clear() + mock_client = _get_mock_client() + with patch.dict(os.environ, {"LANGSMITH_TRACING": "true" if env_var else "false "}): + with tracing_context(enabled=context): + with trace(name="foo", inputs={"a": 1}, client=mock_client) as run: + assert run.name == 
"foo" + pass + async with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2: + assert run2.name == "bar" + pass + + mock_calls = _get_calls(mock_client) + if context is None: + expect = env_var + else: + expect = context + if expect: + assert len(mock_calls) >= 1 + else: + assert not mock_calls + + +async def test_process_inputs_outputs(): + mock_client = _get_mock_client() + in_s = "what's life's meaning" + + def process_inputs(inputs: dict) -> dict: + assert inputs == {"val": in_s, "ooblek": "nada"} + inputs["val2"] = "this is mutated" + return {"serialized_in": "what's the meaning of life?"} + + def process_outputs(outputs: int) -> dict: + assert outputs == 42 + return {"serialized_out": 24} + + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) + def my_function(val: str, **kwargs: Any) -> int: + assert not kwargs.get("val2") + return 42 + + with tracing_context(enabled=True): + my_function( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + + def _check_client(client: Client) -> None: + mock_calls = _get_calls(client) + assert len(mock_calls) == 1 + call = mock_calls[0] + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert body["post"][0]["inputs"] == { + "serialized_in": "what's the meaning of life?" 
+ } + assert body["post"][0]["outputs"] == {"serialized_out": 24} + + _check_client(mock_client) + + class Untruthy: + def __init__(self, val: Any) -> None: + self.val = val + + def __bool__(self) -> bool: + raise ValueError("I'm not truthy") + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Untruthy): + return self.val == other.val + return self.val == other + + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) + async def amy_function(val: str, **kwargs: Any) -> int: + assert not kwargs.get("val2") + return Untruthy(42) # type: ignore + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + await amy_function( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + + _check_client(mock_client) + + # Do generator + + def reducer(outputs: list) -> dict: + return {"reduced": outputs[0]} + + def process_reduced_outputs(outputs: dict) -> dict: + assert outputs == {"reduced": 42} + return {"serialized_out": 24} + + @traceable( + process_inputs=process_inputs, + process_outputs=process_reduced_outputs, + reduce_fn=reducer, + ) + def my_gen(val: str, **kwargs: Any) -> Generator[int, None, None]: + assert not kwargs.get("val2") + yield 42 + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + result = list( + my_gen( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + ) + assert result == [42] + + _check_client(mock_client) + + @traceable( + process_inputs=process_inputs, + process_outputs=process_reduced_outputs, + reduce_fn=reducer, + ) + async def amy_gen(val: str, **kwargs: Any) -> AsyncGenerator[int, None]: + assert not kwargs.get("val2") + yield Untruthy(42) # type: ignore + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + result = [ + i + async for i in amy_gen( + in_s, ooblek="nada", langsmith_extra={"client": mock_client} + ) + ] + assert result == [42] + _check_client(mock_client) + + +def 
test_traceable_stop_iteration(): + def my_generator(): + yield from range(5) + return ("last", "vals") + + def consume(gen): + last_vals = yield from gen() + assert last_vals == ("last", "vals") + + assert list(consume(my_generator)) == list(range(5)) + + wrapped = traceable(my_generator) + assert list(consume(wrapped)) == list(range(5)) diff --git a/python/tests/unit_tests/test_run_trees.py b/python/tests/unit_tests/test_run_trees.py index 77618ab5f..d2b410c2f 100644 --- a/python/tests/unit_tests/test_run_trees.py +++ b/python/tests/unit_tests/test_run_trees.py @@ -1,3 +1,4 @@ +import json from concurrent.futures import ThreadPoolExecutor from datetime import datetime from unittest.mock import MagicMock @@ -7,6 +8,7 @@ from langsmith import run_trees from langsmith.client import Client +from langsmith.run_trees import RunTree def test_run_tree_accepts_tpe() -> None: @@ -19,6 +21,40 @@ def test_run_tree_accepts_tpe() -> None: ) +def test_lazy_rt() -> None: + run_tree = RunTree(name="foo") + assert run_tree.ls_client is None + assert run_tree._client is None + assert isinstance(run_tree.client, Client) + client = Client(api_key="foo") + run_tree._client = client + assert run_tree._client == client + + assert RunTree(name="foo", client=client).client == client + assert RunTree(name="foo", ls_client=client).client == client + + +def test_json_serializable(): + run_tree = RunTree(name="foo") + d = run_tree.dict() + assert not d.get("client") and not d.get("ls_client") + assert isinstance(run_tree.client, Client) + d = run_tree.dict() + assert not d.get("client") and not d.get("ls_client") + d = json.loads(run_tree.json()) + assert not d.get("client") and not d.get("ls_client") + run_tree = RunTree(name="foo", ls_client=Client()) + d = run_tree.dict() + assert not d.get("client") and not d.get("ls_client") + d = json.loads(run_tree.json()) + assert not d.get("client") and not d.get("ls_client") + run_tree = RunTree(name="foo", client=Client()) + d = run_tree.dict() + 
assert not d.get("client") and not d.get("ls_client") + d = json.loads(run_tree.json()) + assert not d.get("client") and not d.get("ls_client") + + @pytest.mark.parametrize( "inputs, expected", [ diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index 9cadaa9cb..857e55e7f 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -1,6 +1,7 @@ # mypy: disable-error-code="annotation-unchecked" import copy import dataclasses +import functools import itertools import threading import unittest @@ -31,6 +32,7 @@ def __init__( self.return_default_value = return_default_value def test_correct_get_tracer_project(self): + ls_utils.get_env_var.cache_clear() cases = [ self.GetTracerProjectTestCase( test_name="default to 'default' when no project provided", @@ -75,6 +77,8 @@ def test_correct_get_tracer_project(self): ] for case in cases: + ls_utils.get_env_var.cache_clear() + ls_utils.get_tracer_project.cache_clear() with self.subTest(msg=case.test_name): with pytest.MonkeyPatch.context() as mp: for k, v in case.envvars.items(): @@ -89,6 +93,7 @@ def test_correct_get_tracer_project(self): def test_tracing_enabled(): + ls_utils.get_env_var.cache_clear() with patch.dict( "os.environ", {"LANGCHAIN_TRACING_V2": "false", "LANGSMITH_TRACING": "false"} ): @@ -123,6 +128,7 @@ def parent_function(): assert not ls_utils.tracing_is_enabled() return untraced_child_function() + ls_utils.get_env_var.cache_clear() with patch.dict( "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"} ): @@ -131,6 +137,7 @@ def parent_function(): def test_tracing_disabled(): + ls_utils.get_env_var.cache_clear() with patch.dict( "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"} ): @@ -264,3 +271,130 @@ class MyNamedTuple(NamedTuple): "fake_json": ClassWithFakeJson(), } assert ls_utils.deepish_copy(my_dict) == my_dict + + +def test_is_version_greater_or_equal(): + # Test versions equal 
to 0.5.23 + assert ls_utils.is_version_greater_or_equal("0.5.23", "0.5.23") + + # Test versions greater than 0.5.23 + assert ls_utils.is_version_greater_or_equal("0.5.24", "0.5.23") + assert ls_utils.is_version_greater_or_equal("0.6.0", "0.5.23") + assert ls_utils.is_version_greater_or_equal("1.0.0", "0.5.23") + + # Test versions less than 0.5.23 + assert not ls_utils.is_version_greater_or_equal("0.5.22", "0.5.23") + assert not ls_utils.is_version_greater_or_equal("0.5.0", "0.5.23") + assert not ls_utils.is_version_greater_or_equal("0.4.99", "0.5.23") + + +def test_parse_prompt_identifier(): + # Valid cases + assert ls_utils.parse_prompt_identifier("name") == ("-", "name", "latest") + assert ls_utils.parse_prompt_identifier("owner/name") == ("owner", "name", "latest") + assert ls_utils.parse_prompt_identifier("owner/name:commit") == ( + "owner", + "name", + "commit", + ) + assert ls_utils.parse_prompt_identifier("name:commit") == ("-", "name", "commit") + + # Invalid cases + invalid_identifiers = [ + "", + "/", + ":", + "owner/", + "/name", + "owner//name", + "owner/name/", + "owner/name/extra", + ":commit", + ] + + for invalid_id in invalid_identifiers: + try: + ls_utils.parse_prompt_identifier(invalid_id) + assert False, f"Expected ValueError for identifier: {invalid_id}" + except ValueError: + pass # This is the expected behavior + + +def test_get_api_key() -> None: + ls_utils.get_env_var.cache_clear() + assert ls_utils.get_api_key("provided_api_key") == "provided_api_key" + assert ls_utils.get_api_key("'provided_api_key'") == "provided_api_key" + assert ls_utils.get_api_key('"_provided_api_key"') == "_provided_api_key" + + with patch.dict("os.environ", {"LANGCHAIN_API_KEY": "env_api_key"}, clear=True): + api_key_ = ls_utils.get_api_key(None) + assert api_key_ == "env_api_key" + + ls_utils.get_env_var.cache_clear() + + with patch.dict("os.environ", {}, clear=True): + assert ls_utils.get_api_key(None) is None + ls_utils.get_env_var.cache_clear() + assert 
ls_utils.get_api_key("") is None + assert ls_utils.get_api_key(" ") is None + + +def test_get_api_url() -> None: + ls_utils.get_env_var.cache_clear() + assert ls_utils.get_api_url("http://provided.url") == "http://provided.url" + + with patch.dict("os.environ", {"LANGCHAIN_ENDPOINT": "http://env.url"}): + assert ls_utils.get_api_url(None) == "http://env.url" + + ls_utils.get_env_var.cache_clear() + with patch.dict("os.environ", {}, clear=True): + assert ls_utils.get_api_url(None) == "https://api.smith.langchain.com" + ls_utils.get_env_var.cache_clear() + with patch.dict("os.environ", {}, clear=True): + assert ls_utils.get_api_url(None) == "https://api.smith.langchain.com" + ls_utils.get_env_var.cache_clear() + with patch.dict("os.environ", {"LANGCHAIN_ENDPOINT": "http://env.url"}): + assert ls_utils.get_api_url(None) == "http://env.url" + ls_utils.get_env_var.cache_clear() + with pytest.raises(ls_utils.LangSmithUserError): + ls_utils.get_api_url(" ") + + +def test_get_func_name(): + class Foo: + def __call__(self, foo: int): + return "bar" + + assert ls_utils._get_function_name(Foo()) == "Foo" + assert ls_utils._get_function_name(functools.partial(Foo(), foo=3)) == "Foo" + + class AFoo: + async def __call__(self, foo: int): + return "bar" + + assert ls_utils._get_function_name(AFoo()) == "AFoo" + assert ls_utils._get_function_name(functools.partial(AFoo(), foo=3)) == "AFoo" + + def foo(bar: int) -> None: + return bar + + assert ls_utils._get_function_name(foo) == "foo" + assert ls_utils._get_function_name(functools.partial(foo, bar=3)) == "foo" + + async def afoo(bar: int) -> None: + return bar + + assert ls_utils._get_function_name(afoo) == "afoo" + assert ls_utils._get_function_name(functools.partial(afoo, bar=3)) == "afoo" + + lambda_func = lambda x: x + 1 # noqa + assert ls_utils._get_function_name(lambda_func) == "" + + class BarClass: + pass + + assert ls_utils._get_function_name(BarClass) == "BarClass" + + assert ls_utils._get_function_name(print) == "print" 
+ + assert ls_utils._get_function_name("not_a_function") == "not_a_function"