diff --git a/.github/contributing/INTEGRATIONS.md b/.github/contributing/INTEGRATIONS.md
index ecab1a44d26c..b14722e9a27b 100644
--- a/.github/contributing/INTEGRATIONS.md
+++ b/.github/contributing/INTEGRATIONS.md
@@ -122,16 +122,9 @@ Above, we have a document loader that we're sure will always require a specific
We highly appreciate documentation and integration tests showing how to set up and use your integration. Providing this will make it much easier for reviewers to verify that your integration works and will streamline the review process.
-New docs pages should be added as `.mdx` files in the appropriate location under `docs/` (`.mdx` is an extended markdown format that allows use of additional statements like `import`). Code examples within docs pages should be under `examples` and imported like this:
+New docs pages should be created from the appropriate template here:
-```md
-import CodeBlock from "@theme/CodeBlock";
-import LangCoExample from "@examples/document_loaders/langco.ts";
-
-{LangCoExample}
-```
-
-This allows the linter and formatter to pick up example code blocks within docs as well.
+https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-scripts/src/cli/docs/templates
### Linting and formatting
diff --git a/dependency_range_tests/docker-compose.yml b/dependency_range_tests/docker-compose.yml
index 3305a85d20a2..579707fbe1ad 100644
--- a/dependency_range_tests/docker-compose.yml
+++ b/dependency_range_tests/docker-compose.yml
@@ -2,7 +2,7 @@ version: "3"
services:
# LangChain
langchain-latest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -13,7 +13,7 @@ services:
- ./scripts:/scripts
command: bash /scripts/langchain/test-with-latest-deps.sh
langchain-lowest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -23,10 +23,10 @@ services:
- ../langchain:/langchain
- ./scripts:/scripts
command: bash /scripts/langchain/test-with-lowest-deps.sh
-
+
# Community
community-latest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -34,13 +34,13 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/community/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-community:/libs/langchain-community
- ./scripts:/scripts
command: bash /scripts/with_standard_tests/community/test-with-latest-deps.sh
community-lowest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -48,20 +48,20 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/community/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-community:/libs/langchain-community
- ./scripts:/scripts
command: bash /scripts/with_standard_tests/community/test-with-lowest-deps.sh
community-npm-install:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/community/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-community:/libs/langchain-community
- ./scripts:/scripts
@@ -69,7 +69,7 @@ services:
# OpenAI
openai-latest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -77,13 +77,13 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/openai/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-openai:/libs/langchain-openai
- ./scripts:/scripts
command: bash /scripts/with_standard_tests/openai/test-with-latest-deps.sh
openai-lowest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -91,7 +91,7 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/openai/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-openai:/libs/langchain-openai
- ./scripts:/scripts
@@ -99,7 +99,7 @@ services:
# Anthropic
anthropic-latest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -107,13 +107,13 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/anthropic/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-anthropic:/libs/langchain-anthropic
- ./scripts:/scripts
command: bash /scripts/with_standard_tests/anthropic/test-with-latest-deps.sh
anthropic-lowest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -121,7 +121,7 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/anthropic/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-anthropic:/libs/langchain-anthropic
- ./scripts:/scripts
@@ -129,7 +129,7 @@ services:
# Google VertexAI
google-vertexai-latest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -137,13 +137,13 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/google-vertexai/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-google-vertexai:/libs/langchain-google-vertexai
- ./scripts:/scripts
command: bash /scripts/with_standard_tests/google-vertexai/test-with-latest-deps.sh
google-vertexai-lowest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -151,7 +151,7 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/google-vertexai/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-google-vertexai:/libs/langchain-google-vertexai
- ./scripts:/scripts
@@ -159,7 +159,7 @@ services:
# Cohere
cohere-latest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -167,13 +167,13 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/cohere/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-cohere:/libs/langchain-cohere
- ./scripts:/scripts
command: bash /scripts/with_standard_tests/cohere/test-with-latest-deps.sh
cohere-lowest-deps:
- image: node:18
+ image: node:20
environment:
PUPPETEER_SKIP_DOWNLOAD: "true"
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true"
@@ -181,8 +181,8 @@ services:
working_dir: /app
volumes:
- ../turbo.json:/turbo.json
- - ../package.json:/package.json
+ - ./scripts/with_standard_tests/cohere/node/package.json:/package.json
- ../libs/langchain-standard-tests:/libs/langchain-standard-tests
- ../libs/langchain-cohere:/libs/langchain-cohere
- ./scripts:/scripts
- command: bash /scripts/with_standard_tests/cohere/test-with-lowest-deps.sh
\ No newline at end of file
+ command: bash /scripts/with_standard_tests/cohere/test-with-lowest-deps.sh
diff --git a/dependency_range_tests/scripts/with_standard_tests/anthropic/node/package.json b/dependency_range_tests/scripts/with_standard_tests/anthropic/node/package.json
index a4622fc74597..472405f4ca22 100644
--- a/dependency_range_tests/scripts/with_standard_tests/anthropic/node/package.json
+++ b/dependency_range_tests/scripts/with_standard_tests/anthropic/node/package.json
@@ -2,8 +2,11 @@
"name": "dependency-range-tests",
"version": "0.0.0",
"private": true,
+ "workspaces": [
+ "libs/*"
+ ],
"description": "Tests dependency ranges for LangChain.",
"dependencies": {
"semver": "^7.5.4"
}
-}
\ No newline at end of file
+}
diff --git a/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-latest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-latest-deps.sh
index bcf32dfbaa9a..46b76fba9993 100644
--- a/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-latest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-latest-deps.sh
@@ -27,6 +27,7 @@ node "update_resolutions_latest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/anthropic` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-lowest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-lowest-deps.sh
index 11eae9e21a80..d8b109b41db3 100644
--- a/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-lowest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/anthropic/test-with-lowest-deps.sh
@@ -25,6 +25,7 @@ node "update_resolutions_lowest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/anthropic` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/cohere/node/package.json b/dependency_range_tests/scripts/with_standard_tests/cohere/node/package.json
index a4622fc74597..472405f4ca22 100644
--- a/dependency_range_tests/scripts/with_standard_tests/cohere/node/package.json
+++ b/dependency_range_tests/scripts/with_standard_tests/cohere/node/package.json
@@ -2,8 +2,11 @@
"name": "dependency-range-tests",
"version": "0.0.0",
"private": true,
+ "workspaces": [
+ "libs/*"
+ ],
"description": "Tests dependency ranges for LangChain.",
"dependencies": {
"semver": "^7.5.4"
}
-}
\ No newline at end of file
+}
diff --git a/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-latest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-latest-deps.sh
index 24c5ccc872f8..afb78b0bdcd8 100644
--- a/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-latest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-latest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_latest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/cohere` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-lowest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-lowest-deps.sh
index 3868e0a9d6c0..7411c49e564c 100644
--- a/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-lowest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/cohere/test-with-lowest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_lowest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/cohere` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/community/node/package.json b/dependency_range_tests/scripts/with_standard_tests/community/node/package.json
index a4622fc74597..472405f4ca22 100644
--- a/dependency_range_tests/scripts/with_standard_tests/community/node/package.json
+++ b/dependency_range_tests/scripts/with_standard_tests/community/node/package.json
@@ -2,8 +2,11 @@
"name": "dependency-range-tests",
"version": "0.0.0",
"private": true,
+ "workspaces": [
+ "libs/*"
+ ],
"description": "Tests dependency ranges for LangChain.",
"dependencies": {
"semver": "^7.5.4"
}
-}
\ No newline at end of file
+}
diff --git a/dependency_range_tests/scripts/with_standard_tests/community/test-with-latest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/community/test-with-latest-deps.sh
index df63a38f8546..5e5d5c893316 100644
--- a/dependency_range_tests/scripts/with_standard_tests/community/test-with-latest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/community/test-with-latest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_latest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/community` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/community/test-with-lowest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/community/test-with-lowest-deps.sh
index e6d70012a809..5575f53b0b5c 100644
--- a/dependency_range_tests/scripts/with_standard_tests/community/test-with-lowest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/community/test-with-lowest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_lowest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/package` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/google-vertexai/node/package.json b/dependency_range_tests/scripts/with_standard_tests/google-vertexai/node/package.json
index a4622fc74597..268de13087d8 100644
--- a/dependency_range_tests/scripts/with_standard_tests/google-vertexai/node/package.json
+++ b/dependency_range_tests/scripts/with_standard_tests/google-vertexai/node/package.json
@@ -2,8 +2,12 @@
"name": "dependency-range-tests",
"version": "0.0.0",
"private": true,
+ "workspaces": [
+ "libs/*"
+ ],
"description": "Tests dependency ranges for LangChain.",
"dependencies": {
"semver": "^7.5.4"
- }
-}
\ No newline at end of file
+ },
+ "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e"
+}
diff --git a/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-latest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-latest-deps.sh
index 567366beb19e..00d5cb3e983b 100644
--- a/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-latest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-latest-deps.sh
@@ -26,6 +26,8 @@ node "update_resolutions_latest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
+cat ./package.json
yarn
# Navigate into `@langchain/google-vertexai` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-lowest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-lowest-deps.sh
index 23dcf3ba0d82..d04849a08c7f 100644
--- a/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-lowest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/google-vertexai/test-with-lowest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_lowest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/package` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/node/package.json b/dependency_range_tests/scripts/with_standard_tests/node/package.json
index ba8b482e7515..3b53ce354167 100644
--- a/dependency_range_tests/scripts/with_standard_tests/node/package.json
+++ b/dependency_range_tests/scripts/with_standard_tests/node/package.json
@@ -2,6 +2,9 @@
"name": "dependency-range-tests",
"version": "0.0.0",
"private": true,
+ "workspaces": [
+ "libs/*"
+ ],
"description": "Tests dependency ranges for LangChain.",
"dependencies": {}
-}
\ No newline at end of file
+}
diff --git a/dependency_range_tests/scripts/with_standard_tests/openai/node/package.json b/dependency_range_tests/scripts/with_standard_tests/openai/node/package.json
index a4622fc74597..472405f4ca22 100644
--- a/dependency_range_tests/scripts/with_standard_tests/openai/node/package.json
+++ b/dependency_range_tests/scripts/with_standard_tests/openai/node/package.json
@@ -2,8 +2,11 @@
"name": "dependency-range-tests",
"version": "0.0.0",
"private": true,
+ "workspaces": [
+ "libs/*"
+ ],
"description": "Tests dependency ranges for LangChain.",
"dependencies": {
"semver": "^7.5.4"
}
-}
\ No newline at end of file
+}
diff --git a/dependency_range_tests/scripts/with_standard_tests/openai/test-with-latest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/openai/test-with-latest-deps.sh
index 2080ec786467..7c399d0fb223 100644
--- a/dependency_range_tests/scripts/with_standard_tests/openai/test-with-latest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/openai/test-with-latest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_latest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/openai` to build and run tests
diff --git a/dependency_range_tests/scripts/with_standard_tests/openai/test-with-lowest-deps.sh b/dependency_range_tests/scripts/with_standard_tests/openai/test-with-lowest-deps.sh
index 6f47d13e8aa1..a462b8d20c12 100644
--- a/dependency_range_tests/scripts/with_standard_tests/openai/test-with-lowest-deps.sh
+++ b/dependency_range_tests/scripts/with_standard_tests/openai/test-with-lowest-deps.sh
@@ -26,6 +26,7 @@ node "update_resolutions_lowest.js"
# Navigate back to monorepo root and install dependencies
cd "$monorepo_dir"
+touch yarn.lock
yarn
# Navigate into `@langchain/package` to build and run tests
diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore
index 8f648f622a5e..235b78c4b8e2 100644
--- a/docs/core_docs/.gitignore
+++ b/docs/core_docs/.gitignore
@@ -340,6 +340,8 @@ docs/integrations/chat/openai.md
docs/integrations/chat/openai.mdx
docs/integrations/chat/ollama.md
docs/integrations/chat/ollama.mdx
+docs/integrations/chat/novita.md
+docs/integrations/chat/novita.mdx
docs/integrations/chat/mistral.md
docs/integrations/chat/mistral.mdx
docs/integrations/chat/ibm.md
diff --git a/docs/core_docs/docs/how_to/example_selectors.ipynb b/docs/core_docs/docs/how_to/example_selectors.ipynb
index fbda2184ecd1..6a47ab1116b7 100644
--- a/docs/core_docs/docs/how_to/example_selectors.ipynb
+++ b/docs/core_docs/docs/how_to/example_selectors.ipynb
@@ -223,7 +223,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Translate the following words from English to Italain:\n",
+ "Translate the following words from English to Italian:\n",
"\n",
"Input: hand -> Output: mano\n",
"\n",
@@ -236,7 +236,7 @@
" exampleSelector,\n",
" examplePrompt,\n",
" suffix: \"Input: {input} -> Output:\",\n",
- " prefix: \"Translate the following words from English to Italain:\",\n",
+ " prefix: \"Translate the following words from English to Italian:\",\n",
" inputVariables: [\"input\"],\n",
"})\n",
"\n",
@@ -283,4 +283,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
-}
\ No newline at end of file
+}
diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb
index f3d0e81ed3ce..3e2332d74d4b 100644
--- a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb
+++ b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb
@@ -796,6 +796,80 @@
"console.log(codeExecutionExplanation.content);"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "a464c1a9",
+ "metadata": {},
+ "source": [
+ "## Context Caching\n",
+ "\n",
+ "Context caching allows you to pass some content to the model once, cache the input tokens, and then refer to the cached tokens for subsequent requests to reduce cost. You can create a `CachedContent` object using the `GoogleAICacheManager` class and then pass it to your `ChatGoogleGenerativeAI` model with the `useCachedContent()` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9a649be0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n",
+ "import {\n",
+ " GoogleAICacheManager,\n",
+ " GoogleAIFileManager,\n",
+ "} from \"@google/generative-ai/server\";\n",
+ "\n",
+ "const fileManager = new GoogleAIFileManager(process.env.GOOGLE_API_KEY);\n",
+ "const cacheManager = new GoogleAICacheManager(process.env.GOOGLE_API_KEY);\n",
+ "\n",
+ "// uploads file for caching\n",
+ "const pathToVideoFile = \"/path/to/video/file\";\n",
+ "const displayName = \"example-video\";\n",
+ "const fileResult = await fileManager.uploadFile(pathToVideoFile, {\n",
+ " displayName,\n",
+ " mimeType: \"video/mp4\",\n",
+ "});\n",
+ "\n",
+ "// creates cached content AFTER uploading is finished\n",
+ "const cachedContent = await cacheManager.create({\n",
+ " model: \"models/gemini-1.5-flash-001\",\n",
+ " displayName: displayName,\n",
+ " systemInstruction: \"You are an expert video analyzer, and your job is to answer \" +\n",
+ " \"the user's query based on the video file you have access to.\",\n",
+ " contents: [\n",
+ " {\n",
+ " role: \"user\",\n",
+ " parts: [\n",
+ " {\n",
+ " fileData: {\n",
+ " mimeType: fileResult.file.mimeType,\n",
+ " fileUri: fileResult.file.uri,\n",
+ " },\n",
+ " },\n",
+ " ],\n",
+ " },\n",
+ " ],\n",
+ " ttlSeconds: 300,\n",
+ "});\n",
+ "\n",
+ "// passes cached video to model\n",
+ "const model = new ChatGoogleGenerativeAI({});\n",
+ "model.useCachedContent(cachedContent);\n",
+ "\n",
+ "// invokes model with cached video\n",
+ "await model.invoke(\"Summarize the video\");"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "12e978ff",
+ "metadata": {},
+ "source": [
+ "**Note**\n",
+ "- Context caching supports both Gemini 1.5 Pro and Gemini 1.5 Flash. Context caching is only available for stable models with fixed versions (for example, gemini-1.5-pro-001). You must include the version postfix (for example, the -001 in gemini-1.5-pro-001).\n",
+ "- The minimum input token count for context caching is 32,768, and the maximum is the same as the maximum for the given model."
+ ]
+ },
{
"cell_type": "markdown",
"id": "0c6a950f",
diff --git a/docs/core_docs/docs/integrations/chat/novita.ipynb b/docs/core_docs/docs/integrations/chat/novita.ipynb
new file mode 100644
index 000000000000..9a6adc4884eb
--- /dev/null
+++ b/docs/core_docs/docs/integrations/chat/novita.ipynb
@@ -0,0 +1,206 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
+ "source": [
+ "---\n",
+ "sidebar_label: Novita AI\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# ChatNovita\n",
+ "\n",
+ "Novita AI delivers an affordable, reliable, and simple inference platform for running top LLM models.\n",
+ "\n",
+ "You can find all the models we support here: [Novita AI Featured Models](https://novita.ai/model-api/product/llm-api?utm_source=github_langchain&utm_medium=github_readme&utm_campaign=link), or call the [Models API](https://novita.ai/docs/model-api/reference/llm/models.html?utm_source=github_langchain&utm_medium=github_readme&utm_campaign=link) to list all available models.\n",
+ "\n",
+ "Try the [Novita AI Llama 3 API Demo](https://novita.ai/model-api/product/llm-api/playground#meta-llama-llama-3.1-8b-instruct?utm_source=github_langchain&utm_medium=github_readme&utm_campaign=link) today!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Overview\n",
+ "\n",
+ "### Model features\n",
+ "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
+ "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
+ "| ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ |"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Setup\n",
+ "\n",
+ "To access Novita AI models you'll need to create a Novita account and get an API key.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Head to [this page](https://novita.ai/settings#key-management?utm_source=github_langchain&utm_medium=github_readme&utm_campaign=link) to sign up for Novita AI and generate an API key. Once you've done this, set the `NOVITA_API_KEY` environment variable:\n",
+ "\n",
+ "```bash\n",
+ "export NOVITA_API_KEY=\"your-api-key\"\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Installation\n",
+ "\n",
+ "The LangChain Novita integration lives in the `@langchain/community` package:\n",
+ "\n",
+ "```{=mdx}\n",
+ "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
+ "import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
+ "\n",
+ "<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
+ "\n",
+ "<Npm2Yarn>\n",
+ "  @langchain/community @langchain/core\n",
+ "</Npm2Yarn>\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our model object and generate chat completions. Try the [Novita AI Llama 3 API Demo](https://novita.ai/model-api/product/llm-api/playground#meta-llama-llama-3.1-8b-instruct?utm_source=github_langchain&utm_medium=github_readme&utm_campaign=link) today!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "javascript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import { ChatNovitaAI } from \"@langchain/community/chat_models/novita\";\n",
+ "\n",
+ "const llm = new ChatNovitaAI({\n",
+ " model: \"meta-llama/llama-3.1-8b-instruct\",\n",
+ " temperature: 0,\n",
+ " // other params...\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Invocation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "javascript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "const aiMsg = await llm.invoke([\n",
+ " {\n",
+ " role: \"system\",\n",
+ " content: \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
+ " },\n",
+ " {\n",
+ " role: \"human\",\n",
+ " content: \"I love programming.\"\n",
+ " },\n",
+ "]);"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "javascript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "console.log(aiMsg.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "We can [chain](/docs/how_to/sequence) our model with a prompt template like so:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "javascript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n",
+ "\n",
+ "const prompt = ChatPromptTemplate.fromMessages(\n",
+ " [\n",
+ " [\n",
+ " \"system\",\n",
+ " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
+ " ],\n",
+ " [\"human\", \"{input}\"],\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "const chain = prompt.pipe(llm);\n",
+ "await chain.invoke(\n",
+ " {\n",
+ " input_language: \"English\",\n",
+ " output_language: \"German\",\n",
+ " input: \"I love programming.\",\n",
+ " }\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For detailed documentation of Novita AI LLM APIs, head to [Novita AI LLM API reference](https://novita.ai/docs/model-api/reference/llm/llm.html?utm_source=github_langchain&utm_medium=github_readme&utm_campaign=link)\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/core_docs/docs/integrations/memory/file.mdx b/docs/core_docs/docs/integrations/memory/file.mdx
new file mode 100644
index 000000000000..d485be9d29c6
--- /dev/null
+++ b/docs/core_docs/docs/integrations/memory/file.mdx
@@ -0,0 +1,31 @@
+---
+hide_table_of_contents: true
+---
+
+import CodeBlock from "@theme/CodeBlock";
+
+# File Chat Message History
+
+The `FileSystemChatMessageHistory` class uses a JSON file to store chat message history. For longer-term persistence across chat sessions, you can swap out the default in-memory `chatHistory` that backs chat memory classes like `BufferMemory`.
+
+## Setup
+
+You'll first need to install the [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) package:
+
+```bash npm2yarn
+npm install @langchain/community @langchain/core
+```
+
+import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
+
+<IntegrationInstallTooltip></IntegrationInstallTooltip>
+
+```bash npm2yarn
+npm install @langchain/openai @langchain/community @langchain/core
+```
+
+## Usage
+
+import Example from "@examples/memory/file.ts";
+
+{Example}
diff --git a/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx b/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx
index 3d1a23eef46c..5aed8ee2c273 100644
--- a/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/hanavector.mdx
@@ -35,12 +35,47 @@ import ExampleLoader from "@examples/indexes/vector_stores/hana_vector/fromDocs.
{ExampleLoader}
+## Creating an HNSW Vector Index
+
+A vector index can significantly speed up top-k nearest neighbor queries for vectors. You can create a Hierarchical Navigable Small World (HNSW) vector index using the `createHnswIndex` method.
+
+For more information about creating an index at the database level, such as parameter requirements, please refer to the [official documentation](https://help.sap.com/docs/hana-cloud-database/sap-hana-cloud-sap-hana-database-vector-engine-guide/create-vector-index-statement-data-definition).
+
+import ExampleIndex from "@examples/indexes/vector_stores/hana_vector/createHnswIndex.ts";
+
+{ExampleIndex}
+
## Basic Vectorstore Operations
import ExampleBasic from "@examples/indexes/vector_stores/hana_vector/basics.ts";
{ExampleBasic}
+## Advanced filtering
+
+In addition to the basic value-based filtering capabilities, it is possible to use more advanced filtering. The table below shows the available filter operators.
+
+| Operator | Semantic |
+| ---------- | -------------------------------------------------------------------------- |
+| `$eq` | Equality (==) |
+| `$ne` | Inequality (!=) |
+| `$lt` | Less than (<) |
+| `$lte` | Less than or equal (<=) |
+| `$gt` | Greater than (>) |
+| `$gte` | Greater than or equal (>=) |
+| `$in` | Contained in a set of given values (in) |
+| `$nin` | Not contained in a set of given values (not in) |
+| `$between` | Between the range of two boundary values |
+| `$like` | Text equality based on the "LIKE" semantics in SQL (using "%" as wildcard) |
+| `$and` | Logical "and", supporting 2 or more operands |
+| `$or` | Logical "or", supporting 2 or more operands |
+
+import ExampleAdvancedFilter from "@examples/indexes/vector_stores/hana_vector/advancedFiltering.ts";
+
+{ExampleAdvancedFilter}
+
## Using a VectorStore as a retriever in chains for retrieval augmented generation (RAG)
import ExampleChain from "@examples/indexes/vector_stores/hana_vector/chains.ts";
diff --git a/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx b/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx
index 62c33165b26c..f0c9a6cb6a3c 100644
--- a/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx
+++ b/docs/core_docs/docs/integrations/vectorstores/opensearch.mdx
@@ -31,7 +31,7 @@ You'll also need to have an OpenSearch instance running. You can use the [offici
import { Client } from "@opensearch-project/opensearch";
import { Document } from "langchain/document";
import { OpenAIEmbeddings } from "@langchain/openai";
-import { OpenSearchVectorStore } from "langchain/vectorstores/opensearch";
+import { OpenSearchVectorStore } from "@langchain/community/vectorstores/opensearch";
const client = new Client({
nodes: [process.env.OPENSEARCH_URL ?? "http://127.0.0.1:9200"],
@@ -70,7 +70,7 @@ import { Client } from "@opensearch-project/opensearch";
import { VectorDBQAChain } from "langchain/chains";
import { OpenAIEmbeddings } from "@langchain/openai";
import { OpenAI } from "@langchain/openai";
-import { OpenSearchVectorStore } from "langchain/vectorstores/opensearch";
+import { OpenSearchVectorStore } from "@langchain/community/vectorstores/opensearch";
const client = new Client({
nodes: [process.env.OPENSEARCH_URL ?? "http://127.0.0.1:9200"],
diff --git a/examples/package.json b/examples/package.json
index 792c866804fb..177aa6433317 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -93,7 +93,7 @@
"ioredis": "^5.3.2",
"js-yaml": "^4.1.0",
"langchain": "workspace:*",
- "langsmith": "^0.2.0",
+ "langsmith": "^0.2.8",
"mongodb": "^6.3.0",
"pg": "^8.11.0",
"pickleparser": "^0.2.1",
diff --git a/examples/src/indexes/vector_stores/hana_vector/advancedFiltering.ts b/examples/src/indexes/vector_stores/hana_vector/advancedFiltering.ts
new file mode 100644
index 000000000000..a3095c29b17e
--- /dev/null
+++ b/examples/src/indexes/vector_stores/hana_vector/advancedFiltering.ts
@@ -0,0 +1,210 @@
+import { OpenAIEmbeddings } from "@langchain/openai";
+import hanaClient from "hdb";
+import { Document } from "@langchain/core/documents";
+import {
+ HanaDB,
+ HanaDBArgs,
+} from "@langchain/community/vectorstores/hanavector";
+
+const connectionParams = {
+ host: process.env.HANA_HOST,
+ port: process.env.HANA_PORT,
+ user: process.env.HANA_UID,
+ password: process.env.HANA_PWD,
+};
+const client = hanaClient.createClient(connectionParams);
+
+// Connect to SAP HANA
+await new Promise<void>((resolve, reject) => {
+ client.connect((err: Error) => {
+ if (err) {
+ reject(err);
+ } else {
+ console.log("Connected to SAP HANA successfully.");
+ resolve();
+ }
+ });
+});
+
+const docs: Document[] = [
+ {
+ pageContent: "First",
+ metadata: { name: "adam", is_active: true, id: 1, height: 10.0 },
+ },
+ {
+ pageContent: "Second",
+ metadata: { name: "bob", is_active: false, id: 2, height: 5.7 },
+ },
+ {
+ pageContent: "Third",
+ metadata: { name: "jane", is_active: true, id: 3, height: 2.4 },
+ },
+];
+
+// Initialize embeddings
+const embeddings = new OpenAIEmbeddings();
+
+const args: HanaDBArgs = {
+ connection: client,
+ tableName: "testAdvancedFilters",
+};
+
+// Create a LangChain VectorStore interface for the HANA database and specify the table (collection) to use in args.
+const vectorStore = new HanaDB(embeddings, args);
+// The instance must be initialized once after it is created.
+await vectorStore.initialize();
+// Delete already existing documents from the table
+await vectorStore.delete({ filter: {} });
+await vectorStore.addDocuments(docs);
+
+// Helper function to print filter results
+function printFilterResult(result: Document[]) {
+ if (result.length === 0) {
+ console.log("");
+ } else {
+ result.forEach((doc) => console.log(doc.metadata));
+ }
+}
+
+let advancedFilter;
+
+// Not equal
+advancedFilter = { id: { $ne: 1 } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"id":{"$ne":1}}
+{ name: 'bob', is_active: false, id: 2, height: 5.7 }
+{ name: 'jane', is_active: true, id: 3, height: 2.4 }
+*/
+
+// Between range
+advancedFilter = { id: { $between: [1, 2] } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"id":{"$between":[1,2]}}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'bob', is_active: false, id: 2, height: 5.7 } */
+
+// In list
+advancedFilter = { name: { $in: ["adam", "bob"] } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"name":{"$in":["adam","bob"]}}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'bob', is_active: false, id: 2, height: 5.7 } */
+
+// Not in list
+advancedFilter = { name: { $nin: ["adam", "bob"] } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"name":{"$nin":["adam","bob"]}}
+{ name: 'jane', is_active: true, id: 3, height: 2.4 } */
+
+// Greater than
+advancedFilter = { id: { $gt: 1 } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"id":{"$gt":1}}
+{ name: 'bob', is_active: false, id: 2, height: 5.7 }
+{ name: 'jane', is_active: true, id: 3, height: 2.4 } */
+
+// Greater than or equal to
+advancedFilter = { id: { $gte: 1 } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"id":{"$gte":1}}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'bob', is_active: false, id: 2, height: 5.7 }
+{ name: 'jane', is_active: true, id: 3, height: 2.4 } */
+
+// Less than
+advancedFilter = { id: { $lt: 1 } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"id":{"$lt":1}}
+ */
+
+// Less than or equal to
+advancedFilter = { id: { $lte: 1 } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"id":{"$lte":1}}
+{ name: 'adam', is_active: true, id: 1, height: 10 } */
+
+// Text filtering with $like
+advancedFilter = { name: { $like: "a%" } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"name":{"$like":"a%"}}
+{ name: 'adam', is_active: true, id: 1, height: 10 } */
+
+advancedFilter = { name: { $like: "%a%" } };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"name":{"$like":"%a%"}}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'jane', is_active: true, id: 3, height: 2.4 } */
+
+// Combined filtering with $or
+advancedFilter = { $or: [{ id: 1 }, { name: "bob" }] };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"$or":[{"id":1},{"name":"bob"}]}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'bob', is_active: false, id: 2, height: 5.7 } */
+
+// Combined filtering with $and
+advancedFilter = { $and: [{ id: 1 }, { id: 2 }] };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"$and":[{"id":1},{"id":2}]}
+ */
+
+advancedFilter = { $or: [{ id: 1 }, { id: 2 }, { id: 3 }] };
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"$or":[{"id":1},{"id":2},{"id":3}]}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'bob', is_active: false, id: 2, height: 5.7 }
+{ name: 'jane', is_active: true, id: 3, height: 2.4 } */
+
+// You can also define a nested filter with $and and $or.
+advancedFilter = {
+ $and: [{ $or: [{ id: 1 }, { id: 2 }] }, { height: { $gte: 5.0 } }],
+};
+console.log(`Filter: ${JSON.stringify(advancedFilter)}`);
+printFilterResult(
+ await vectorStore.similaritySearch("just testing", 5, advancedFilter)
+);
+/* Filter: {"$and":[{"$or":[{"id":1},{"id":2}]},{"height":{"$gte":5}}]}
+{ name: 'adam', is_active: true, id: 1, height: 10 }
+{ name: 'bob', is_active: false, id: 2, height: 5.7 } */
+
+// Disconnect from SAP HANA after the operations
+client.disconnect();
diff --git a/examples/src/indexes/vector_stores/hana_vector/createHnswIndex.ts b/examples/src/indexes/vector_stores/hana_vector/createHnswIndex.ts
new file mode 100644
index 000000000000..206ef9a402cf
--- /dev/null
+++ b/examples/src/indexes/vector_stores/hana_vector/createHnswIndex.ts
@@ -0,0 +1,98 @@
+import hanaClient from "hdb";
+import {
+ HanaDB,
+ HanaDBArgs,
+} from "@langchain/community/vectorstores/hanavector";
+import { OpenAIEmbeddings } from "@langchain/openai";
+
+// The table "test_fromDocs" was already created by the previous example.
+// Now, we will use this existing table to create indexes and perform similarity search.
+
+const connectionParams = {
+ host: process.env.HANA_HOST,
+ port: process.env.HANA_PORT,
+ user: process.env.HANA_UID,
+ password: process.env.HANA_PWD,
+};
+const client = hanaClient.createClient(connectionParams);
+
+// Connect to SAP HANA
+await new Promise<void>((resolve, reject) => {
+ client.connect((err: Error) => {
+ if (err) {
+ reject(err);
+ } else {
+ console.log("Connected to SAP HANA successfully.");
+ resolve();
+ }
+ });
+});
+
+// Initialize embeddings
+const embeddings = new OpenAIEmbeddings();
+
+// First instance using the existing table "test_fromDocs" (default: Cosine similarity)
+const argsCosine: HanaDBArgs = {
+ connection: client,
+ tableName: "test_fromDocs",
+};
+
+// Second instance using the existing table "test_fromDocs" but with L2 Euclidean distance
+const argsL2: HanaDBArgs = {
+ connection: client,
+ tableName: "test_fromDocs",
+ distanceStrategy: "euclidean", // Use Euclidean distance for this instance
+};
+
+// Initialize both HanaDB instances
+const vectorStoreCosine = new HanaDB(embeddings, argsCosine);
+const vectorStoreL2 = new HanaDB(embeddings, argsL2);
+
+// Create HNSW index with Cosine similarity (default)
+await vectorStoreCosine.createHnswIndex({
+ indexName: "hnsw_cosine_index",
+ efSearch: 400,
+ m: 50,
+ efConstruction: 150,
+});
+
+// Create HNSW index with Euclidean (L2) distance
+await vectorStoreL2.createHnswIndex({
+ indexName: "hnsw_l2_index",
+ efSearch: 400,
+ m: 50,
+ efConstruction: 150,
+});
+
+// Query text for similarity search
+const query = "What did the president say about Ketanji Brown Jackson";
+
+// Perform similarity search using the default Cosine index
+const docsCosine = await vectorStoreCosine.similaritySearch(query, 2);
+console.log("Cosine Similarity Results:");
+docsCosine.forEach((doc) => {
+ console.log("-".repeat(80));
+ console.log(doc.pageContent);
+});
+/*
+Cosine Similarity Results:
+----------------------------------------------------------------------
+One of the most serious constitutional ...
+
+And I did that 4 days ago, when I ...
+----------------------------------------------------------------------
+As I said last year, especially ...
+
+While it often appears that we never agree, that isn’t true...
+*/
+// Perform similarity search using Euclidean distance (L2 index)
+const docsL2 = await vectorStoreL2.similaritySearch(query, 2);
+console.log("Euclidean (L2) Distance Results:");
+docsL2.forEach((doc) => {
+ console.log("-".repeat(80));
+ console.log(doc.pageContent);
+});
+// The L2 distance results should match the cosine results, since OpenAI embeddings are normalized.
+
+// Disconnect from SAP HANA after the operations
+client.disconnect();
diff --git a/examples/src/memory/azure_cosmosdb_nosql.ts b/examples/src/memory/azure_cosmosdb_nosql.ts
index 2f3cddf4460f..415a64b91f94 100644
--- a/examples/src/memory/azure_cosmosdb_nosql.ts
+++ b/examples/src/memory/azure_cosmosdb_nosql.ts
@@ -44,7 +44,7 @@ const res1 = await chainWithHistory.invoke(
);
console.log({ res1 });
/*
-{ res1: 'Hi Jim! How can I assist you today?' }
+ { res1: 'Hi Jim! How can I assist you today?' }
*/
const res2 = await chainWithHistory.invoke(
@@ -52,7 +52,23 @@ const res2 = await chainWithHistory.invoke(
{ configurable: { sessionId: "langchain-test-session" } }
);
console.log({ res2 });
-
/*
{ res2: { response: 'You said your name was Jim.' }
- */
+ */
+
+// Give this session a title
+const chatHistory = (await chainWithHistory.getMessageHistory(
+ "langchain-test-session"
+)) as AzureCosmsosDBNoSQLChatMessageHistory;
+
+await chatHistory.setContext({ title: "Introducing Jim" });
+
+// List all sessions for the user
+const sessions = await chatHistory.getAllSessions();
+
+console.log(sessions);
+/*
+ [
+ { sessionId: 'langchain-test-session', context: { title: "Introducing Jim" } }
+ ]
+ */
diff --git a/examples/src/memory/file.ts b/examples/src/memory/file.ts
new file mode 100644
index 000000000000..5728ec3af26d
--- /dev/null
+++ b/examples/src/memory/file.ts
@@ -0,0 +1,71 @@
+import { ChatOpenAI } from "@langchain/openai";
+import { FileSystemChatMessageHistory } from "@langchain/community/stores/message/file_system";
+import { RunnableWithMessageHistory } from "@langchain/core/runnables";
+import { StringOutputParser } from "@langchain/core/output_parsers";
+import {
+ ChatPromptTemplate,
+ MessagesPlaceholder,
+} from "@langchain/core/prompts";
+
+const model = new ChatOpenAI({
+ model: "gpt-3.5-turbo",
+ temperature: 0,
+});
+
+const prompt = ChatPromptTemplate.fromMessages([
+ [
+ "system",
+ "You are a helpful assistant. Answer all questions to the best of your ability.",
+ ],
+ new MessagesPlaceholder("chat_history"),
+ ["human", "{input}"],
+]);
+
+const chain = prompt.pipe(model).pipe(new StringOutputParser());
+
+const chainWithHistory = new RunnableWithMessageHistory({
+ runnable: chain,
+ inputMessagesKey: "input",
+ historyMessagesKey: "chat_history",
+ getMessageHistory: async (sessionId) => {
+ const chatHistory = new FileSystemChatMessageHistory({
+ sessionId,
+ userId: "user-id",
+ });
+ return chatHistory;
+ },
+});
+
+const res1 = await chainWithHistory.invoke(
+ { input: "Hi! I'm Jim." },
+ { configurable: { sessionId: "langchain-test-session" } }
+);
+console.log({ res1 });
+/*
+ { res1: 'Hi Jim! How can I assist you today?' }
+ */
+
+const res2 = await chainWithHistory.invoke(
+ { input: "What did I just say my name was?" },
+ { configurable: { sessionId: "langchain-test-session" } }
+);
+console.log({ res2 });
+/*
+  { res2: 'You said your name was Jim.' }
+ */
+
+// Give this session a title
+const chatHistory = (await chainWithHistory.getMessageHistory(
+ "langchain-test-session"
+)) as FileSystemChatMessageHistory;
+
+await chatHistory.setContext({ title: "Introducing Jim" });
+
+// List all sessions for the user
+const sessions = await chatHistory.getAllSessions();
+console.log(sessions);
+/*
+ [
+ { sessionId: 'langchain-test-session', context: { title: "Introducing Jim" } }
+ ]
+ */
diff --git a/langchain-core/package.json b/langchain-core/package.json
index 15284c578396..a9f2bfc74d01 100644
--- a/langchain-core/package.json
+++ b/langchain-core/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/core",
- "version": "0.3.19",
+ "version": "0.3.22",
"description": "Core LangChain.js abstractions and schemas",
"type": "module",
"engines": {
@@ -37,7 +37,7 @@
"camelcase": "6",
"decamelize": "1.2.0",
"js-tiktoken": "^1.0.12",
- "langsmith": "^0.2.0",
+ "langsmith": "^0.2.8",
"mustache": "^4.2.0",
"p-queue": "^6.6.2",
"p-retry": "4",
diff --git a/langchain-core/src/runnables/base.ts b/langchain-core/src/runnables/base.ts
index 1cab402513ec..d2ba10b6f6bd 100644
--- a/langchain-core/src/runnables/base.ts
+++ b/langchain-core/src/runnables/base.ts
@@ -6,7 +6,11 @@ import {
type TraceableFunction,
isTraceableFunction,
} from "langsmith/singletons/traceable";
-import type { RunnableInterface, RunnableBatchOptions } from "./types.js";
+import type {
+ RunnableInterface,
+ RunnableBatchOptions,
+ RunnableConfig,
+} from "./types.js";
import { CallbackManagerForChainRun } from "../callbacks/manager.js";
import {
LogStreamCallbackHandler,
@@ -33,11 +37,11 @@ import {
import { raceWithSignal } from "../utils/signal.js";
import {
DEFAULT_RECURSION_LIMIT,
- RunnableConfig,
ensureConfig,
getCallbackManagerForConfig,
mergeConfigs,
patchConfig,
+ pickRunnableConfigKeys,
} from "./config.js";
import { AsyncCaller } from "../utils/async_caller.js";
import { Run } from "../tracers/base.js";
@@ -2529,7 +2533,7 @@ export class RunnableLambda<
recursionLimit: (config?.recursionLimit ?? DEFAULT_RECURSION_LIMIT) - 1,
});
void AsyncLocalStorageProviderSingleton.runWithConfig(
- childConfig,
+ pickRunnableConfigKeys(childConfig),
async () => {
try {
let output = await this.func(input, {
@@ -2627,7 +2631,7 @@ export class RunnableLambda<
const output = await new Promise(
(resolve, reject) => {
void AsyncLocalStorageProviderSingleton.runWithConfig(
- childConfig,
+ pickRunnableConfigKeys(childConfig),
async () => {
try {
const res = await this.func(finalChunk as RunInput, {
diff --git a/langchain-core/src/runnables/config.ts b/langchain-core/src/runnables/config.ts
index 8fa9a244ee3d..aae7164b5721 100644
--- a/langchain-core/src/runnables/config.ts
+++ b/langchain-core/src/runnables/config.ts
@@ -233,3 +233,21 @@ export function patchConfig(
}
return newConfig;
}
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+export function pickRunnableConfigKeys<CallOptions extends Record<string, any>>(
+  config?: CallOptions
+): Partial<CallOptions> | undefined {
+ return config
+ ? {
+ configurable: config.configurable,
+ recursionLimit: config.recursionLimit,
+ callbacks: config.callbacks,
+ tags: config.tags,
+ metadata: config.metadata,
+ maxConcurrency: config.maxConcurrency,
+ timeout: config.timeout,
+ signal: config.signal,
+ }
+ : undefined;
+}
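
For context, a minimal sketch of what the new helper does. The `stop` option and metadata contents below are illustrative, not part of this PR; the point is that keys outside the fixed `RunnableConfig` allow-list are dropped before the config is propagated via async local storage:

```ts
import { pickRunnableConfigKeys } from "@langchain/core/runnables";

// Call options often mix standard RunnableConfig keys with
// model-specific options such as `stop`.
const callOptions = {
  tags: ["demo"],
  metadata: { userId: "123" },
  recursionLimit: 10,
  stop: ["\n"],
};

// Only the standard config keys are copied; `stop` is dropped, so
// arbitrary call options never leak into nested runnable contexts.
const safeConfig = pickRunnableConfigKeys(callOptions);

console.log("stop" in (safeConfig ?? {})); // false
console.log(safeConfig?.tags); // ["demo"]
```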
diff --git a/langchain-core/src/runnables/index.ts b/langchain-core/src/runnables/index.ts
index 2a34d91c8164..7d78b1c5f75a 100644
--- a/langchain-core/src/runnables/index.ts
+++ b/langchain-core/src/runnables/index.ts
@@ -29,6 +29,7 @@ export {
patchConfig,
ensureConfig,
mergeConfigs,
+ pickRunnableConfigKeys,
} from "./config.js";
export { RunnablePassthrough } from "./passthrough.js";
export { type RouterInput, RouterRunnable } from "./router.js";
diff --git a/langchain-core/src/runnables/iter.ts b/langchain-core/src/runnables/iter.ts
index 52b7a61db06a..4d7ead6efa60 100644
--- a/langchain-core/src/runnables/iter.ts
+++ b/langchain-core/src/runnables/iter.ts
@@ -1,5 +1,6 @@
+import type { RunnableConfig } from "../runnables/types.js";
import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js";
-import { RunnableConfig } from "./config.js";
+import { pickRunnableConfigKeys } from "./config.js";
export function isIterableIterator(
thing: unknown
@@ -36,7 +37,7 @@ export function* consumeIteratorInContext<T>(
): IterableIterator<T> {
while (true) {
const { value, done } = AsyncLocalStorageProviderSingleton.runWithConfig(
- context,
+ pickRunnableConfigKeys(context),
iter.next.bind(iter),
true
);
@@ -56,7 +57,7 @@ export async function* consumeAsyncIterableInContext(
while (true) {
const { value, done } =
await AsyncLocalStorageProviderSingleton.runWithConfig(
- context,
+ pickRunnableConfigKeys(context),
iterator.next.bind(iter),
true
);
diff --git a/langchain-core/src/runnables/tests/runnable_map.test.ts b/langchain-core/src/runnables/tests/runnable_map.test.ts
index ffb41ab4e527..d2aa90093f58 100644
--- a/langchain-core/src/runnables/tests/runnable_map.test.ts
+++ b/langchain-core/src/runnables/tests/runnable_map.test.ts
@@ -131,6 +131,10 @@ test("Should stream chunks from each step as they are produced", async () => {
const chunks = [];
for await (const chunk of stream) {
+ if (chunk.chat?.id !== undefined) {
+ chunk.chat.id = expect.any(String) as any;
+ chunk.chat.lc_kwargs.id = expect.any(String);
+ }
chunks.push(chunk);
}
diff --git a/langchain-core/src/runnables/types.ts b/langchain-core/src/runnables/types.ts
index f40d80ee3831..f06e94fa1254 100644
--- a/langchain-core/src/runnables/types.ts
+++ b/langchain-core/src/runnables/types.ts
@@ -1,7 +1,7 @@
import type { z } from "zod";
-import type { IterableReadableStreamInterface } from "../utils/stream.js";
import type { SerializableInterface } from "../load/serializable.js";
import type { BaseCallbackConfig } from "../callbacks/manager.js";
+import type { IterableReadableStreamInterface } from "../types/stream.js";
export type RunnableBatchOptions = {
/** @deprecated Pass in via the standard runnable config object instead */
diff --git a/langchain-core/src/singletons/tests/async_local_storage.test.ts b/langchain-core/src/singletons/tests/async_local_storage.test.ts
index 4cb5a2ea77f9..1be3b273ed06 100644
--- a/langchain-core/src/singletons/tests/async_local_storage.test.ts
+++ b/langchain-core/src/singletons/tests/async_local_storage.test.ts
@@ -155,7 +155,12 @@ test("Runnable streamEvents method with streaming nested in a RunnableLambda", a
const nestedLambdaWithOverriddenCallbacks = RunnableLambda.from(
async (_: string, config) => {
- expect(config?.callbacks?.handlers).toEqual([]);
+ expect(
+ config?.callbacks?.handlers.filter(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ (handler: any) => handler.name !== "langchain_tracer"
+ )
+ ).toEqual([]);
}
);
await nestedLambdaWithOverriddenCallbacks.invoke(input, {
diff --git a/langchain-core/src/singletons/tracer.ts b/langchain-core/src/singletons/tracer.ts
index 89696240705d..1a51a43aac91 100644
--- a/langchain-core/src/singletons/tracer.ts
+++ b/langchain-core/src/singletons/tracer.ts
@@ -16,3 +16,7 @@ export const getDefaultLangChainClientSingleton = () => {
}
return client;
};
+
+export const setDefaultLangChainClientSingleton = (newClient: Client) => {
+ client = newClient;
+};
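
The setter exists so tests can swap in a preconfigured LangSmith client, as the updated `langsmith_interop.test.ts` below does. A rough sketch of that pattern (the relative import path is illustrative; this is internal test setup, not public API):

```ts
import { jest, beforeEach } from "@jest/globals";
import { Client } from "langsmith";
import { setDefaultLangChainClientSingleton } from "../../singletons/tracer.js";

// Disable batching so each traced run fires an immediate fetch call
// that the mocked global fetch can intercept deterministically.
const client = new Client({ autoBatchTracing: false });

beforeEach(() => {
  jest.spyOn(global, "fetch").mockImplementation(() =>
    Promise.resolve({
      ok: true,
      text: () => Promise.resolve(""),
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } as any)
  );
  setDefaultLangChainClientSingleton(client);
});
```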
diff --git a/langchain-core/src/tools/index.ts b/langchain-core/src/tools/index.ts
index 348e85103904..8ce02d28c935 100644
--- a/langchain-core/src/tools/index.ts
+++ b/langchain-core/src/tools/index.ts
@@ -12,6 +12,7 @@ import {
import {
ensureConfig,
patchConfig,
+ pickRunnableConfigKeys,
type RunnableConfig,
} from "../runnables/config.js";
import type { RunnableFunc, RunnableInterface } from "../runnables/base.js";
@@ -594,7 +595,7 @@ export function tool<
callbacks: runManager?.getChild(),
});
void AsyncLocalStorageProviderSingleton.runWithConfig(
- childConfig,
+ pickRunnableConfigKeys(childConfig),
async () => {
try {
// TS doesn't restrict the type here based on the guard above
@@ -625,7 +626,7 @@ export function tool<
callbacks: runManager?.getChild(),
});
void AsyncLocalStorageProviderSingleton.runWithConfig(
- childConfig,
+ pickRunnableConfigKeys(childConfig),
async () => {
try {
// TS doesn't restrict the type here based on the guard above
diff --git a/langchain-core/src/tracers/tests/langsmith_interop.test.ts b/langchain-core/src/tracers/tests/langsmith_interop.test.ts
index 11d654fca129..48429e1d5f09 100644
--- a/langchain-core/src/tracers/tests/langsmith_interop.test.ts
+++ b/langchain-core/src/tracers/tests/langsmith_interop.test.ts
@@ -11,14 +11,20 @@ import {
expect,
} from "@jest/globals";
import { traceable } from "langsmith/traceable";
+import { Client } from "langsmith";
import { RunnableLambda } from "../../runnables/base.js";
import { BaseMessage, HumanMessage } from "../../messages/index.js";
+import { setDefaultLangChainClientSingleton } from "../../singletons/tracer.js";
let fetchMock: any;
const originalTracingEnvValue = process.env.LANGCHAIN_TRACING_V2;
+const client = new Client({
+ autoBatchTracing: false,
+});
+
beforeEach(() => {
fetchMock = jest.spyOn(global, "fetch").mockImplementation(() =>
Promise.resolve({
@@ -29,6 +35,7 @@ beforeEach(() => {
},
} as any)
);
+ setDefaultLangChainClientSingleton(client);
process.env.LANGCHAIN_TRACING_V2 = "true";
});
@@ -50,7 +57,7 @@ test.each(["true", "false"])(
await new Promise((resolve) => setTimeout(resolve, 300));
return msg.content + name;
},
- { name: "aiGreet", tracingEnabled: true }
+ { name: "aiGreet", tracingEnabled: true, client }
);
const root = RunnableLambda.from(async (messages: BaseMessage[]) => {
@@ -197,7 +204,7 @@ test.each(["true", "false"])(
expect(getContextVariable("foo")).toEqual("baz");
return msg.content + name;
},
- { name: "aiGreet", tracingEnabled: true }
+ { name: "aiGreet", tracingEnabled: true, client }
);
const root = RunnableLambda.from(async (messages: BaseMessage[]) => {
@@ -485,7 +492,7 @@ test.each(["true", "false"])(
const contents = await nested.invoke([msg]);
return contents[0] + name;
},
- { name: "aiGreet", tracingEnabled: true }
+ { name: "aiGreet", tracingEnabled: true, client }
);
await aiGreet(new HumanMessage({ content: "Hello!" }), "mitochondria");
@@ -632,7 +639,7 @@ test.each(["true", "false"])(
expect(getContextVariable("foo")).toEqual("bar");
return contents[0] + name;
},
- { name: "aiGreet", tracingEnabled: true }
+ { name: "aiGreet", tracingEnabled: true, client }
);
await aiGreet(new HumanMessage({ content: "Hello!" }), "mitochondria");
@@ -781,7 +788,7 @@ test.each(["true", "false"])(
yield letter;
}
},
- { name: "aiGreet", tracingEnabled: true }
+ { name: "aiGreet", tracingEnabled: true, client }
);
for await (const _ of aiGreet(
diff --git a/langchain-core/src/tracers/tracer_langchain.ts b/langchain-core/src/tracers/tracer_langchain.ts
index e5719125df51..8a58a8e8b119 100644
--- a/langchain-core/src/tracers/tracer_langchain.ts
+++ b/langchain-core/src/tracers/tracer_langchain.ts
@@ -1,4 +1,4 @@
-import { Client } from "langsmith";
+import type { Client, LangSmithTracingClientInterface } from "langsmith";
import { RunTree } from "langsmith/run_trees";
import { getCurrentRunTree } from "langsmith/singletons/traceable";
@@ -36,7 +36,7 @@ export interface RunUpdate extends BaseRunUpdate {
export interface LangChainTracerFields extends BaseCallbackHandlerInput {
exampleId?: string;
projectName?: string;
- client?: Client;
+ client?: LangSmithTracingClientInterface;
}
export class LangChainTracer
@@ -49,7 +49,7 @@ export class LangChainTracer
exampleId?: string;
- client: Client;
+ client: LangSmithTracingClientInterface;
constructor(fields: LangChainTracerFields = {}) {
super(fields);
@@ -156,7 +156,7 @@ export class LangChainTracer
parent_run: undefined,
// inherited properties
- client: this.client,
+ client: this.client as Client,
project_name: this.projectName,
reference_example_id: this.exampleId,
tracingEnabled: true,
diff --git a/langchain-core/src/types/stream.ts b/langchain-core/src/types/stream.ts
new file mode 100644
index 000000000000..ae03b69b78bb
--- /dev/null
+++ b/langchain-core/src/types/stream.ts
@@ -0,0 +1,5 @@
+// Make this a type to override ReadableStream's async iterator type in case
+// the popular web-streams-polyfill is imported - the supplied types
+// in that case don't quite match.
+export type IterableReadableStreamInterface<T> = ReadableStream<T> &
+  AsyncIterable<T>;
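
The intersection type exists because `web-streams-polyfill` ships a `ReadableStream` whose async-iterator typing differs from the built-in one; declaring streams as `ReadableStream<T> & AsyncIterable<T>` keeps `for await` compiling in both worlds. A consumer-side sketch (the deep import path mirrors the new file and is illustrative):

```typescript
import type { IterableReadableStreamInterface } from "@langchain/core/types/stream";

// for-await works regardless of whether the runtime or a polyfill
// provides the underlying ReadableStream implementation.
async function drain(
  stream: IterableReadableStreamInterface<string>
): Promise<string> {
  let out = "";
  for await (const chunk of stream) {
    out += chunk;
  }
  return out;
}
```
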
diff --git a/langchain-core/src/utils/env.ts b/langchain-core/src/utils/env.ts
index 15639cb471a3..3b8d5153847a 100644
--- a/langchain-core/src/utils/env.ts
+++ b/langchain-core/src/utils/env.ts
@@ -5,6 +5,9 @@ declare global {
version: {
deno: string;
};
+ env: {
+ get: (name: string) => string | undefined;
+ };
}
| undefined;
}
@@ -78,10 +81,14 @@ export function getEnvironmentVariable(name: string): string | undefined {
// Certain Deno setups will throw an error if you try to access environment variables
// https://github.com/langchain-ai/langchainjs/issues/1412
try {
- return typeof process !== "undefined"
- ? // eslint-disable-next-line no-process-env
- process.env?.[name]
- : undefined;
+ if (typeof process !== "undefined") {
+ // eslint-disable-next-line no-process-env
+ return process.env?.[name];
+ } else if (isDeno()) {
+ return Deno?.env.get(name);
+ } else {
+ return undefined;
+ }
} catch (e) {
return undefined;
}
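
With the Deno branch added, the helper now resolves variables in both runtimes and still degrades to `undefined` in sandboxes that forbid environment access. Usage stays the same; the variable names here are placeholders:

```typescript
import { getEnvironmentVariable } from "@langchain/core/utils/env";

// Node reads process.env, Deno uses Deno.env.get, and locked-down
// environments yield undefined instead of throwing.
const apiKey = getEnvironmentVariable("MY_SERVICE_API_KEY");
const baseUrl =
  getEnvironmentVariable("MY_SERVICE_URL") ?? "http://localhost:8080";
```
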
diff --git a/langchain-core/src/utils/stream.ts b/langchain-core/src/utils/stream.ts
index aa9db0604637..cd3e592806be 100644
--- a/langchain-core/src/utils/stream.ts
+++ b/langchain-core/src/utils/stream.ts
@@ -1,11 +1,11 @@
+import { pickRunnableConfigKeys } from "../runnables/config.js";
import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js";
+import type { IterableReadableStreamInterface } from "../types/stream.js";
import { raceWithSignal } from "./signal.js";
-// Make this a type to override ReadableStream's async iterator type in case
-// the popular web-streams-polyfill is imported - the supplied types
-// in that case don't quite match.
-export type IterableReadableStreamInterface<T> = ReadableStream<T> &
-  AsyncIterable<T>;
+// Re-exported for backwards compatibility
+// Do NOT import this type from this file inside the project. Instead, always import from `types/stream.js`
+export type { IterableReadableStreamInterface };
/*
* Support async iterator syntax for ReadableStreams in all environments.
@@ -215,7 +215,9 @@ export class AsyncGeneratorWithSetup<
// to each generator is available.
this.setup = new Promise((resolve, reject) => {
void AsyncLocalStorageProviderSingleton.runWithConfig(
- params.config,
+ pickRunnableConfigKeys(
+        params.config as Record<string, unknown> | undefined
+ ),
async () => {
this.firstResult = params.generator.next();
if (params.startSetup) {
@@ -238,7 +240,9 @@ export class AsyncGeneratorWithSetup<
}
return AsyncLocalStorageProviderSingleton.runWithConfig(
- this.config,
+ pickRunnableConfigKeys(
+        this.config as Record<string, unknown> | undefined
+ ),
this.signal
? async () => {
return raceWithSignal(this.generator.next(...args), this.signal);
diff --git a/langchain/package.json b/langchain/package.json
index 24c44241b0f5..9a31b79868df 100644
--- a/langchain/package.json
+++ b/langchain/package.json
@@ -520,7 +520,7 @@
"js-tiktoken": "^1.0.12",
"js-yaml": "^4.1.0",
"jsonpointer": "^5.0.1",
- "langsmith": "^0.2.0",
+ "langsmith": "^0.2.8",
"openapi-types": "^12.1.3",
"p-retry": "4",
"uuid": "^10.0.0",
diff --git a/libs/langchain-anthropic/package.json b/libs/langchain-anthropic/package.json
index 2f62e77b6a78..9148d6e61c8a 100644
--- a/libs/langchain-anthropic/package.json
+++ b/libs/langchain-anthropic/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/anthropic",
- "version": "0.3.8",
+ "version": "0.3.9",
"description": "Anthropic integrations for LangChain.js",
"type": "module",
"engines": {
diff --git a/libs/langchain-anthropic/src/chat_models.ts b/libs/langchain-anthropic/src/chat_models.ts
index f474324b37cd..480de0a7c57e 100644
--- a/libs/langchain-anthropic/src/chat_models.ts
+++ b/libs/langchain-anthropic/src/chat_models.ts
@@ -28,7 +28,7 @@ import { z } from "zod";
import type {
MessageCreateParams,
Tool as AnthropicTool,
-} from "@anthropic-ai/sdk/resources/index.mjs";
+} from "@anthropic-ai/sdk/resources/messages";
import { isLangChainTool } from "@langchain/core/utils/function_calling";
import { AnthropicToolsOutputParser } from "./output_parsers.js";
diff --git a/libs/langchain-anthropic/src/types.ts b/libs/langchain-anthropic/src/types.ts
index ff84cec243eb..fb289b84b019 100644
--- a/libs/langchain-anthropic/src/types.ts
+++ b/libs/langchain-anthropic/src/types.ts
@@ -1,5 +1,5 @@
import Anthropic from "@anthropic-ai/sdk";
-import type { Tool as AnthropicTool } from "@anthropic-ai/sdk/resources/index.mjs";
+import type { Tool as AnthropicTool } from "@anthropic-ai/sdk/resources/messages";
import { BindToolsInput } from "@langchain/core/language_models/chat_models";
export type AnthropicToolResponse = {
diff --git a/libs/langchain-azure-cosmosdb/package.json b/libs/langchain-azure-cosmosdb/package.json
index 7cd8a1cd4101..5389e1befa4b 100644
--- a/libs/langchain-azure-cosmosdb/package.json
+++ b/libs/langchain-azure-cosmosdb/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/azure-cosmosdb",
- "version": "0.2.3",
+ "version": "0.2.4",
"description": "Azure CosmosDB integration for LangChain.js",
"type": "module",
"engines": {
@@ -32,9 +32,9 @@
"author": "LangChain",
"license": "MIT",
"dependencies": {
- "@azure/cosmos": "4.0.1-beta.3",
- "@azure/identity": "^4.2.0",
- "mongodb": "^6.8.0"
+ "@azure/cosmos": "^4.2.0",
+ "@azure/identity": "^4.5.0",
+ "mongodb": "^6.10.0"
},
"peerDependencies": {
"@langchain/core": ">=0.2.21 <0.4.0"
diff --git a/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts b/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts
index 618d43ab64c9..3e4acb259c77 100644
--- a/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts
+++ b/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts
@@ -15,7 +15,9 @@ import {
IndexingPolicy,
SqlParameter,
SqlQuerySpec,
+ VectorEmbedding,
VectorEmbeddingPolicy,
+ VectorIndex,
} from "@azure/cosmos";
import { DefaultAzureCredential, TokenCredential } from "@azure/identity";
@@ -186,7 +188,7 @@ export class AzureCosmosDBNoSQLVectorStore extends VectorStore {
distanceFunction: "cosine",
// Will be determined automatically during initialization
dimensions: 0,
- },
+ } as VectorEmbedding,
];
}
@@ -195,7 +197,7 @@ export class AzureCosmosDBNoSQLVectorStore extends VectorStore {
{
path: "/vector",
type: "quantizedFlat",
- },
+ } as VectorIndex,
];
}
diff --git a/libs/langchain-azure-cosmosdb/src/chat_histories.ts b/libs/langchain-azure-cosmosdb/src/chat_histories.ts
index 033acc521334..24e98fd3b074 100644
--- a/libs/langchain-azure-cosmosdb/src/chat_histories.ts
+++ b/libs/langchain-azure-cosmosdb/src/chat_histories.ts
@@ -1,4 +1,9 @@
-import { Container, CosmosClient, CosmosClientOptions } from "@azure/cosmos";
+import {
+ Container,
+ CosmosClient,
+ CosmosClientOptions,
+ ErrorResponse,
+} from "@azure/cosmos";
import { DefaultAzureCredential, TokenCredential } from "@azure/identity";
import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
import {
@@ -12,6 +17,14 @@ const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-chathistory-javascript";
const DEFAULT_DATABASE_NAME = "chatHistoryDB";
const DEFAULT_CONTAINER_NAME = "chatHistoryContainer";
+/**
+ * Lightweight type for listing chat sessions.
+ */
+export type ChatSession = {
+ id: string;
+ context: Record;
+};
+
/**
* Type for the input to the `AzureCosmosDBNoSQLChatMessageHistory` constructor.
*/
@@ -68,7 +81,6 @@ export interface AzureCosmosDBNoSQLChatMessageHistoryInput {
* );
* ```
*/
-
export class AzureCosmsosDBNoSQLChatMessageHistory extends BaseListChatMessageHistory {
lc_namespace = ["langchain", "stores", "message", "azurecosmosdb"];
@@ -90,6 +102,8 @@ export class AzureCosmsosDBNoSQLChatMessageHistory extends BaseListChatMessageHi
   private initPromise?: Promise<void>;
+  private context: Record<string, unknown> = {};
+
constructor(chatHistoryInput: AzureCosmosDBNoSQLChatMessageHistoryInput) {
super();
@@ -175,9 +189,11 @@ export class AzureCosmsosDBNoSQLChatMessageHistory extends BaseListChatMessageHi
this.messageList = await this.getMessages();
this.messageList.push(message);
const messages = mapChatMessagesToStoredMessages(this.messageList);
+ const context = await this.getContext();
await this.container.items.upsert({
id: this.sessionId,
userId: this.userId,
+ context,
messages,
});
}
@@ -188,17 +204,53 @@ export class AzureCosmsosDBNoSQLChatMessageHistory extends BaseListChatMessageHi
await this.container.item(this.sessionId, this.userId).delete();
}
- async clearAllSessionsForUser(userId: string) {
+ async clearAllSessions() {
await this.initializeContainer();
const query = {
query: "SELECT c.id FROM c WHERE c.userId = @userId",
- parameters: [{ name: "@userId", value: userId }],
+ parameters: [{ name: "@userId", value: this.userId }],
};
const { resources: userSessions } = await this.container.items
.query(query)
.fetchAll();
for (const userSession of userSessions) {
- await this.container.item(userSession.id, userId).delete();
+ await this.container.item(userSession.id, this.userId).delete();
+ }
+ }
+
+  async getAllSessions(): Promise<ChatSession[]> {
+ await this.initializeContainer();
+ const query = {
+ query: "SELECT c.id, c.context FROM c WHERE c.userId = @userId",
+ parameters: [{ name: "@userId", value: this.userId }],
+ };
+ const { resources: userSessions } = await this.container.items
+ .query(query)
+ .fetchAll();
+ return userSessions ?? [];
+ }
+
+  async getContext(): Promise<Record<string, unknown>> {
+ const document = await this.container
+ .item(this.sessionId, this.userId)
+ .read();
+ this.context = document.resource?.context || this.context;
+ return this.context;
+ }
+
+  async setContext(context: Record<string, unknown>): Promise<void> {
+ await this.initializeContainer();
+ this.context = context || {};
+ try {
+ await this.container
+ .item(this.sessionId, this.userId)
+ .patch([{ op: "replace", path: "/context", value: this.context }]);
+ } catch (_error: unknown) {
+ const error = _error as ErrorResponse;
+ // If document does not exist yet, context will be set when adding the first message
+ if (error?.code !== 404) {
+ throw error;
+ }
}
}
}
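
Taken together, the new `context` APIs let callers attach metadata (for example, a chat title) to a session and enumerate a user's sessions without loading messages. A minimal sketch, assuming Cosmos DB credentials are already configured for the package:

```typescript
import { AzureCosmsosDBNoSQLChatMessageHistory } from "@langchain/azure-cosmosdb";

const history = new AzureCosmsosDBNoSQLChatMessageHistory({
  sessionId: "session-1",
  userId: "user-1",
});

await history.setContext({ title: "Best vocalist" });
await history.addUserMessage("Who is the best vocalist?");
await history.addAIMessage("Ozzy Osbourne");

const context = await history.getContext(); // { title: "Best vocalist" }
const sessions = await history.getAllSessions(); // [{ id, context }, ...]
await history.clearAllSessions(); // removes every session for user-1
```
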
diff --git a/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts b/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts
index d6b66ddaac05..c7acb92f7c86 100644
--- a/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts
+++ b/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts
@@ -4,6 +4,7 @@
import {
CosmosClient,
IndexingMode,
+ VectorEmbedding,
VectorEmbeddingPolicy,
} from "@azure/cosmos";
import { DefaultAzureCredential } from "@azure/identity";
@@ -33,7 +34,7 @@ function vectorEmbeddingPolicy(
dataType: "float32",
distanceFunction,
dimensions: dimension,
- },
+ } as VectorEmbedding,
],
};
}
diff --git a/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts b/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts
index 81f2070ceb81..76da66d7f805 100644
--- a/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts
+++ b/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts
@@ -159,10 +159,41 @@ test("Test clear all sessions for a user", async () => {
const result2 = await chatHistory1.getMessages();
expect(result2).toEqual(expectedMessages);
- await chatHistory1.clearAllSessionsForUser("user1");
+ await chatHistory1.clearAllSessions();
const deletedResult1 = await chatHistory1.getMessages();
const deletedResult2 = await chatHistory2.getMessages();
expect(deletedResult1).toStrictEqual([]);
expect(deletedResult2).toStrictEqual([]);
});
+
+test("Test set context and get all sessions for a user", async () => {
+ const session1 = {
+ userId: "user1",
+ databaseName: DATABASE_NAME,
+ containerName: CONTAINER_NAME,
+ sessionId: new ObjectId().toString(),
+ };
+ const context1 = { title: "Best vocalist" };
+ const chatHistory1 = new AzureCosmsosDBNoSQLChatMessageHistory(session1);
+
+ await chatHistory1.setContext(context1);
+ await chatHistory1.addUserMessage("Who is the best vocalist?");
+ await chatHistory1.addAIMessage("Ozzy Osbourne");
+
+ const chatHistory2 = new AzureCosmsosDBNoSQLChatMessageHistory({
+ ...session1,
+ sessionId: new ObjectId().toString(),
+ });
+ const context2 = { title: "Best guitarist" };
+
+ await chatHistory2.addUserMessage("Who is the best guitarist?");
+ await chatHistory2.addAIMessage("Jimi Hendrix");
+ await chatHistory2.setContext(context2);
+
+ const sessions = await chatHistory1.getAllSessions();
+
+ expect(sessions.length).toBe(2);
+ expect(sessions[0].context).toEqual(context1);
+ expect(sessions[1].context).toEqual(context2);
+});
diff --git a/libs/langchain-community/.gitignore b/libs/langchain-community/.gitignore
index e6ae5fa54a4f..5064c1f14c79 100644
--- a/libs/langchain-community/.gitignore
+++ b/libs/langchain-community/.gitignore
@@ -558,6 +558,10 @@ chat_models/moonshot.cjs
chat_models/moonshot.js
chat_models/moonshot.d.ts
chat_models/moonshot.d.cts
+chat_models/novita.cjs
+chat_models/novita.js
+chat_models/novita.d.ts
+chat_models/novita.d.cts
chat_models/ollama.cjs
chat_models/ollama.js
chat_models/ollama.d.ts
@@ -770,6 +774,10 @@ stores/message/firestore.cjs
stores/message/firestore.js
stores/message/firestore.d.ts
stores/message/firestore.d.cts
+stores/message/file_system.cjs
+stores/message/file_system.js
+stores/message/file_system.d.ts
+stores/message/file_system.d.cts
stores/message/in_memory.cjs
stores/message/in_memory.js
stores/message/in_memory.d.ts
diff --git a/libs/langchain-community/langchain.config.js b/libs/langchain-community/langchain.config.js
index 4a402c6941e8..b0207b8612ab 100644
--- a/libs/langchain-community/langchain.config.js
+++ b/libs/langchain-community/langchain.config.js
@@ -178,6 +178,7 @@ export const config = {
"chat_models/llama_cpp": "chat_models/llama_cpp",
"chat_models/minimax": "chat_models/minimax",
"chat_models/moonshot": "chat_models/moonshot",
+ "chat_models/novita": "chat_models/novita",
"chat_models/ollama": "chat_models/ollama",
"chat_models/portkey": "chat_models/portkey",
"chat_models/premai": "chat_models/premai",
@@ -242,6 +243,7 @@ export const config = {
"stores/message/convex": "stores/message/convex",
"stores/message/dynamodb": "stores/message/dynamodb",
"stores/message/firestore": "stores/message/firestore",
+ "stores/message/file_system": "stores/message/file_system",
"stores/message/in_memory": "stores/message/in_memory",
"stores/message/ipfs_datastore": "stores/message/ipfs_datastore",
"stores/message/ioredis": "stores/message/ioredis",
diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json
index 7b826ad1e106..7b441119bdbd 100644
--- a/libs/langchain-community/package.json
+++ b/libs/langchain-community/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/community",
- "version": "0.3.16",
+ "version": "0.3.17",
"description": "Third-party integrations for LangChain.js",
"type": "module",
"engines": {
@@ -41,7 +41,7 @@
"flat": "^5.0.2",
"js-yaml": "^4.1.0",
"langchain": ">=0.2.3 <0.3.0 || >=0.3.4 <0.4.0",
- "langsmith": "^0.2.0",
+ "langsmith": "^0.2.8",
"uuid": "^10.0.0",
"zod": "^3.22.3",
"zod-to-json-schema": "^3.22.5"
@@ -68,7 +68,7 @@
"@cloudflare/workers-types": "^4.20230922.0",
"@datastax/astra-db-ts": "^1.0.1",
"@elastic/elasticsearch": "^8.4.0",
- "@faker-js/faker": "^7.6.0",
+ "@faker-js/faker": "8.4.1",
"@getmetal/metal-sdk": "^4.0.0",
"@getzep/zep-cloud": "^1.0.6",
"@getzep/zep-js": "^0.9.0",
@@ -1972,6 +1972,15 @@
"import": "./chat_models/moonshot.js",
"require": "./chat_models/moonshot.cjs"
},
+ "./chat_models/novita": {
+ "types": {
+ "import": "./chat_models/novita.d.ts",
+ "require": "./chat_models/novita.d.cts",
+ "default": "./chat_models/novita.d.ts"
+ },
+ "import": "./chat_models/novita.js",
+ "require": "./chat_models/novita.cjs"
+ },
"./chat_models/ollama": {
"types": {
"import": "./chat_models/ollama.d.ts",
@@ -2449,6 +2458,15 @@
"import": "./stores/message/firestore.js",
"require": "./stores/message/firestore.cjs"
},
+ "./stores/message/file_system": {
+ "types": {
+ "import": "./stores/message/file_system.d.ts",
+ "require": "./stores/message/file_system.d.cts",
+ "default": "./stores/message/file_system.d.ts"
+ },
+ "import": "./stores/message/file_system.js",
+ "require": "./stores/message/file_system.cjs"
+ },
"./stores/message/in_memory": {
"types": {
"import": "./stores/message/in_memory.d.ts",
@@ -3661,6 +3679,10 @@
"chat_models/moonshot.js",
"chat_models/moonshot.d.ts",
"chat_models/moonshot.d.cts",
+ "chat_models/novita.cjs",
+ "chat_models/novita.js",
+ "chat_models/novita.d.ts",
+ "chat_models/novita.d.cts",
"chat_models/ollama.cjs",
"chat_models/ollama.js",
"chat_models/ollama.d.ts",
@@ -3873,6 +3895,10 @@
"stores/message/firestore.js",
"stores/message/firestore.d.ts",
"stores/message/firestore.d.cts",
+ "stores/message/file_system.cjs",
+ "stores/message/file_system.js",
+ "stores/message/file_system.d.ts",
+ "stores/message/file_system.d.cts",
"stores/message/in_memory.cjs",
"stores/message/in_memory.js",
"stores/message/in_memory.d.ts",
diff --git a/libs/langchain-community/src/chat_models/ibm.ts b/libs/langchain-community/src/chat_models/ibm.ts
index 399e6d4d2909..8c329c70a9af 100644
--- a/libs/langchain-community/src/chat_models/ibm.ts
+++ b/libs/langchain-community/src/chat_models/ibm.ts
@@ -17,11 +17,11 @@ import {
BaseLanguageModelInput,
FunctionDefinition,
StructuredOutputMethodOptions,
- type BaseLanguageModelCallOptions,
} from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
BaseChatModel,
+ BaseChatModelCallOptions,
BindToolsInput,
LangSmithParams,
type BaseChatModelParams,
@@ -41,7 +41,6 @@ import {
TextChatResultChoice,
TextChatResultMessage,
TextChatToolCall,
- TextChatToolChoiceTool,
TextChatUsage,
} from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js";
import { WatsonXAI } from "@ibm-cloud/watsonx-ai";
@@ -80,14 +79,14 @@ export interface WatsonxDeltaStream {
}
export interface WatsonxCallParams
-  extends Partial<Omit<TextChatParams, "modelId">> {
+  extends Partial<Omit<TextChatParams, "modelId" | "toolChoice" | "toolChoiceOption">> {
maxRetries?: number;
}
export interface WatsonxCallOptionsChat
-  extends Omit<BaseLanguageModelCallOptions, "stop">,
+  extends Omit<BaseChatModelCallOptions, "stop">,
WatsonxCallParams {
promptIndex?: number;
- tool_choice?: TextChatToolChoiceTool;
+ tool_choice?: TextChatParameterTools | string | "auto" | "any";
}
type ChatWatsonxToolType = BindToolsInput | TextChatParameterTools;
@@ -309,6 +308,29 @@ function _convertDeltaToMessageChunk(
return null;
}
+function _convertToolChoiceToWatsonxToolChoice(
+ toolChoice: TextChatParameterTools | string | "auto" | "any"
+) {
+ if (typeof toolChoice === "string") {
+ if (toolChoice === "any" || toolChoice === "required") {
+ return { toolChoiceOption: "required" };
+ } else if (toolChoice === "auto" || toolChoice === "none") {
+ return { toolChoiceOption: toolChoice };
+ } else {
+ return {
+ toolChoice: {
+ type: "function",
+ function: { name: toolChoice },
+ },
+ };
+ }
+ } else if ("type" in toolChoice) return { toolChoice };
+ else
+ throw new Error(
+      `Unrecognized tool_choice type. Expected string or TextChatParameterTools. Received ${toolChoice}`
+ );
+}
+
export class ChatWatsonx<
CallOptions extends WatsonxCallOptionsChat = WatsonxCallOptionsChat
>
@@ -459,7 +481,7 @@ export class ChatWatsonx<
}
invocationParams(options: this["ParsedCallOptions"]) {
- return {
+ const params = {
maxTokens: options.maxTokens ?? this.maxTokens,
temperature: options?.temperature ?? this.temperature,
timeLimit: options?.timeLimit ?? this.timeLimit,
@@ -472,10 +494,12 @@ export class ChatWatsonx<
tools: options.tools
? _convertToolToWatsonxTool(options.tools)
: undefined,
- toolChoice: options.tool_choice,
responseFormat: options.responseFormat,
- toolChoiceOption: options.toolChoiceOption,
};
+ const toolChoiceResult = options.tool_choice
+ ? _convertToolChoiceToWatsonxToolChoice(options.tool_choice)
+ : {};
+ return { ...params, ...toolChoiceResult };
}
override bindTools(
@@ -562,7 +586,7 @@ export class ChatWatsonx<
.map(([_, value]) => value);
return { generations, llmOutput: { tokenUsage } };
} else {
-      const params: Omit<TextChatParams, "messages"> = {
+ const params = {
...this.invocationParams(options),
...this.scopeId(),
};
@@ -576,7 +600,6 @@ export class ChatWatsonx<
messages: watsonxMessages,
});
const { result } = await this.completionWithRetry(callback, options);
-
const generations: ChatGeneration[] = [];
for (const part of result.choices) {
const generation: ChatGeneration = {
@@ -623,10 +646,13 @@ export class ChatWatsonx<
});
const stream = await this.completionWithRetry(callback, options);
let defaultRole;
+ let usage: TextChatUsage | undefined;
+ let currentCompletion = 0;
for await (const chunk of stream) {
if (options.signal?.aborted) {
throw new Error("AbortError");
}
+ if (chunk?.data?.usage) usage = chunk.data.usage;
const { data } = chunk;
const choice = data.choices[0] as TextChatResultChoice &
Record<"delta", TextChatResultMessage>;
@@ -638,7 +664,7 @@ export class ChatWatsonx<
if (!delta) {
continue;
}
-
+ currentCompletion = choice.index ?? 0;
const newTokenIndices = {
prompt: options.promptIndex ?? 0,
completion: choice.index ?? 0,
@@ -682,6 +708,26 @@ export class ChatWatsonx<
{ chunk: generationChunk }
);
}
+
+ const generationChunk = new ChatGenerationChunk({
+ message: new AIMessageChunk({
+ content: "",
+ response_metadata: {
+ usage,
+ },
+ usage_metadata: {
+ input_tokens: usage?.prompt_tokens ?? 0,
+ output_tokens: usage?.completion_tokens ?? 0,
+ total_tokens: usage?.total_tokens ?? 0,
+ },
+ }),
+ text: "",
+ generationInfo: {
+ prompt: options.promptIndex ?? 0,
+ completion: currentCompletion ?? 0,
+ },
+ });
+ yield generationChunk;
}
/** @ignore */
@@ -760,7 +806,7 @@ export class ChatWatsonx<
},
],
// Ideally that would be set to required but this is not supported yet
- toolChoice: {
+ tool_choice: {
type: "function",
function: {
name: functionName,
@@ -796,7 +842,7 @@ export class ChatWatsonx<
},
],
// Ideally that would be set to required but this is not supported yet
- toolChoice: {
+ tool_choice: {
type: "function",
function: {
name: functionName,
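
The widened `tool_choice` now accepts the strings `"auto"`, `"none"`, and `"any"`/`"required"` (mapped to the SDK's `toolChoiceOption`), a bare tool name, or a full tool spec. A minimal sketch, assuming watsonx credentials in the environment:

```typescript
import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { ChatWatsonx } from "@langchain/community/chat_models/ibm";

const add = tool(async ({ a, b }) => String(a + b), {
  name: "add",
  description: "Adds two numbers.",
  schema: z.object({ a: z.number(), b: z.number() }),
});

const model = new ChatWatsonx({
  model: "meta-llama/llama-3-1-70b-instruct",
  version: "2024-05-31",
  serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "",
  projectId: process.env.WATSONX_AI_PROJECT_ID,
});

// A bare tool name forces that specific function to be called.
const res = await model
  .bindTools([add])
  .invoke("What is 2 + 2?", { tool_choice: "add" });
```
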
diff --git a/libs/langchain-community/src/chat_models/novita.ts b/libs/langchain-community/src/chat_models/novita.ts
new file mode 100644
index 000000000000..59726db84238
--- /dev/null
+++ b/libs/langchain-community/src/chat_models/novita.ts
@@ -0,0 +1,147 @@
+import type {
+ BaseChatModelParams,
+ LangSmithParams,
+} from "@langchain/core/language_models/chat_models";
+import {
+ type OpenAIClient,
+ type ChatOpenAICallOptions,
+ type OpenAIChatInput,
+ type OpenAICoreRequestOptions,
+ ChatOpenAI,
+} from "@langchain/openai";
+import { getEnvironmentVariable } from "@langchain/core/utils/env";
+
+type NovitaUnsupportedArgs =
+ | "frequencyPenalty"
+ | "presencePenalty"
+ | "logitBias"
+ | "functions";
+
+type NovitaUnsupportedCallOptions = "functions" | "function_call";
+
+export interface ChatNovitaCallOptions
+  extends Omit<ChatOpenAICallOptions, NovitaUnsupportedCallOptions> {
+ response_format: {
+ type: "json_object";
+    schema: Record<string, unknown>;
+ };
+}
+
+export interface ChatNovitaInput
+  extends Omit<OpenAIChatInput, "openAIApiKey" | NovitaUnsupportedArgs>,
+ BaseChatModelParams {
+ /**
+ * Novita API key
+ * @default process.env.NOVITA_API_KEY
+ */
+ novitaApiKey?: string;
+ /**
+ * API key alias
+ * @default process.env.NOVITA_API_KEY
+ */
+ apiKey?: string;
+}
+
+/**
+ * Novita chat model implementation
+ */
+export class ChatNovitaAI extends ChatOpenAI<ChatNovitaCallOptions> {
+ static lc_name() {
+ return "ChatNovita";
+ }
+
+ _llmType() {
+ return "novita";
+ }
+
+ get lc_secrets(): { [key: string]: string } | undefined {
+ return {
+ novitaApiKey: "NOVITA_API_KEY",
+ apiKey: "NOVITA_API_KEY",
+ };
+ }
+
+ lc_serializable = true;
+
+ constructor(
+ fields?: Partial<
+      Omit<OpenAIChatInput, "openAIApiKey" | NovitaUnsupportedArgs>
+ > &
+ BaseChatModelParams & {
+ novitaApiKey?: string;
+ apiKey?: string;
+ }
+ ) {
+ const novitaApiKey =
+ fields?.apiKey ||
+ fields?.novitaApiKey ||
+ getEnvironmentVariable("NOVITA_API_KEY");
+
+ if (!novitaApiKey) {
+ throw new Error(
+        `Novita API key not found. Please set the NOVITA_API_KEY environment variable or provide the key via the "novitaApiKey" or "apiKey" field.`
+ );
+ }
+
+ super({
+ ...fields,
+ model: fields?.model || "gryphe/mythomax-l2-13b",
+ apiKey: novitaApiKey,
+ configuration: {
+ baseURL: "https://api.novita.ai/v3/openai/",
+ },
+ });
+ }
+
+ getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
+ const params = super.getLsParams(options);
+ params.ls_provider = "novita";
+ return params;
+ }
+
+ toJSON() {
+ const result = super.toJSON();
+
+ if (
+ "kwargs" in result &&
+ typeof result.kwargs === "object" &&
+ result.kwargs != null
+ ) {
+ delete result.kwargs.openai_api_key;
+ delete result.kwargs.configuration;
+ }
+
+ return result;
+ }
+
+ async completionWithRetry(
+ request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
+ options?: OpenAICoreRequestOptions
+  ): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
+
+ async completionWithRetry(
+ request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
+ options?: OpenAICoreRequestOptions
+  ): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
+
+ async completionWithRetry(
+ request:
+ | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
+ | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
+ options?: OpenAICoreRequestOptions
+ ): Promise<
+    | AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>
+ | OpenAIClient.Chat.Completions.ChatCompletion
+ > {
+ delete request.frequency_penalty;
+ delete request.presence_penalty;
+ delete request.logit_bias;
+ delete request.functions;
+
+ if (request.stream === true) {
+ return super.completionWithRetry(request, options);
+ }
+
+ return super.completionWithRetry(request, options);
+ }
+}
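
Usage mirrors the other OpenAI-compatible providers; a minimal sketch, with the key read from `NOVITA_API_KEY` when not passed explicitly:

```typescript
import { ChatNovitaAI } from "@langchain/community/chat_models/novita";

const chat = new ChatNovitaAI({
  model: "gryphe/mythomax-l2-13b", // the default shown above
  temperature: 0.7,
});

const res = await chat.invoke("Say hello in German.");
console.log(res.content);
```
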
diff --git a/libs/langchain-community/src/chat_models/tests/chatnovita.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatnovita.int.test.ts
new file mode 100644
index 000000000000..91a134d45c57
--- /dev/null
+++ b/libs/langchain-community/src/chat_models/tests/chatnovita.int.test.ts
@@ -0,0 +1,91 @@
+import { describe, test } from "@jest/globals";
+import { ChatMessage, HumanMessage } from "@langchain/core/messages";
+import {
+ PromptTemplate,
+ ChatPromptTemplate,
+ AIMessagePromptTemplate,
+ HumanMessagePromptTemplate,
+ SystemMessagePromptTemplate,
+} from "@langchain/core/prompts";
+import { ChatNovitaAI } from "../novita.js";
+
+describe("ChatNovitaAI", () => {
+ test("invoke", async () => {
+ const chat = new ChatNovitaAI();
+ const message = new HumanMessage("Hello!");
+ const res = await chat.invoke([message]);
+ expect(res.content.length).toBeGreaterThan(10);
+ });
+
+ test("generate", async () => {
+ const chat = new ChatNovitaAI();
+ const message = new HumanMessage("Hello!");
+ const res = await chat.generate([[message]]);
+ expect(res.generations[0][0].text.length).toBeGreaterThan(10);
+ });
+
+ test("custom messages", async () => {
+ const chat = new ChatNovitaAI();
+ const res = await chat.invoke([new ChatMessage("Hello!", "user")]);
+ expect(res.content.length).toBeGreaterThan(2);
+ });
+
+ test("chaining", async () => {
+ const chat = new ChatNovitaAI();
+ const prompt = ChatPromptTemplate.fromMessages([
+ [
+ "system",
+ "You are a helpful assistant that translates {input_language} to {output_language}.",
+ ],
+ ["human", "{input}"],
+ ]);
+
+ const chain = prompt.pipe(chat);
+ const response = await chain.invoke({
+ input_language: "English",
+ output_language: "German",
+ input: "I love programming.",
+ });
+
+ expect(response.content.length).toBeGreaterThan(10);
+ });
+
+ test("prompt templates", async () => {
+ const chat = new ChatNovitaAI();
+
+ const systemPrompt = PromptTemplate.fromTemplate(
+ "You are a helpful assistant who must always respond like a {job}."
+ );
+
+ const chatPrompt = ChatPromptTemplate.fromMessages([
+ new SystemMessagePromptTemplate(systemPrompt),
+ HumanMessagePromptTemplate.fromTemplate("{text}"),
+ ]);
+
+ const responseA = await chat.generatePrompt([
+ await chatPrompt.formatPromptValue({
+ job: "pirate",
+ text: "What would be a good company name a company that makes colorful socks?",
+ }),
+ ]);
+ expect(responseA.generations[0][0].text.length).toBeGreaterThan(10);
+ });
+
+ test("longer chain of messages", async () => {
+ const chat = new ChatNovitaAI();
+
+ const chatPrompt = ChatPromptTemplate.fromMessages([
+ HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
+ AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
+ HumanMessagePromptTemplate.fromTemplate("{text}"),
+ ]);
+
+ const responseA = await chat.generatePrompt([
+ await chatPrompt.formatPromptValue({
+ text: "What did I just say my name was?",
+ }),
+ ]);
+ expect(responseA.generations[0][0].text.length).toBeGreaterThan(10);
+ });
+});
diff --git a/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts
index 2f1d118d92a4..be8d6615a402 100644
--- a/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts
+++ b/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts
@@ -150,7 +150,7 @@ describe("Tests for chat", () => {
controller.abort();
return res;
}).rejects.toThrow();
- }, 5000);
+ });
});
describe("Test ChatWatsonx invoke and generate with stream mode", () => {
@@ -357,7 +357,7 @@ describe("Tests for chat", () => {
controller.abort();
return res;
}).rejects.toThrow();
- }, 5000);
+ });
});
describe("Test ChatWatsonx stream", () => {
@@ -415,7 +415,7 @@ describe("Tests for chat", () => {
}
expect(hasEntered).toBe(true);
}).rejects.toThrow();
- }, 5000);
+ });
test("Token count and response equality", async () => {
let generation = "";
const service = new ChatWatsonx({
diff --git a/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts
index 545ed3c06fa9..68b967d972b7 100644
--- a/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts
+++ b/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts
@@ -26,7 +26,7 @@ class ChatWatsonxStandardIntegrationTests extends ChatModelIntegrationTests<
chatModelHasToolCalling: true,
chatModelHasStructuredOutput: true,
constructorArgs: {
- model: "mistralai/mistral-large",
+ model: "meta-llama/llama-3-1-70b-instruct",
version: "2024-05-31",
serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString",
projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
diff --git a/libs/langchain-community/src/document_compressors/ibm.ts b/libs/langchain-community/src/document_compressors/ibm.ts
index 348f60685480..026219cc8fa8 100644
--- a/libs/langchain-community/src/document_compressors/ibm.ts
+++ b/libs/langchain-community/src/document_compressors/ibm.ts
@@ -115,6 +115,9 @@ export class WatsonxRerank
...this.scopeId(),
inputs,
query,
+ parameters: {
+ truncate_input_tokens: this.truncateInputTokens,
+ },
})
);
const resultDocuments = result.results.map(({ index, score }) => {
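
With `truncateInputTokens` forwarded as a rerank parameter, over-long passages are truncated server-side instead of failing the request. A minimal sketch, assuming watsonx credentials in the environment:

```typescript
import { WatsonxRerank } from "@langchain/community/document_compressors/ibm";

const reranker = new WatsonxRerank({
  model: "cross-encoder/ms-marco-minilm-l-12-v2",
  version: "2024-05-31",
  serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "",
  projectId: process.env.WATSONX_AI_PROJECT_ID,
  truncateInputTokens: 512, // clip inputs instead of erroring
});

const docs = [{ pageContent: "watsonx is IBM's AI platform. ".repeat(200) }];
const scored = await reranker.rerank(docs, "What is watsonx?");
```
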
diff --git a/libs/langchain-community/src/document_compressors/tests/ibm.int.test.ts b/libs/langchain-community/src/document_compressors/tests/ibm.int.test.ts
index e65ea9e1eff3..6994bcec7c1a 100644
--- a/libs/langchain-community/src/document_compressors/tests/ibm.int.test.ts
+++ b/libs/langchain-community/src/document_compressors/tests/ibm.int.test.ts
@@ -40,6 +40,25 @@ describe("Integration tests on WatsonxRerank", () => {
expect(typeof item.metadata.relevanceScore).toBe("number")
);
});
+
+ test("Basic call with truncation", async () => {
+ const instance = new WatsonxRerank({
+ model: "cross-encoder/ms-marco-minilm-l-12-v2",
+ serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
+ version: "2024-05-31",
+ projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
+ truncateInputTokens: 512,
+ });
+ const longerDocs: Document[] = docs.map((item) => ({
+ pageContent: item.pageContent.repeat(100),
+ metadata: {},
+ }));
+ const result = await instance.compressDocuments(longerDocs, query);
+ expect(result.length).toBe(docs.length);
+ result.forEach((item) =>
+ expect(typeof item.metadata.relevanceScore).toBe("number")
+ );
+ });
});
describe(".rerank() method", () => {
@@ -57,24 +76,42 @@ describe("Integration tests on WatsonxRerank", () => {
expect(item.input).toBeUndefined();
});
});
- });
- test("Basic call with options", async () => {
- const instance = new WatsonxRerank({
- model: "cross-encoder/ms-marco-minilm-l-12-v2",
- serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
- version: "2024-05-31",
- projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
- });
- const result = await instance.rerank(docs, query, {
- returnOptions: {
- topN: 3,
- inputs: true,
- },
+ test("Basic call with options", async () => {
+ const instance = new WatsonxRerank({
+ model: "cross-encoder/ms-marco-minilm-l-12-v2",
+ serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
+ version: "2024-05-31",
+ projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
+ });
+ const result = await instance.rerank(docs, query, {
+ returnOptions: {
+ topN: 3,
+ inputs: true,
+ },
+ });
+ expect(result.length).toBe(3);
+ result.forEach((item) => {
+ expect(typeof item.relevanceScore).toBe("number");
+ expect(item.input).toBeDefined();
+ });
});
- expect(result.length).toBe(3);
- result.forEach((item) => {
- expect(typeof item.relevanceScore).toBe("number");
- expect(item.input).toBeDefined();
+ test("Basic call with truncation", async () => {
+ const instance = new WatsonxRerank({
+ model: "cross-encoder/ms-marco-minilm-l-12-v2",
+ serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
+ version: "2024-05-31",
+ projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString",
+ });
+ const longerDocs = docs.map((item) => ({
+ pageContent: item.pageContent.repeat(100),
+ }));
+ const result = await instance.rerank(longerDocs, query, {
+ truncateInputTokens: 512,
+ });
+ result.forEach((item) => {
+ expect(typeof item.relevanceScore).toBe("number");
+ expect(item.input).toBeUndefined();
+ });
});
});
});
diff --git a/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts b/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts
index c85fad11e763..2423c8cef210 100644
--- a/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts
+++ b/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts
@@ -10,7 +10,7 @@ import {
test.skip("convertToGraphDocuments", async () => {
const model = new ChatOpenAI({
temperature: 0,
- modelName: "gpt-4-turbo-preview",
+ modelName: "gpt-4o-mini",
});
const llmGraphTransformer = new LLMGraphTransformer({
@@ -22,14 +22,12 @@ test.skip("convertToGraphDocuments", async () => {
const result = await llmGraphTransformer.convertToGraphDocuments([
new Document({ pageContent: "Elon Musk is suing OpenAI" }),
]);
-
- // console.log(result);
});
test("convertToGraphDocuments with allowed", async () => {
const model = new ChatOpenAI({
temperature: 0,
- modelName: "gpt-4-turbo-preview",
+ modelName: "gpt-4o-mini",
});
const llmGraphTransformer = new LLMGraphTransformer({
@@ -42,8 +40,6 @@ test("convertToGraphDocuments with allowed", async () => {
new Document({ pageContent: "Elon Musk is suing OpenAI" }),
]);
- // console.log(JSON.stringify(result));
-
expect(result).toEqual([
new GraphDocument({
nodes: [
@@ -68,7 +64,7 @@ test("convertToGraphDocuments with allowed", async () => {
test("convertToGraphDocuments with allowed lowercased", async () => {
const model = new ChatOpenAI({
temperature: 0,
- modelName: "gpt-4-turbo-preview",
+ modelName: "gpt-4o-mini",
});
const llmGraphTransformer = new LLMGraphTransformer({
@@ -81,8 +77,6 @@ test("convertToGraphDocuments with allowed lowercased", async () => {
new Document({ pageContent: "Elon Musk is suing OpenAI" }),
]);
- // console.log(JSON.stringify(result));
-
expect(result).toEqual([
new GraphDocument({
nodes: [
@@ -103,3 +97,82 @@ test("convertToGraphDocuments with allowed lowercased", async () => {
}),
]);
});
+
+test("convertToGraphDocuments with node properties", async () => {
+ const model = new ChatOpenAI({
+ temperature: 0,
+ modelName: "gpt-4o-mini",
+ });
+
+ const llmGraphTransformer = new LLMGraphTransformer({
+ llm: model,
+ allowedNodes: ["Person"],
+ allowedRelationships: ["KNOWS"],
+ nodeProperties: ["age", "country"],
+ });
+
+ const result = await llmGraphTransformer.convertToGraphDocuments([
+ new Document({ pageContent: "John is 30 years old and lives in Spain" }),
+ ]);
+
+ expect(result).toEqual([
+ new GraphDocument({
+ nodes: [
+ new Node({
+ id: "John",
+ type: "Person",
+ properties: {
+ age: "30",
+ country: "Spain",
+ },
+ }),
+ ],
+ relationships: [],
+ source: new Document({
+ pageContent: "John is 30 years old and lives in Spain",
+ metadata: {},
+ }),
+ }),
+ ]);
+});
+
+test("convertToGraphDocuments with relationship properties", async () => {
+ const model = new ChatOpenAI({
+ temperature: 0,
+ modelName: "gpt-4o-mini",
+ });
+
+ const llmGraphTransformer = new LLMGraphTransformer({
+ llm: model,
+ allowedNodes: ["Person"],
+ allowedRelationships: ["KNOWS"],
+ relationshipProperties: ["since"],
+ });
+
+ const result = await llmGraphTransformer.convertToGraphDocuments([
+ new Document({ pageContent: "John has known Mary since 2020" }),
+ ]);
+
+ expect(result).toEqual([
+ new GraphDocument({
+ nodes: [
+ new Node({ id: "John", type: "Person" }),
+ new Node({ id: "Mary", type: "Person" }),
+ ],
+ relationships: [
+ new Relationship({
+ source: new Node({ id: "John", type: "Person" }),
+ target: new Node({ id: "Mary", type: "Person" }),
+ type: "KNOWS",
+ properties: {
+ since: "2020",
+ },
+ }),
+ ],
+ source: new Document({
+ pageContent: "John has known Mary since 2020",
+ metadata: {},
+ }),
+ }),
+ ]);
+});
diff --git a/libs/langchain-community/src/experimental/graph_transformers/llm.ts b/libs/langchain-community/src/experimental/graph_transformers/llm.ts
index 41167e09ad6f..53155ede9866 100644
--- a/libs/langchain-community/src/experimental/graph_transformers/llm.ts
+++ b/libs/langchain-community/src/experimental/graph_transformers/llm.ts
@@ -47,6 +47,11 @@ interface OptionalEnumFieldProps {
fieldKwargs?: object;
}
+interface SchemaProperty {
+ key: string;
+ value: string;
+}
+
function toTitleCase(str: string): string {
return str
.split(" ")
@@ -86,50 +91,112 @@ function createOptionalEnumType({
return schema;
}
-function createSchema(allowedNodes: string[], allowedRelationships: string[]) {
+function createNodeSchema(allowedNodes: string[], nodeProperties: string[]) {
+ const nodeSchema = z.object({
+ id: z.string(),
+ type: createOptionalEnumType({
+ enumValues: allowedNodes,
+ description: "The type or label of the node.",
+ }),
+ });
+
+ return nodeProperties.length > 0
+ ? nodeSchema.extend({
+ properties: z
+ .array(
+ z.object({
+ key: createOptionalEnumType({
+ enumValues: nodeProperties,
+ description: "Property key.",
+ }),
+ value: z.string().describe("Extracted value."),
+ })
+ )
+ .describe(`List of node properties`),
+ })
+ : nodeSchema;
+}
+
+function createRelationshipSchema(
+ allowedNodes: string[],
+ allowedRelationships: string[],
+ relationshipProperties: string[]
+) {
+ const relationshipSchema = z.object({
+ sourceNodeId: z.string(),
+ sourceNodeType: createOptionalEnumType({
+ enumValues: allowedNodes,
+ description: "The source node of the relationship.",
+ }),
+ relationshipType: createOptionalEnumType({
+ enumValues: allowedRelationships,
+ description: "The type of the relationship.",
+ isRel: true,
+ }),
+ targetNodeId: z.string(),
+ targetNodeType: createOptionalEnumType({
+ enumValues: allowedNodes,
+ description: "The target node of the relationship.",
+ }),
+ });
+
+ return relationshipProperties.length > 0
+ ? relationshipSchema.extend({
+ properties: z
+ .array(
+ z.object({
+ key: createOptionalEnumType({
+ enumValues: relationshipProperties,
+ description: "Property key.",
+ }),
+ value: z.string().describe("Extracted value."),
+ })
+ )
+ .describe(`List of relationship properties`),
+ })
+ : relationshipSchema;
+}
+
+function createSchema(
+ allowedNodes: string[],
+ allowedRelationships: string[],
+ nodeProperties: string[],
+ relationshipProperties: string[]
+) {
+ const nodeSchema = createNodeSchema(allowedNodes, nodeProperties);
+ const relationshipSchema = createRelationshipSchema(
+ allowedNodes,
+ allowedRelationships,
+ relationshipProperties
+ );
+
const dynamicGraphSchema = z.object({
- nodes: z
- .array(
- z.object({
- id: z.string(),
- type: createOptionalEnumType({
- enumValues: allowedNodes,
- description: "The type or label of the node.",
- }),
- })
- )
- .describe("List of nodes"),
+ nodes: z.array(nodeSchema).describe("List of nodes"),
relationships: z
- .array(
- z.object({
- sourceNodeId: z.string(),
- sourceNodeType: createOptionalEnumType({
- enumValues: allowedNodes,
- description: "The source node of the relationship.",
- }),
- relationshipType: createOptionalEnumType({
- enumValues: allowedRelationships,
- description: "The type of the relationship.",
- isRel: true,
- }),
- targetNodeId: z.string(),
- targetNodeType: createOptionalEnumType({
- enumValues: allowedNodes,
- description: "The target node of the relationship.",
- }),
- })
- )
+ .array(relationshipSchema)
.describe("List of relationships."),
});
return dynamicGraphSchema;
}
+function convertPropertiesToRecord(
+ properties: SchemaProperty[]
+): Record<string, string> {
+  return properties.reduce((accumulator: Record<string, string>, prop) => {
+ accumulator[prop.key] = prop.value;
+ return accumulator;
+ }, {});
+}
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function mapToBaseNode(node: any): Node {
return new Node({
id: node.id,
type: node.type ? toTitleCase(node.type) : "",
+ properties: node.properties
+ ? convertPropertiesToRecord(node.properties)
+ : {},
});
}
@@ -149,6 +216,9 @@ function mapToBaseRelationship(relationship: any): Relationship {
: "",
}),
type: relationship.relationshipType.replace(" ", "_").toUpperCase(),
+ properties: relationship.properties
+ ? convertPropertiesToRecord(relationship.properties)
+ : {},
});
}
@@ -158,6 +228,8 @@ export interface LLMGraphTransformerProps {
allowedRelationships?: string[];
prompt?: ChatPromptTemplate;
strictMode?: boolean;
+ nodeProperties?: string[];
+ relationshipProperties?: string[];
}
export class LLMGraphTransformer {
@@ -170,12 +242,18 @@ export class LLMGraphTransformer {
strictMode: boolean;
+ nodeProperties: string[];
+
+ relationshipProperties: string[];
+
constructor({
llm,
allowedNodes = [],
allowedRelationships = [],
prompt = DEFAULT_PROMPT,
strictMode = true,
+ nodeProperties = [],
+ relationshipProperties = [],
}: LLMGraphTransformerProps) {
if (typeof llm.withStructuredOutput !== "function") {
throw new Error(
@@ -186,9 +264,16 @@ export class LLMGraphTransformer {
this.allowedNodes = allowedNodes;
this.allowedRelationships = allowedRelationships;
this.strictMode = strictMode;
+ this.nodeProperties = nodeProperties;
+ this.relationshipProperties = relationshipProperties;
// Define chain
- const schema = createSchema(allowedNodes, allowedRelationships);
+ const schema = createSchema(
+ allowedNodes,
+ allowedRelationships,
+ nodeProperties,
+ relationshipProperties
+ );
const structuredLLM = llm.withStructuredOutput(zodToJsonSchema(schema));
this.chain = prompt.pipe(structuredLLM);
}
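
The new `nodeProperties` and `relationshipProperties` options extend the structured-output schema so the LLM also extracts key/value properties, which then land on the resulting `Node` and `Relationship` objects. A minimal sketch matching the tests above:

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { LLMGraphTransformer } from "@langchain/community/experimental/graph_transformers/llm";

const transformer = new LLMGraphTransformer({
  llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  allowedNodes: ["Person"],
  allowedRelationships: ["KNOWS"],
  nodeProperties: ["age", "country"],
  relationshipProperties: ["since"],
});

// Nodes come back with { age, country } and KNOWS edges with { since }.
const graphDocs = await transformer.convertToGraphDocuments([
  new Document({ pageContent: "John has known Mary since 2020" }),
]);
```
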
diff --git a/libs/langchain-community/src/llms/ibm.ts b/libs/langchain-community/src/llms/ibm.ts
index a0e8a292f0bf..5647e2f3ae9f 100644
--- a/libs/langchain-community/src/llms/ibm.ts
+++ b/libs/langchain-community/src/llms/ibm.ts
@@ -259,7 +259,9 @@ export class WatsonxLLM<
input: string,
options: this["ParsedCallOptions"],
stream: true
-  ): Promise<AsyncIterable<string>>;
+ ): Promise<
+    AsyncIterable<ObjectStreamed<TextGenResponse>>
+ >;
private async generateSingleMessage(
input: string,
@@ -294,14 +296,16 @@ export class WatsonxLLM<
input,
},
},
+ returnObject: true,
})
: await this.service.generateTextStream({
input,
parameters,
...this.scopeId(),
...requestOptions,
+ returnObject: true,
});
-    return textStream as unknown as AsyncIterable<string>;
+ return textStream;
} else {
const textGenerationPromise = idOrName
? this.service.deploymentGenerateText({
@@ -367,7 +371,7 @@ export class WatsonxLLM<
async _generate(
prompts: string[],
options: this["ParsedCallOptions"],
- _runManager?: CallbackManagerForLLMRun
+ runManager?: CallbackManagerForLLMRun
): Promise {
const tokenUsage: TokenUsage = {
generated_token_count: 0,
@@ -379,70 +383,38 @@ export class WatsonxLLM<
if (options.signal?.aborted) {
throw new Error("AbortError");
}
- const callback = () =>
- this.generateSingleMessage(prompt, options, true);
-      type ReturnMessage = ReturnType<typeof callback>;
-          const stream = await this.completionWithRetry<ReturnMessage>(
- callback,
- options
- );
+ const stream = this._streamResponseChunks(prompt, options);
+            const generationsArray: GenerationInfo[] = [];
- const responseChunk: ResponseChunk = {
- id: 0,
- event: "",
- data: {
- results: [],
- },
- };
- const messages: ResponseChunk[] = [];
- type ResponseChunkKeys = keyof ResponseChunk;
for await (const chunk of stream) {
- if (chunk.length > 0) {
- const index = chunk.indexOf(": ");
- const [key, value] = [
- chunk.substring(0, index) as ResponseChunkKeys,
- chunk.substring(index + 2),
- ];
- if (key === "id") {
- responseChunk[key] = Number(value);
- } else if (key === "event") {
- responseChunk[key] = String(value);
- } else {
- responseChunk[key] = JSON.parse(value);
- }
- } else if (chunk.length === 0) {
- messages.push(JSON.parse(JSON.stringify(responseChunk)));
- Object.assign(responseChunk, { id: 0, event: "", data: {} });
- }
- }
-
-        const generationsArray: GenerationInfo[] = [];
- for (const message of messages) {
- message.data.results.forEach((item, index) => {
- const generationInfo: GenerationInfo = {
- text: "",
- stop_reason: "",
- generated_token_count: 0,
- input_token_count: 0,
- };
- void _runManager?.handleLLMNewToken(item.generated_text ?? "", {
+ const completion = chunk?.generationInfo?.completion ?? 0;
+ const generationInfo: GenerationInfo = {
+ text: "",
+ stop_reason: "",
+ generated_token_count: 0,
+ input_token_count: 0,
+ };
+            generationsArray[completion] ??= generationInfo;
+            generationsArray[completion].generated_token_count =
+              chunk?.generationInfo?.usage_metadata.generated_token_count ?? 0;
+            generationsArray[completion].input_token_count +=
+              chunk?.generationInfo?.usage_metadata.input_token_count ?? 0;
+            generationsArray[completion].stop_reason =
+              chunk?.generationInfo?.stop_reason;
+            generationsArray[completion].text += chunk.text;
+ if (chunk.text)
+ void runManager?.handleLLMNewToken(chunk.text, {
prompt: promptIdx,
- completion: 1,
+ completion: 0,
});
-          generationsArray[index] ??= generationInfo;
-          generationsArray[index].generated_token_count =
-            item.generated_token_count;
-          generationsArray[index].input_token_count +=
-            item.input_token_count;
-          generationsArray[index].stop_reason = item.stop_reason;
-          generationsArray[index].text += item.generated_text;
- });
}
+
       return generationsArray.map((item) => {
const { text, ...rest } = item;
- tokenUsage.generated_token_count += rest.generated_token_count;
+ tokenUsage.generated_token_count = rest.generated_token_count;
tokenUsage.input_token_count += rest.input_token_count;
+
return {
text,
generationInfo: rest,
@@ -527,35 +499,23 @@ export class WatsonxLLM<
throw new Error("AbortError");
}
- type Keys = keyof typeof responseChunk;
- if (chunk.length > 0) {
- const index = chunk.indexOf(": ");
- const [key, value] = [
- chunk.substring(0, index) as Keys,
- chunk.substring(index + 2),
- ];
- if (key === "id") {
- responseChunk[key] = Number(value);
- } else if (key === "event") {
- responseChunk[key] = String(value);
- } else {
- responseChunk[key] = JSON.parse(value);
- }
- } else if (
- chunk.length === 0 &&
- responseChunk.data?.results?.length > 0
- ) {
- for (const item of responseChunk.data.results) {
- yield new GenerationChunk({
- text: item.generated_text,
- generationInfo: {
+ for (const [index, item] of chunk.data.results.entries()) {
+ yield new GenerationChunk({
+ text: item.generated_text,
+ generationInfo: {
+ stop_reason: item.stop_reason,
+ completion: index,
+ usage_metadata: {
+ generated_token_count: item.generated_token_count,
+ input_token_count: item.input_token_count,
stop_reason: item.stop_reason,
},
- });
- await runManager?.handleLLMNewToken(item.generated_text ?? "");
- }
- Object.assign(responseChunk, { id: 0, event: "", data: {} });
+ },
+ });
+ if (item.generated_text)
+ void runManager?.handleLLMNewToken(item.generated_text);
}
+ Object.assign(responseChunk, { id: 0, event: "", data: {} });
}
}
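
After this refactor, `_generate` consumes `_streamResponseChunks`, so streamed and non-streamed calls share one parsed-object pipeline (`returnObject: true`) instead of hand-parsing SSE lines. Calling code is unchanged; a minimal sketch, assuming watsonx credentials in the environment:

```typescript
import { WatsonxLLM } from "@langchain/community/llms/ibm";

const model = new WatsonxLLM({
  model: "ibm/granite-13b-chat-v2",
  version: "2024-05-31",
  serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "",
  projectId: process.env.WATSONX_AI_PROJECT_ID,
});

// Tokens arrive through the same chunk pipeline _generate now uses.
const stream = await model.stream("Print hello world");
for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```
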
diff --git a/libs/langchain-community/src/llms/tests/ibm.int.test.ts b/libs/langchain-community/src/llms/tests/ibm.int.test.ts
index dfeebedd39e2..369b657fb4ca 100644
--- a/libs/langchain-community/src/llms/tests/ibm.int.test.ts
+++ b/libs/langchain-community/src/llms/tests/ibm.int.test.ts
@@ -172,7 +172,6 @@ describe("Text generation", () => {
let usedTokens = 0;
const model = new WatsonxLLM({
model: "ibm/granite-13b-chat-v2",
- maxConcurrency: 1,
version: "2024-05-31",
serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string,
projectId: process.env.WATSONX_AI_PROJECT_ID,
@@ -190,7 +189,7 @@ describe("Text generation", () => {
}),
});
- const res = await model.invoke(" Print hello world?");
+ const res = await model.invoke("Print hello world?");
expect(countedTokens).toBe(usedTokens);
expect(res).toBe(streamedText);
});
diff --git a/libs/langchain-community/src/load/import_map.ts b/libs/langchain-community/src/load/import_map.ts
index 8b3b734a82c1..cfc0af93456c 100644
--- a/libs/langchain-community/src/load/import_map.ts
+++ b/libs/langchain-community/src/load/import_map.ts
@@ -51,6 +51,7 @@ export * as chat_models__fireworks from "../chat_models/fireworks.js";
export * as chat_models__friendli from "../chat_models/friendli.js";
export * as chat_models__minimax from "../chat_models/minimax.js";
export * as chat_models__moonshot from "../chat_models/moonshot.js";
+export * as chat_models__novita from "../chat_models/novita.js";
export * as chat_models__ollama from "../chat_models/ollama.js";
export * as chat_models__togetherai from "../chat_models/togetherai.js";
export * as chat_models__yandex from "../chat_models/yandex.js";
@@ -67,6 +68,7 @@ export * as caches__upstash_redis from "../caches/upstash_redis.js";
export * as stores__doc__base from "../stores/doc/base.js";
export * as stores__doc__gcs from "../stores/doc/gcs.js";
export * as stores__doc__in_memory from "../stores/doc/in_memory.js";
+export * as stores__message__file_system from "../stores/message/file_system.js";
export * as stores__message__in_memory from "../stores/message/in_memory.js";
export * as memory__chat_memory from "../memory/chat_memory.js";
export * as indexes__base from "../indexes/base.js";
diff --git a/libs/langchain-community/src/load/import_type.ts b/libs/langchain-community/src/load/import_type.ts
index 51536706142c..097584aef493 100644
--- a/libs/langchain-community/src/load/import_type.ts
+++ b/libs/langchain-community/src/load/import_type.ts
@@ -39,6 +39,7 @@ export interface SecretMap {
MINIMAX_API_KEY?: string;
MINIMAX_GROUP_ID?: string;
MOONSHOT_API_KEY?: string;
+ NOVITA_API_KEY?: string;
PLANETSCALE_DATABASE_URL?: string;
PLANETSCALE_HOST?: string;
PLANETSCALE_PASSWORD?: string;
diff --git a/libs/langchain-community/src/stores/message/file_system.ts b/libs/langchain-community/src/stores/message/file_system.ts
new file mode 100644
index 000000000000..f81af5f7a4ef
--- /dev/null
+++ b/libs/langchain-community/src/stores/message/file_system.ts
@@ -0,0 +1,199 @@
+import { promises as fs } from "node:fs";
+import { dirname } from "node:path";
+
+import { BaseListChatMessageHistory } from "@langchain/core/chat_history";
+import {
+ BaseMessage,
+ StoredMessage,
+ mapChatMessagesToStoredMessages,
+ mapStoredMessagesToChatMessages,
+} from "@langchain/core/messages";
+
+export const FILE_HISTORY_DEFAULT_FILE_PATH = ".history/history.json";
+
+/**
+ * Represents a lightweight file chat session.
+ */
+export type FileChatSession = {
+ id: string;
+  context: Record<string, unknown>;
+};
+
+/**
+ * Represents a stored chat session.
+ */
+export type StoredFileChatSession = FileChatSession & {
+ messages: StoredMessage[];
+};
+
+/**
+ * Type for the store of chat sessions.
+ */
+export type FileChatStore = {
+  [userId: string]: Record<string, StoredFileChatSession>;
+};
+
+/**
+ * Type for the input to the `FileSystemChatMessageHistory` constructor.
+ */
+export interface FileSystemChatMessageHistoryInput {
+ sessionId: string;
+ userId?: string;
+ filePath?: string;
+}
+
+let store: FileChatStore;
+
+/**
+ * Store chat message history using a local JSON file.
+ * For demo and development purposes only.
+ *
+ * @example
+ * ```typescript
+ * const model = new ChatOpenAI({
+ * model: "gpt-3.5-turbo",
+ * temperature: 0,
+ * });
+ * const prompt = ChatPromptTemplate.fromMessages([
+ * [
+ * "system",
+ * "You are a helpful assistant. Answer all questions to the best of your ability.",
+ * ],
+ * ["placeholder", "chat_history"],
+ * ["human", "{input}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(model).pipe(new StringOutputParser());
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ * runnable: chain,
+ * inputMessagesKey: "input",
+ * historyMessagesKey: "chat_history",
+ * getMessageHistory: async (sessionId) => {
+ * const chatHistory = new FileSystemChatMessageHistory({
+ * sessionId: sessionId,
+ * userId: "userId", // Optional
+ * })
+ * return chatHistory;
+ * },
+ * });
+ * await chainWithHistory.invoke(
+ * { input: "What did I just say my name was?" },
+ * { configurable: { sessionId: "session-id" } }
+ * );
+ * ```
+ */
+export class FileSystemChatMessageHistory extends BaseListChatMessageHistory {
+ lc_namespace = ["langchain", "stores", "message", "file"];
+
+ private sessionId: string;
+
+ private userId: string;
+
+ private filePath: string;
+
+ constructor(chatHistoryInput: FileSystemChatMessageHistoryInput) {
+ super();
+
+ this.sessionId = chatHistoryInput.sessionId;
+ this.userId = chatHistoryInput.userId ?? "";
+ this.filePath = chatHistoryInput.filePath ?? FILE_HISTORY_DEFAULT_FILE_PATH;
+ }
+
+  private async init(): Promise<void> {
+ if (store) {
+ return;
+ }
+ try {
+ store = await this.loadStore();
+ } catch (error) {
+ console.error("Error initializing FileSystemChatMessageHistory:", error);
+ throw error;
+ }
+ }
+
+  protected async loadStore(): Promise<FileChatStore> {
+ try {
+ await fs.access(this.filePath, fs.constants.F_OK);
+ const store = await fs.readFile(this.filePath, "utf-8");
+ return JSON.parse(store) as FileChatStore;
+ } catch (_error) {
+ const error = _error as NodeJS.ErrnoException;
+ if (error.code === "ENOENT") {
+ return {};
+ }
+ throw new Error(
+ `Error loading FileSystemChatMessageHistory store: ${error}`
+ );
+ }
+ }
+
+ protected async saveStore(): Promise<void> {
+ try {
+ await fs.mkdir(dirname(this.filePath), { recursive: true });
+ await fs.writeFile(this.filePath, JSON.stringify(store));
+ } catch (error) {
+ throw new Error(
+ `Error saving FileSystemChatMessageHistory store: ${error}`
+ );
+ }
+ }
+
+ async getMessages(): Promise<BaseMessage[]> {
+ await this.init();
+ const messages = store[this.userId]?.[this.sessionId]?.messages ?? [];
+ return mapStoredMessagesToChatMessages(messages);
+ }
+
+ async addMessage(message: BaseMessage): Promise<void> {
+ await this.init();
+ const messages = await this.getMessages();
+ messages.push(message);
+ const storedMessages = mapChatMessagesToStoredMessages(messages);
+ store[this.userId] ??= {};
+ store[this.userId][this.sessionId] = {
+ ...store[this.userId][this.sessionId],
+ messages: storedMessages,
+ };
+ await this.saveStore();
+ }
+
+ async clear(): Promise<void> {
+ await this.init();
+ if (store[this.userId]) {
+ delete store[this.userId][this.sessionId];
+ }
+ await this.saveStore();
+ }
+
+ async getContext(): Promise<Record<string, unknown>> {
+ await this.init();
+ return store[this.userId]?.[this.sessionId]?.context ?? {};
+ }
+
+ async setContext(context: Record<string, unknown>): Promise<void> {
+ await this.init();
+ store[this.userId] ??= {};
+ store[this.userId][this.sessionId] = {
+ ...store[this.userId][this.sessionId],
+ context,
+ };
+ await this.saveStore();
+ }
+
+ async clearAllSessions() {
+ await this.init();
+ delete store[this.userId];
+ await this.saveStore();
+ }
+
+ async getAllSessions(): Promise<FileChatSession[]> {
+ await this.init();
+ const userSessions = store[this.userId]
+ ? Object.values(store[this.userId]).map((session) => ({
+ id: session.id,
+ context: session.context,
+ }))
+ : [];
+ return userSessions;
+ }
+}
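Besides the `RunnableWithMessageHistory` wiring shown in the class JSDoc, the store can be driven directly. A minimal sketch, assuming the package exposes its usual per-module entrypoint for this new file:

```typescript
import { FileSystemChatMessageHistory } from "@langchain/community/stores/message/file_system";

const history = new FileSystemChatMessageHistory({
  sessionId: "session-1", // any stable identifier
  userId: "user-1", // optional, defaults to ""
  // filePath is optional and defaults to ".history/history.json"
});

await history.addUserMessage("Hi, I'm Bob.");
await history.addAIMessage("Hello Bob!");
await history.setContext({ title: "Greeting" });

const messages = await history.getMessages(); // [HumanMessage, AIMessage]
const sessions = await history.getAllSessions(); // [{ id, context }] for "user-1"
```

Note the module-level `store` cache: all instances in a process share one in-memory copy of the JSON file, which is why the class is flagged for demo and development use only.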
diff --git a/libs/langchain-community/src/stores/tests/file_chat_history.int.test.ts b/libs/langchain-community/src/stores/tests/file_chat_history.int.test.ts
new file mode 100644
index 000000000000..1610bcbee0e1
--- /dev/null
+++ b/libs/langchain-community/src/stores/tests/file_chat_history.int.test.ts
@@ -0,0 +1,147 @@
+/* eslint-disable no-promise-executor-return */
+
+import { expect } from "@jest/globals";
+import { promises as fs } from "node:fs";
+import { HumanMessage, AIMessage } from "@langchain/core/messages";
+import { v4 as uuid } from "uuid";
+import {
+ FILE_HISTORY_DEFAULT_FILE_PATH,
+ FileSystemChatMessageHistory,
+} from "../message/file_system.js";
+
+afterAll(async () => {
+ try {
+ await fs.unlink(FILE_HISTORY_DEFAULT_FILE_PATH);
+ } catch {
+ // Ignore error if the file does not exist
+ }
+});
+
+test("FileSystemChatMessageHistory works", async () => {
+ const input = {
+ sessionId: uuid(),
+ };
+ const chatHistory = new FileSystemChatMessageHistory(input);
+ const blankResult = await chatHistory.getMessages();
+ expect(blankResult).toStrictEqual([]);
+
+ await chatHistory.addUserMessage("Who is the best vocalist?");
+ await chatHistory.addAIMessage("Ozzy Osbourne");
+
+ const expectedMessages = [
+ new HumanMessage("Who is the best vocalist?"),
+ new AIMessage("Ozzy Osbourne"),
+ ];
+ const resultWithHistory = await chatHistory.getMessages();
+ expect(resultWithHistory).toEqual(expectedMessages);
+});
+
+test("FileSystemChatMessageHistory persist sessions", async () => {
+ const input = {
+ sessionId: uuid(),
+ };
+ const chatHistory1 = new FileSystemChatMessageHistory(input);
+ const blankResult = await chatHistory1.getMessages();
+ expect(blankResult).toStrictEqual([]);
+
+ await chatHistory1.addUserMessage("Who is the best vocalist?");
+ await chatHistory1.addAIMessage("Ozzy Osbourne");
+
+ const chatHistory2 = new FileSystemChatMessageHistory(input);
+ const expectedMessages = [
+ new HumanMessage("Who is the best vocalist?"),
+ new AIMessage("Ozzy Osbourne"),
+ ];
+ const resultWithHistory = await chatHistory2.getMessages();
+ expect(resultWithHistory).toEqual(expectedMessages);
+});
+
+test("FileSystemChatMessageHistory clear session", async () => {
+ const input = {
+ sessionId: uuid(),
+ userId: uuid(),
+ };
+ const chatHistory = new FileSystemChatMessageHistory(input);
+
+ await chatHistory.addUserMessage("Who is the best vocalist?");
+ await chatHistory.addAIMessage("Ozzy Osbourne");
+
+ const expectedMessages = [
+ new HumanMessage("Who is the best vocalist?"),
+ new AIMessage("Ozzy Osbourne"),
+ ];
+ const resultWithHistory = await chatHistory.getMessages();
+ expect(resultWithHistory).toEqual(expectedMessages);
+
+ await chatHistory.clear();
+
+ const blankResult = await chatHistory.getMessages();
+ expect(blankResult).toStrictEqual([]);
+});
+
+test("FileSystemChatMessageHistory clear all sessions", async () => {
+ const input1 = {
+ sessionId: uuid(),
+ userId: "user1",
+ };
+ const chatHistory1 = new FileSystemChatMessageHistory(input1);
+
+ await chatHistory1.addUserMessage("Who is the best vocalist?");
+ await chatHistory1.addAIMessage("Ozzy Osbourne");
+
+ const input2 = {
+ sessionId: uuid(),
+ userId: "user1",
+ };
+ const chatHistory2 = new FileSystemChatMessageHistory(input2);
+
+ await chatHistory2.addUserMessage("Who is the best vocalist?");
+ await chatHistory2.addAIMessage("Ozzy Osbourne");
+
+ const expectedMessages = [
+ new HumanMessage("Who is the best vocalist?"),
+ new AIMessage("Ozzy Osbourne"),
+ ];
+
+ const result1 = await chatHistory1.getMessages();
+ expect(result1).toEqual(expectedMessages);
+
+ const result2 = await chatHistory2.getMessages();
+ expect(result2).toEqual(expectedMessages);
+
+ await chatHistory1.clearAllSessions();
+
+ const deletedResult1 = await chatHistory1.getMessages();
+ const deletedResult2 = await chatHistory2.getMessages();
+ expect(deletedResult1).toStrictEqual([]);
+ expect(deletedResult2).toStrictEqual([]);
+});
+
+test("FileSystemChatMessageHistory set context and get all sessions", async () => {
+ const session1 = {
+ sessionId: uuid(),
+ userId: "user1",
+ };
+ const context1 = { title: "Best vocalist" };
+ const chatHistory1 = new FileSystemChatMessageHistory(session1);
+
+ await chatHistory1.setContext(context1);
+ await chatHistory1.addUserMessage("Who is the best vocalist?");
+ await chatHistory1.addAIMessage("Ozzy Osbourne");
+
+ const chatHistory2 = new FileSystemChatMessageHistory({
+ sessionId: uuid(),
+ userId: "user1",
+ });
+ const context2 = { title: "Best guitarist" };
+
+ await chatHistory2.addUserMessage("Who is the best guitarist?");
+ await chatHistory2.addAIMessage("Jimi Hendrix");
+ await chatHistory2.setContext(context2);
+
+ const sessions = await chatHistory1.getAllSessions();
+
+ expect(sessions.length).toBe(2);
+ expect(sessions[0].context).toEqual(context1);
+ expect(sessions[1].context).toEqual(context2);
+});
diff --git a/libs/langchain-community/src/vectorstores/hanavector.ts b/libs/langchain-community/src/vectorstores/hanavector.ts
index 8f55568adb27..48e40b8ee48c 100644
--- a/libs/langchain-community/src/vectorstores/hanavector.ts
+++ b/libs/langchain-community/src/vectorstores/hanavector.ts
@@ -8,6 +8,73 @@ import { maximalMarginalRelevance } from "@langchain/core/utils/math";
export type DistanceStrategy = "euclidean" | "cosine";
+const COMPARISONS_TO_SQL: Record<string, string> = {
+ $eq: "=",
+ $ne: "<>",
+ $lt: "<",
+ $lte: "<=",
+ $gt: ">",
+ $gte: ">=",
+};
+
+// Base value types that can be used in comparisons
+type ComparisonRValue =
+ | string
+ | number
+ | boolean
+ | Date
+ | Array<string | number | boolean | Date>;
+// Available comparison operators for filtering
+type Comparator =
+ | "$eq"
+ | "$ne"
+ | "$lt"
+ | "$lte"
+ | "$gt"
+ | "$gte"
+ | "$in"
+ | "$nin"
+ | "$between"
+ | "$like";
+// Filter using comparison operators
+// Defines the relationship between a comparison operator and its value
+type ComparatorFilter = {
+ [K in Comparator]?: ComparisonRValue;
+};
+
+type LogicalOperator = "$and" | "$or";
+type LogicalFilter = {
+ [K in LogicalOperator]?: Filter[];
+};
+type PropertyFilter = {
+ [property: string]: string | number | boolean | Date | ComparatorFilter;
+};
+
+type Filter = PropertyFilter | LogicalFilter;
+
+interface DateValue {
+ type: "date";
+ date: string | Date;
+}
+
+const IN_OPERATORS_TO_SQL: Record<string, string> = {
+ $in: "IN",
+ $nin: "NOT IN",
+};
+
+const BETWEEN_OPERATOR_TO_SQL: Record<string, string> = {
+ $between: "BETWEEN",
+};
+
+const LIKE_OPERATOR_TO_SQL: Record<string, string> = {
+ $like: "LIKE",
+};
+
+const LOGICAL_OPERATORS_TO_SQL: Record<string, string> = {
+ $and: "AND",
+ $or: "OR",
+};
+
const HANA_DISTANCE_FUNCTION: Record<DistanceStrategy, string[]> = {
cosine: ["COSINE_SIMILARITY", "DESC"],
euclidean: ["L2DISTANCE", "ASC"],
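For reviewers, the operator tables above drive a Mongo-style filter syntax. These are the kinds of values the internal `Filter` type admits (a sketch; the field names are arbitrary, and the concrete cases mirror the test fixtures added later in this diff):

```typescript
// Plain equality on a metadata property:
const byName: Filter = { name: "adam" };

// Comparator filters:
const byRange: Filter = { height: { $gte: 5.0 }, name: { $like: "%a%" } };

// Logical operators compose nested filters:
const combined: Filter = {
  $and: [
    { $or: [{ id: { $eq: 1 } }, { id: { $in: [2, 3] } }] },
    { height: { $between: [1.0, 10.0] } },
  ],
};
```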
@@ -20,10 +87,6 @@ const defaultMetadataColumn = "VEC_META";
const defaultVectorColumn = "VEC_VECTOR";
const defaultVectorColumnLength = -1; // -1 means dynamic length
-interface Filter {
- [key: string]: boolean | string | number;
-}
-
/**
* Interface defining the arguments required to create an instance of
* `HanaDB`.
@@ -37,6 +100,7 @@ export interface HanaDBArgs {
metadataColumn?: string;
vectorColumn?: string;
vectorColumnLength?: number;
+ specificMetadataColumns?: string[];
}
export class HanaDB extends VectorStore {
@@ -60,6 +124,8 @@ export class HanaDB extends VectorStore {
declare FilterType: Filter;
+ private specificMetadataColumns: string[];
+
_vectorstoreType(): string {
return "hanadb";
}
@@ -78,9 +144,12 @@ export class HanaDB extends VectorStore {
args.vectorColumn || defaultVectorColumn
);
this.vectorColumnLength = HanaDB.sanitizeInt(
- args.vectorColumnLength || defaultVectorColumnLength
- ); // Using '??' to allow 0 as a valid value
-
+ args.vectorColumnLength || defaultVectorColumnLength,
+ -1
+ );
+ this.specificMetadataColumns = HanaDB.sanitizeSpecificMetadataColumns(
+ args.specificMetadataColumns || []
+ );
this.connection = args.connection;
}
@@ -166,15 +235,16 @@ export class HanaDB extends VectorStore {
}
/**
- * Sanitizes the input to integer. Throws an error if the value is less than -1.
+ * Sanitizes the input to integer. Throws an error if the value is less than lower bound.
* @param inputInt The input to be sanitized.
* @returns The sanitized integer.
*/
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- public static sanitizeInt(inputInt: any): number {
+ public static sanitizeInt(inputInt: number | string, lowerBound = 0): number {
const value = parseInt(inputInt.toString(), 10);
- if (Number.isNaN(value) || value < -1) {
- throw new Error(`Value (${value}) must not be smaller than -1`);
+ if (Number.isNaN(value) || value < lowerBound) {
+ throw new Error(
+ `Value (${value}) must not be smaller than ${lowerBound}`
+ );
}
return value;
}
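The generalized signature makes the lower bound explicit (default `0`); expected behavior, as exercised by the updated unit tests at the end of this diff:

```typescript
// Sketch of expected behavior (HanaDB import assumed):
HanaDB.sanitizeInt(42); // 42
HanaDB.sanitizeInt("7"); // 7 -- string input is parsed base 10
HanaDB.sanitizeInt(-1, -1); // -1 -- allowed when lowerBound is -1
HanaDB.sanitizeInt(-2, -1); // throws "Value (-2) must not be smaller than -1"
HanaDB.sanitizeInt("HUGO"); // throws -- NaN is rejected
```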
@@ -221,6 +291,10 @@ export class HanaDB extends VectorStore {
return metadata;
}
+ static sanitizeSpecificMetadataColumns(columns: string[]): string[] {
+ return columns.map((column) => this.sanitizeName(column));
+ }
+
/**
* Parses a string representation of a float array and returns an array of numbers.
* @param {string} arrayAsString - The string representation of the array.
@@ -287,11 +361,13 @@ export class HanaDB extends VectorStore {
`"${this.contentColumn}" NCLOB, ` +
`"${this.metadataColumn}" NCLOB, ` +
`"${this.vectorColumn}" REAL_VECTOR`;
- sqlStr +=
- this.vectorColumnLength === -1
- ? ");"
- : `(${this.vectorColumnLength}));`;
+ // Length can either be -1 (QRC01+02-24) or 0 (QRC03-24 onwards)
+ if (this.vectorColumnLength === -1 || this.vectorColumnLength === 0) {
+ sqlStr += ");";
+ } else {
+ sqlStr += `(${this.vectorColumnLength}));`;
+ }
const client = this.connection;
await this.executeQuery(client, sqlStr);
}
@@ -316,40 +392,257 @@ export class HanaDB extends VectorStore {
* @returns A tuple containing the WHERE clause string and an array of query parameters.
*/
private createWhereByFilter(
- filter?: Filter
- ): [string, Array<string | number>] {
- const queryTuple: Array<string | number> = [];
+ filter?: this["FilterType"]
+ ): [string, Array<unknown>] {
let whereStr = "";
- if (filter) {
- Object.keys(filter).forEach((key, i) => {
- whereStr += i === 0 ? " WHERE " : " AND ";
- whereStr += ` JSON_VALUE(${this.metadataColumn}, '$.${key}') = ?`;
-
- const value = filter[key];
- if (typeof value === "number") {
- if (Number.isInteger(value)) {
- // hdb requires string while sap/hana-client doesn't
- queryTuple.push(value.toString());
+ let queryTuple: Array<unknown> = [];
+
+ if (filter && Object.keys(filter).length > 0) {
+ const [where, params] = this.processFilterObject(filter);
+ whereStr = ` WHERE ${where}`;
+ queryTuple = params;
+ }
+
+ return [whereStr, queryTuple];
+ }
+
+ /**
+ * Processes a filter object to generate SQL WHERE clause components.
+ * @param filter - A filter object with keys as metadata fields and values as filter values.
+ * @returns A tuple containing the WHERE clause string and an array of query parameters.
+ */
+ private processFilterObject(
+ filter: this["FilterType"]
+ ): [string, Array<unknown>] {
+ let whereStr = "";
+ const queryTuple: Array<unknown> = [];
+
+ Object.keys(filter).forEach((key, i) => {
+ const filterValue = filter[key as keyof Filter] as
+ | ComparisonRValue
+ | ComparatorFilter
+ | Filter[];
+ if (i !== 0) {
+ whereStr += " AND ";
+ }
+
+ // Handling logical operators ($and, $or)
+ if (key in LOGICAL_OPERATORS_TO_SQL) {
+ const logicalOperator = LOGICAL_OPERATORS_TO_SQL[key];
+ const logicalOperands = filterValue as Filter[];
+ logicalOperands.forEach((operand: Filter, j: number) => {
+ if (j !== 0) {
+ whereStr += ` ${logicalOperator} `;
+ }
+ const [whereLogical, paramsLogical] =
+ this.processFilterObject(operand);
+ whereStr += "(" + whereLogical + ")";
+ queryTuple.push(...paramsLogical);
+ });
+
+ return;
+ }
+
+ // Handle special comparison operators and simple types
+ let operator = "=";
+ let sqlParam = "?";
+ if (typeof filterValue === "number") {
+ if (Number.isInteger(filterValue)) {
+ // hdb requires string while sap/hana-client doesn't
+ queryTuple.push(filterValue.toString());
+ } else {
+ throw new Error(
+ `Unsupported filter data-type: wrong number type for key ${key}`
+ );
+ }
+ } else if (typeof filterValue === "string") {
+ queryTuple.push(filterValue);
+ } else if (typeof filterValue === "boolean") {
+ queryTuple.push(filterValue.toString());
+ } else if (typeof filterValue === "object" && filterValue !== null) {
+ // Get the special operator key, like $eq, $ne, $in, $between, etc.
+ const specialOp = Object.keys(filterValue)[0] as Comparator;
+ const specialVal = (filterValue as ComparatorFilter)[specialOp];
+ // Handling of 'special' operators starting with "$"
+ if (specialOp in COMPARISONS_TO_SQL) {
+ operator = COMPARISONS_TO_SQL[specialOp];
+ if (specialVal === undefined) {
+ throw new Error(
+ `Operator '${specialOp}' expects a non-undefined value.`
+ );
+ }
+ if (typeof specialVal === "boolean") {
+ queryTuple.push(specialVal.toString());
+ } else if (typeof specialVal === "number") {
+ sqlParam = "CAST(? as float)";
+ queryTuple.push(specialVal);
+ } else if (
+ typeof specialVal === "object" &&
+ specialVal !== null &&
+ "type" in specialVal &&
+ specialVal.type === "date" &&
+ "date" in specialVal
+ ) {
+ sqlParam = "CAST(? as DATE)";
+ queryTuple.push((specialVal as DateValue).date);
+ } else {
+ queryTuple.push(specialVal);
+ }
+ } else if (specialOp in BETWEEN_OPERATOR_TO_SQL) {
+ // ensure the value is an array with exact length of 2
+ if (!Array.isArray(specialVal) || specialVal.length !== 2) {
+ throw new Error(`Operator '${specialOp}' expects two values.`);
+ }
+ const [betweenFrom, betweenTo] = specialVal as [
+ ComparisonRValue,
+ ComparisonRValue
+ ];
+ operator = BETWEEN_OPERATOR_TO_SQL[specialOp];
+ sqlParam = "? AND ?";
+ queryTuple.push(betweenFrom.toString(), betweenTo.toString());
+ } else if (specialOp in LIKE_OPERATOR_TO_SQL) {
+ operator = LIKE_OPERATOR_TO_SQL[specialOp];
+ if (specialVal !== undefined) {
+ queryTuple.push(specialVal.toString());
} else {
throw new Error(
- `Unsupported filter data-type: wrong number type for key ${key}`
+ `Operator '${specialOp}' expects a non-undefined value.`
);
}
- } else if (typeof value === "string") {
- queryTuple.push(value);
- } else if (typeof value === "boolean") {
- queryTuple.push(value.toString());
+ } else if (specialOp in IN_OPERATORS_TO_SQL) {
+ operator = IN_OPERATORS_TO_SQL[specialOp];
+ if (Array.isArray(specialVal)) {
+ const placeholders = Array(specialVal.length).fill("?").join(",");
+ sqlParam = `(${placeholders})`;
+ queryTuple.push(
+ ...specialVal.map((listEntry) => listEntry.toString())
+ );
+ } else {
+ throw new Error(`Unsupported value for ${operator}: ${specialVal}`);
+ }
} else {
- throw new Error(
- `Unsupported filter data-type: ${typeof value} for key ${key}`
- );
+ throw new Error(`Unsupported operator: ${specialOp}`);
}
- });
- }
+ } else {
+ throw new Error(`Unsupported filter data-type: ${typeof filterValue}`);
+ }
+ // Metadata column handling
+ const selector = this.specificMetadataColumns.includes(key)
+ ? `"${key}"`
+ : `JSON_VALUE(${this.metadataColumn}, '$.${key}')`;
+ whereStr += `${selector} ${operator} ${sqlParam}`;
+ });
return [whereStr, queryTuple];
}
+ /**
+ * Creates an HNSW vector index on a specified table and vector column with
+ * optional build and search configurations. If no configurations are provided,
+ * default parameters from the database are used. If provided values exceed the
+ * valid ranges, an error will be raised.
+ * The index is always created in ONLINE mode.
+ *
+ * @param {object} options Object containing configuration options for the index
+ * @param {number} [options.m] (Optional) Maximum number of neighbors per graph node (Valid Range: [4, 1000])
+ * @param {number} [options.efConstruction] (Optional) Maximal candidates to consider when building the graph
+ * (Valid Range: [1, 100000])
+ * @param {number} [options.efSearch] (Optional) Minimum candidates for top-k-nearest neighbor queries
+ * (Valid Range: [1, 100000])
+ * @param {string} [options.indexName] (Optional) Custom index name. Defaults to <tableName>_<distanceFuncName>_idx
+ * @returns {Promise} Promise that resolves when index is added.
+ */
+ public async createHnswIndex(
+ options: {
+ m?: number;
+ efConstruction?: number;
+ efSearch?: number;
+ indexName?: string;
+ } = {}
+ ): Promise<void> {
+ const { m, efConstruction, efSearch, indexName } = options;
+
+ // Determine the distance function based on the configured strategy
+ const distanceFuncName = HANA_DISTANCE_FUNCTION[this.distanceStrategy][0];
+ const defaultIndexName = `${this.tableName}_${distanceFuncName}_idx`;
+
+ // Use provided indexName or fallback to default
+ const finalIndexName = HanaDB.sanitizeName(indexName || defaultIndexName);
+ // Initialize buildConfig and searchConfig objects
+ const buildConfig: Record<string, number> = {};
+ const searchConfig: Record<string, number> = {};
+
+ // Validate and add m parameter to buildConfig if provided
+ if (m !== undefined) {
+ const minimumHnswM = 4;
+ const maximumHnswM = 1000;
+ const sanitizedM = HanaDB.sanitizeInt(m, minimumHnswM);
+ if (sanitizedM < minimumHnswM || sanitizedM > maximumHnswM) {
+ throw new Error("M must be in the range [4, 1000]");
+ }
+ buildConfig.M = sanitizedM;
+ }
+
+ // Validate and add efConstruction to buildConfig if provided
+ if (efConstruction !== undefined) {
+ const minimumEfConstruction = 1;
+ const maximumEfConstruction = 100000;
+ const sanitizedEfConstruction = HanaDB.sanitizeInt(
+ efConstruction,
+ minimumEfConstruction
+ );
+ if (
+ sanitizedEfConstruction < minimumEfConstruction ||
+ sanitizedEfConstruction > maximumEfConstruction
+ ) {
+ throw new Error("efConstruction must be in the range [1, 100000]");
+ }
+ buildConfig.efConstruction = sanitizedEfConstruction;
+ }
+
+ // Validate and add efSearch to searchConfig if provided
+ if (efSearch !== undefined) {
+ const minimumEfSearch = 1;
+ const maximumEfSearch = 100000;
+ const sanitizedEfSearch = HanaDB.sanitizeInt(efSearch, minimumEfSearch);
+ if (
+ sanitizedEfSearch < minimumEfSearch ||
+ sanitizedEfSearch > maximumEfSearch
+ ) {
+ throw new Error("efSearch must be in the range [1, 100000]");
+ }
+ searchConfig.efSearch = sanitizedEfSearch;
+ }
+
+ // Convert buildConfig and searchConfig to JSON strings if they contain values
+ const buildConfigStr = Object.keys(buildConfig).length
+ ? JSON.stringify(buildConfig)
+ : "";
+ const searchConfigStr = Object.keys(searchConfig).length
+ ? JSON.stringify(searchConfig)
+ : "";
+
+ // Create the base SQL string for index creation
+ let sqlStr = `CREATE HNSW VECTOR INDEX ${finalIndexName} ON "${this.tableName}" ("${this.vectorColumn}")
+ SIMILARITY FUNCTION ${distanceFuncName} `;
+
+ // Append buildConfig to the SQL string if provided
+ if (buildConfigStr) {
+ sqlStr += `BUILD CONFIGURATION '${buildConfigStr}' `;
+ }
+
+ // Append searchConfig to the SQL string if provided
+ if (searchConfigStr) {
+ sqlStr += `SEARCH CONFIGURATION '${searchConfigStr}' `;
+ }
+
+ // Add the ONLINE option
+ sqlStr += "ONLINE;";
+
+ const client = this.connection;
+ await this.executeQuery(client, sqlStr);
+ }
+
/**
* Deletes entries from the table based on the provided filter.
* @param ids - Optional. Deletion by ids is not supported and will throw an error.
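To make the recursion above concrete: each key yields a `selector operator placeholder` clause, logical operators wrap recursive results in parentheses, and all values are collected positionally. A sketch of the expected translation with the default metadata column:

```typescript
const filter = {
  $or: [{ name: "adam" }, { height: { $between: [5, 10] } }],
};
// Expected WHERE fragment (keys not listed in specificMetadataColumns are
// read through JSON_VALUE on the metadata column, VEC_META by default):
//   WHERE (JSON_VALUE(VEC_META, '$.name') = ?)
//      OR (JSON_VALUE(VEC_META, '$.height') BETWEEN ? AND ?)
// Collected parameters: ["adam", "5", "10"]
```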
@@ -482,7 +775,7 @@ export class HanaDB extends VectorStore {
async similaritySearch(
query: string,
k: number,
- filter?: Filter
+ filter?: this["FilterType"]
): Promise<Document[]> {
const results = await this.similaritySearchWithScore(query, k, filter);
return results.map((result) => result[0]);
@@ -499,7 +792,7 @@ export class HanaDB extends VectorStore {
async similaritySearchWithScore(
query: string,
k: number,
- filter?: Filter
+ filter?: this["FilterType"]
): Promise<[Document, number][]> {
const queryEmbedding = await this.embeddings.embedQuery(query);
return this.similaritySearchVectorWithScore(queryEmbedding, k, filter);
@@ -516,7 +809,7 @@ export class HanaDB extends VectorStore {
async similaritySearchVectorWithScore(
queryEmbedding: number[],
k: number,
- filter?: Filter
+ filter?: this["FilterType"]
): Promise<[Document, number][]> {
const wholeResult = await this.similaritySearchWithScoreAndVectorByVector(
queryEmbedding,
@@ -537,9 +830,8 @@ export class HanaDB extends VectorStore {
async similaritySearchWithScoreAndVectorByVector(
embedding: number[],
k: number,
- filter?: Filter
+ filter?: this["FilterType"]
): Promise<Array<[Document, number, number[]]>> {
- // const result: Array<[Document, number, number[]]> = [];
// Sanitize inputs
const sanitizedK = HanaDB.sanitizeInt(k);
const sanitizedEmbedding = HanaDB.sanitizeListFloat(embedding);
@@ -600,7 +892,6 @@ export class HanaDB extends VectorStore {
options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
): Promise<Document[]> {
const { k, fetchK = 20, lambda = 0.5 } = options;
- // console.log(options)
const queryEmbedding = await this.embeddings.embedQuery(query);
const docs = await this.similaritySearchWithScoreAndVectorByVector(
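Putting the new pieces together, a usage sketch (assuming `embeddings` and a connected HANA `client` as in the tests below; the table name is illustrative):

```typescript
const store = new HanaDB(embeddings, {
  connection: client,
  tableName: "MY_DOCS",
});
await store.initialize();

// Build an HNSW index; inputs are validated against the documented ranges
// (m in [4, 1000], efConstruction/efSearch in [1, 100000]).
await store.createHnswIndex({ m: 64, efConstruction: 128, efSearch: 200 });

// Filters now accept comparison and logical operators:
const docs = await store.similaritySearch("sandwich", 4, {
  $and: [{ quality: "good" }, { rating: { $gte: 3 } }],
});
```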
diff --git a/libs/langchain-community/src/vectorstores/tests/hanavector.fixtures.ts b/libs/langchain-community/src/vectorstores/tests/hanavector.fixtures.ts
new file mode 100644
index 000000000000..9634adaa05b4
--- /dev/null
+++ b/libs/langchain-community/src/vectorstores/tests/hanavector.fixtures.ts
@@ -0,0 +1,142 @@
+import { Document } from "@langchain/core/documents";
+
+interface Metadata {
+ name: string;
+ date: string;
+ count: number;
+ is_active: boolean;
+ tags: string[];
+ location: number[];
+ id: number;
+ height: number | null;
+ happiness: number | null;
+ sadness?: number;
+}
+
+const metadatas: Metadata[] = [
+ {
+ name: "adam",
+ date: "2021-01-01",
+ count: 1,
+ is_active: true,
+ tags: ["a", "b"],
+ location: [1.0, 2.0],
+ id: 1,
+ height: 10.0,
+ happiness: 0.9,
+ sadness: 0.1,
+ },
+ {
+ name: "bob",
+ date: "2021-01-02",
+ count: 2,
+ is_active: false,
+ tags: ["b", "c"],
+ location: [2.0, 3.0],
+ id: 2,
+ height: 5.7,
+ happiness: 0.8,
+ sadness: 0.1,
+ },
+ {
+ name: "jane",
+ date: "2021-01-01",
+ count: 3,
+ is_active: true,
+ tags: ["b", "d"],
+ location: [3.0, 4.0],
+ id: 3,
+ height: 2.4,
+ happiness: null,
+ },
+];
+
+const texts: string[] = metadatas.map((metadata) => `id ${metadata.id} `);
+
+export const DOCUMENTS: Document[] = texts.map(
+ (text, index) =>
+ new Document({ pageContent: text, metadata: metadatas[index] })
+);
+
+interface TestCase {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ filter: Record;
+ expected: number[];
+}
+
+export const TYPE_1_FILTERING_TEST_CASES: TestCase[] = [
+ { filter: { id: 1 }, expected: [1] },
+ { filter: { name: "adam" }, expected: [1] },
+ { filter: { is_active: true }, expected: [1, 3] },
+ { filter: { is_active: false }, expected: [2] },
+ { filter: { id: 1, is_active: true }, expected: [1] },
+ { filter: { id: 1, is_active: false }, expected: [] },
+];
+
+export const TYPE_2_FILTERING_TEST_CASES: TestCase[] = [
+ { filter: { id: 1 }, expected: [1] },
+ { filter: { id: { $ne: 1 } }, expected: [2, 3] },
+ { filter: { id: { $gt: 1 } }, expected: [2, 3] },
+ { filter: { id: { $gte: 1 } }, expected: [1, 2, 3] },
+ { filter: { id: { $lt: 1 } }, expected: [] },
+ { filter: { id: { $lte: 1 } }, expected: [1] },
+ { filter: { name: "adam" }, expected: [1] },
+ { filter: { name: "bob" }, expected: [2] },
+ { filter: { name: { $eq: "adam" } }, expected: [1] },
+ { filter: { name: { $ne: "adam" } }, expected: [2, 3] },
+ { filter: { name: { $gt: "jane" } }, expected: [] },
+ { filter: { name: { $gte: "jane" } }, expected: [3] },
+ { filter: { name: { $lt: "jane" } }, expected: [1, 2] },
+ { filter: { name: { $lte: "jane" } }, expected: [1, 2, 3] },
+ { filter: { is_active: { $eq: true } }, expected: [1, 3] },
+ { filter: { is_active: { $ne: true } }, expected: [2] },
+ { filter: { height: { $gt: 5.0 } }, expected: [1, 2] },
+ { filter: { height: { $gte: 5.0 } }, expected: [1, 2] },
+ { filter: { height: { $lt: 5.0 } }, expected: [3] },
+ { filter: { height: { $lte: 5.8 } }, expected: [2, 3] },
+ // New date-related test cases
+ {
+ filter: { date: { $eq: { type: "date", date: "2021-01-01" } } },
+ expected: [1, 3],
+ },
+ { filter: { date: { $ne: "2021-01-01" } }, expected: [2] },
+ { filter: { date: { $gt: "2021-01-01" } }, expected: [2] },
+ { filter: { date: { $gte: "2021-01-01" } }, expected: [1, 2, 3] },
+ { filter: { date: { $lt: "2021-01-02" } }, expected: [1, 3] },
+ { filter: { date: { $lte: "2021-01-02" } }, expected: [1, 2, 3] },
+];
+
+export const TYPE_3_FILTERING_TEST_CASES: TestCase[] = [
+ { filter: { $or: [{ id: 1 }, { id: 2 }] }, expected: [1, 2] },
+ { filter: { $or: [{ id: 1 }, { name: "bob" }] }, expected: [1, 2] },
+ { filter: { $and: [{ id: 1 }, { id: 2 }] }, expected: [] },
+ { filter: { $or: [{ id: 1 }, { id: 2 }, { id: 3 }] }, expected: [1, 2, 3] },
+];
+
+export const TYPE_4_FILTERING_TEST_CASES: TestCase[] = [
+ { filter: { id: { $between: [1, 2] } }, expected: [1, 2] },
+ { filter: { id: { $between: [1, 1] } }, expected: [1] },
+ { filter: { name: { $in: ["adam", "bob"] } }, expected: [1, 2] },
+ { filter: { name: { $nin: ["adam", "bob"] } }, expected: [3] },
+];
+
+export const TYPE_5_FILTERING_TEST_CASES: TestCase[] = [
+ { filter: { name: { $like: "a%" } }, expected: [1] },
+ { filter: { name: { $like: "%a%" } }, expected: [1, 3] },
+];
+
+export const TYPE_6_FILTERING_TEST_CASES: TestCase[] = [
+ {
+ filter: {
+ $and: [
+ {
+ $or: [{ id: { $eq: 1 } }, { id: { $in: [2, 3] } }],
+ },
+ { height: { $gte: 5.0 } },
+ ],
+ },
+ expected: [1, 2],
+ },
+ { filter: { id: 3, height: { $gte: 5.0 } }, expected: [] },
+ { filter: { $and: [{ id: 1 }, { height: { $gte: 5.0 } }] }, expected: [1] },
+];
diff --git a/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts b/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts
index 42a1fa945576..dc8b4a534e81 100644
--- a/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts
+++ b/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts
@@ -5,7 +5,15 @@ import { Document } from "@langchain/core/documents";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { test, expect } from "@jest/globals";
import { HanaDB, HanaDBArgs } from "../hanavector.js";
-
+import {
+ DOCUMENTS,
+ TYPE_1_FILTERING_TEST_CASES,
+ TYPE_2_FILTERING_TEST_CASES,
+ TYPE_3_FILTERING_TEST_CASES,
+ TYPE_4_FILTERING_TEST_CASES,
+ TYPE_5_FILTERING_TEST_CASES,
+ TYPE_6_FILTERING_TEST_CASES,
+} from "./hanavector.fixtures.js";
// Connection parameters
const connectionParams = {
host: process.env.HANA_HOST,
@@ -269,12 +277,10 @@ describe("add documents and similarity search tests", () => {
},
},
]);
-
const results: Document[] = await vectorStore.similaritySearch(
"Sandwiches taste good.",
1
);
- // console.log(results);
expect(results.length).toEqual(1);
expect(results).toMatchObject([
{
@@ -868,3 +874,458 @@ describe("Tests on HANA side", () => {
expect(exceptionOccurred).toBe(true);
});
});
+
+describe("HNSW Index Creation Tests", () => {
+ test("test HNSW index creation with default values", async () => {
+ /**
+ * Description:
+ * This test verifies that the HNSW index can be successfully created with default values
+ * when no parameters are passed to the createHnswIndex function.
+ */
+ const tableNameTest = "TEST_TABLE_HNSW_DEFAULT";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+
+ // Cleanup: Drop table if exists
+ await dropTable(client, tableNameTest);
+
+ // Create HanaDB instance and add data
+ const vector = await HanaDB.fromTexts(
+ ["foo", "bar", "baz"],
+ {},
+ embeddings,
+ args
+ );
+
+ let exceptionOccurred = false;
+ try {
+ // Call the createHnswIndex function with no parameters (default values)
+ await vector.createHnswIndex();
+ } catch (error) {
+ console.log(error);
+ exceptionOccurred = true;
+ }
+
+ // Assert that no exception occurred
+ expect(exceptionOccurred).toBe(false);
+ });
+
+ test("test HNSW index creation with specific values", async () => {
+ /**
+ * Description:
+ * This test verifies that the HNSW index can be created with specific values for m, efConstruction,
+ * efSearch, and a custom indexName.
+ */
+ const tableNameTest = "TEST_TABLE_HNSW_DEFINED";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+
+ // Cleanup: Drop table if exists
+ await dropTable(client, tableNameTest);
+
+ // Create HanaDB instance and add data
+ const vector = await HanaDB.fromTexts(
+ ["foo", "bar", "baz"],
+ {},
+ embeddings,
+ args
+ );
+
+ let exceptionOccurred = false;
+ try {
+ // Call the createHnswIndex function with specific values
+ await vector.createHnswIndex({
+ m: 50,
+ efConstruction: 150,
+ efSearch: 300,
+ indexName: "custom_index",
+ });
+ } catch (error) {
+ console.log(error);
+ exceptionOccurred = true;
+ }
+
+ // Assert that no exception occurred
+ expect(exceptionOccurred).toBe(false);
+ });
+
+ test("test HNSW index creation after initialization", async () => {
+ const tableNameTest = "TEST_TABLE_HNSW_INDEX_AFTER_INIT";
+
+ // Clean up: drop the table if it exists
+ await dropTable(client, tableNameTest);
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ // Initialize HanaDB without adding documents yet
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+ // Create HNSW index before adding any documents
+ await vectorDB.createHnswIndex({
+ indexName: "index_pre_add",
+ efSearch: 400,
+ m: 50,
+ efConstruction: 150,
+ });
+
+ // Add texts after index creation
+ await vectorDB.addDocuments([
+ {
+ pageContent: "Bye bye",
+ metadata: { id: 2, name: "2" },
+ },
+ {
+ pageContent: "Hello world",
+ metadata: { id: 1, name: "1" },
+ },
+ {
+ pageContent: "hello nice world",
+ metadata: { id: 3, name: "3" },
+ },
+ ]);
+
+ const results = await vectorDB.similaritySearch("Hello world", 1);
+ expect(results).toHaveLength(1);
+ expect(results).toEqual([
+ new Document({
+ pageContent: "Hello world",
+ metadata: { id: 1, name: "1" },
+ }),
+ ]);
+ });
+
+ test("test duplicate HNSW index creation", async () => {
+ const tableNameTest = "TEST_TABLE_HNSW_DUPLICATE_INDEX";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ // Clean up: drop the table if it exists
+ await dropTable(client, tableNameTest);
+
+ // Create HanaDB instance and add data
+ const vectorDB = await HanaDB.fromTexts(
+ ["foo", "bar", "baz"],
+ {},
+ embeddings,
+ args
+ );
+
+ // Create HNSW index for the first time
+ await vectorDB.createHnswIndex({
+ indexName: "index_cosine",
+ efSearch: 300,
+ m: 80,
+ efConstruction: 100,
+ });
+
+ // Trying to create the same index again should raise an exception
+ await expect(
+ vectorDB.createHnswIndex({
+ efSearch: 300,
+ m: 80,
+ efConstruction: 100,
+ })
+ ).rejects.toThrow();
+ });
+
+ test("test HNSW index creation with invalid m value", async () => {
+ /**
+ * Description:
+ * This test ensures that the HNSW index creation throws an error when an invalid value for m is passed
+ * (e.g., m < 4 or m > 1000).
+ */
+ const tableNameTest = "TEST_TABLE_HNSW_INVALID_M";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+
+ // Cleanup: Drop table if exists
+ await dropTable(client, tableNameTest);
+
+ // Create HanaDB instance and add data
+ const vector = await HanaDB.fromTexts(
+ ["foo", "bar", "baz"],
+ {},
+ embeddings,
+ args
+ );
+
+ let exceptionOccurred = false;
+ try {
+ // Call the createHnswIndex function with invalid m value
+ await vector.createHnswIndex({
+ m: 2, // Invalid value for m (should be >= 4)
+ });
+ } catch (error) {
+ exceptionOccurred = true;
+ }
+
+ // Assert that exception occurred
+ expect(exceptionOccurred).toBe(true);
+ });
+
+ test("test HNSW index creation with invalid efConstruction value", async () => {
+ /**
+ * Description:
+ * This test ensures that the HNSW index creation throws an error when an invalid efConstruction value is passed
+ * (e.g., efConstruction > 100000).
+ */
+ const tableNameTest = "TEST_TABLE_HNSW_INVALID_EF_CONSTRUCTION";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+
+ // Cleanup: Drop table if exists
+ await dropTable(client, tableNameTest);
+
+ // Create HanaDB instance and add data
+ const vector = await HanaDB.fromTexts(
+ ["foo", "bar", "baz"],
+ {},
+ embeddings,
+ args
+ );
+
+ let exceptionOccurred = false;
+ try {
+ // Call the createHnswIndex function with invalid efConstruction value
+ await vector.createHnswIndex({
+ efConstruction: 100001, // Invalid value for efConstruction (should be <= 100000)
+ });
+ } catch (error) {
+ exceptionOccurred = true;
+ }
+
+ // Assert that exception occurred
+ expect(exceptionOccurred).toBe(true);
+ });
+
+ test("test HNSW index creation with invalid efSearch value", async () => {
+ /**
+ * Description:
+ * This test ensures that the HNSW index creation throws an error when an invalid efSearch value is passed
+ * (e.g., efSearch < 1 or efSearch > 100000).
+ */
+ const tableNameTest = "TEST_TABLE_HNSW_INVALID_EF_SEARCH";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+
+ // Cleanup: Drop table if exists
+ await dropTable(client, tableNameTest);
+
+ // Create HanaDB instance and add data
+ const vector = await HanaDB.fromTexts(
+ ["foo", "bar", "baz"],
+ {},
+ embeddings,
+ args
+ );
+
+ let exceptionOccurred = false;
+ try {
+ // Call the createHnswIndex function with invalid efSearch value
+ await vector.createHnswIndex({
+ efSearch: 0, // Invalid value for efSearch (should be >= 1)
+ });
+ } catch (error) {
+ exceptionOccurred = true;
+ }
+
+ // Assert that exception occurred
+ expect(exceptionOccurred).toBe(true);
+ });
+});
+
+describe("Filter Tests", () => {
+ // Filter Test 1: Applying various filters from TYPE_1_FILTERING_TEST_CASES
+ it.each(TYPE_1_FILTERING_TEST_CASES)(
+ "should apply type 1 filtering correctly with filter %j",
+ async (testCase) => {
+ const { filter, expected } = testCase;
+ const tableNameTest = "TEST_TABLE_ENHANCED_FILTER_1";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ await dropTable(client, tableNameTest);
+
+ // Initialize the HanaDB instance
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+
+ // Add documents to the database
+ await vectorDB.addDocuments(DOCUMENTS);
+
+ // Perform a similarity search with the filter
+ const docs = await vectorDB.similaritySearch("Foo", 5, filter);
+ const ids = docs.map((doc) => doc.metadata.id);
+
+ // Check if the returned document IDs match the expected IDs
+ expect(ids.length).toBe(expected.length);
+ expect(ids.every((id) => expected.includes(id))).toBe(true);
+ }
+ );
+
+ // Filter Test 2: Testing TYPE_2_FILTERING_TEST_CASES
+ it.each(TYPE_2_FILTERING_TEST_CASES)(
+ "should apply type 2 filtering correctly with filter %j",
+ async (testCase) => {
+ const { filter, expected } = testCase;
+ const tableNameTest = "TEST_TABLE_ENHANCED_FILTER_2";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ await dropTable(client, tableNameTest);
+
+ // Initialize the HanaDB instance
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+
+ // Add documents to the database
+ await vectorDB.addDocuments(DOCUMENTS);
+
+ // Perform a similarity search with the filter
+ const docs = await vectorDB.similaritySearch("Foo", 5, filter);
+ const ids = docs.map((doc) => doc.metadata.id);
+
+ // Check if the returned document IDs match the expected IDs
+ expect(ids.length).toBe(expected.length);
+ expect(ids.every((id) => expected.includes(id))).toBe(true);
+ }
+ );
+
+ // Filter Test 3: Testing TYPE_3_FILTERING_TEST_CASES
+ it.each(TYPE_3_FILTERING_TEST_CASES)(
+ "should apply type 3 filtering correctly with filter %j",
+ async (testCase) => {
+ const { filter, expected } = testCase;
+ const tableNameTest = "TEST_TABLE_ENHANCED_FILTER_3";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ await dropTable(client, tableNameTest);
+
+ // Initialize the HanaDB instance
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+
+ // Add documents to the database
+ await vectorDB.addDocuments(DOCUMENTS);
+
+ // Perform a similarity search with the filter
+ const docs = await vectorDB.similaritySearch("Foo", 5, filter);
+ const ids = docs.map((doc) => doc.metadata.id);
+
+ // Check if the returned document IDs match the expected IDs
+ expect(ids.length).toBe(expected.length);
+ expect(ids.every((id) => expected.includes(id))).toBe(true);
+ }
+ );
+
+ // Filter Test 4: Testing TYPE_4_FILTERING_TEST_CASES
+ it.each(TYPE_4_FILTERING_TEST_CASES)(
+ "should apply type 4 filtering correctly with filter %j",
+ async (testCase) => {
+ const { filter, expected } = testCase;
+ const tableNameTest = "TEST_TABLE_ENHANCED_FILTER_4";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ await dropTable(client, tableNameTest);
+
+ // Initialize the HanaDB instance
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+
+ // Add documents to the database
+ await vectorDB.addDocuments(DOCUMENTS);
+
+ // Perform a similarity search with the filter
+ const docs = await vectorDB.similaritySearch("Foo", 5, filter);
+ const ids = docs.map((doc) => doc.metadata.id);
+
+ // Check if the returned document IDs match the expected IDs
+ expect(ids.length).toBe(expected.length);
+ expect(ids.every((id) => expected.includes(id))).toBe(true);
+ }
+ );
+
+ // Filter Test 5: Testing TYPE_5_FILTERING_TEST_CASES
+ it.each(TYPE_5_FILTERING_TEST_CASES)(
+ "should apply type 5 filtering correctly with filter %j",
+ async (testCase) => {
+ const { filter, expected } = testCase;
+ const tableNameTest = "TEST_TABLE_ENHANCED_FILTER_5";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ await dropTable(client, tableNameTest);
+
+ // Initialize the HanaDB instance
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+
+ // Add documents to the database
+ await vectorDB.addDocuments(DOCUMENTS);
+
+ // Perform a similarity search with the filter
+ const docs = await vectorDB.similaritySearch("Foo", 5, filter);
+ const ids = docs.map((doc) => doc.metadata.id);
+
+ // Check if the returned document IDs match the expected IDs
+ expect(ids.length).toBe(expected.length);
+ expect(ids.every((id) => expected.includes(id))).toBe(true);
+ }
+ );
+
+ // Filter Test 6: Testing TYPE_6_FILTERING_TEST_CASES
+ it.each(TYPE_6_FILTERING_TEST_CASES)(
+ "should apply type 6 filtering correctly with filter %j",
+ async (testCase) => {
+ const { filter, expected } = testCase;
+ const tableNameTest = "TEST_TABLE_ENHANCED_FILTER_6";
+ const args = {
+ connection: client,
+ tableName: tableNameTest,
+ };
+ await dropTable(client, tableNameTest);
+
+ // Initialize the HanaDB instance
+ const vectorDB = new HanaDB(embeddings, args);
+ await vectorDB.initialize();
+ expect(vectorDB).toBeDefined();
+
+ // Add documents to the database
+ await vectorDB.addDocuments(DOCUMENTS);
+
+ // Perform a similarity search with the filter
+ const docs = await vectorDB.similaritySearch("Foo", 5, filter);
+ console.log(docs);
+ const ids = docs.map((doc) => doc.metadata.id);
+
+ // Check if the returned document IDs match the expected IDs
+ expect(ids.length).toBe(expected.length);
+ expect(ids.every((id) => expected.includes(id))).toBe(true);
+ }
+ );
+});
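To run this suite, `client` is built from the `HANA_*` environment variables referenced at the top of the file. A sketch of that setup, assuming the `hdb` driver (the exact driver and option names used by the test harness are not shown in this hunk):

```typescript
import hdb from "hdb";

// Values come from the environment; names follow the variables above.
const client = hdb.createClient({
  host: process.env.HANA_HOST,
  port: Number(process.env.HANA_PORT),
  user: process.env.HANA_UID,
  password: process.env.HANA_PWD,
});

await new Promise<void>((resolve, reject) => {
  client.connect((err: Error | null) => (err ? reject(err) : resolve()));
});
```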
diff --git a/libs/langchain-community/src/vectorstores/tests/hanavector.test.ts b/libs/langchain-community/src/vectorstores/tests/hanavector.test.ts
index 67221de543b3..9fda0b6e5434 100644
--- a/libs/langchain-community/src/vectorstores/tests/hanavector.test.ts
+++ b/libs/langchain-community/src/vectorstores/tests/hanavector.test.ts
@@ -7,7 +7,7 @@ describe("Sanity check tests", () => {
HanaDB.sanitizeInt("HUGO");
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (error: any) {
- expect(error.message).toContain("must not be smaller than -1");
+ expect(error.message).toContain("must not be smaller than 0");
}
});
@@ -17,13 +17,13 @@ describe("Sanity check tests", () => {
});
it("should sanitize int with negative values", () => {
- expect(HanaDB.sanitizeInt(-1)).toBe(-1);
- expect(HanaDB.sanitizeInt("-1")).toBe(-1);
+ expect(HanaDB.sanitizeInt(-1, -1)).toBe(-1);
+ expect(HanaDB.sanitizeInt("-1", -1)).toBe(-1);
});
it("should sanitize int with illegal negative value", () => {
try {
- HanaDB.sanitizeInt(-2);
+ HanaDB.sanitizeInt(-2, -1);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (error: any) {
expect(error.message).toContain("must not be smaller than -1");
diff --git a/libs/langchain-google-common/src/utils/gemini.ts b/libs/langchain-google-common/src/utils/gemini.ts
index cc8e994efec6..e6d0f6e96001 100644
--- a/libs/langchain-google-common/src/utils/gemini.ts
+++ b/libs/langchain-google-common/src/utils/gemini.ts
@@ -231,10 +231,10 @@ export function getGeminiAPI(config?: GeminiAPIConfig): GoogleAIAPI {
throw new Error("Missing Image URL");
}
- const mineTypeAndData = extractMimeType(url);
- if (mineTypeAndData) {
+ const mimeTypeAndData = extractMimeType(url);
+ if (mimeTypeAndData) {
return {
- inlineData: mineTypeAndData,
+ inlineData: mimeTypeAndData,
};
} else {
// FIXME - need some way to get mime type
diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json
index 4188867ca254..5bf48183e567 100644
--- a/libs/langchain-google-genai/package.json
+++ b/libs/langchain-google-genai/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/google-genai",
- "version": "0.1.4",
+ "version": "0.1.5",
"description": "Google Generative AI integration for LangChain.js",
"type": "module",
"engines": {
diff --git a/libs/langchain-google-genai/src/chat_models.ts b/libs/langchain-google-genai/src/chat_models.ts
index 6fc5433babe3..93f5dfa9c26f 100644
--- a/libs/langchain-google-genai/src/chat_models.ts
+++ b/libs/langchain-google-genai/src/chat_models.ts
@@ -7,6 +7,9 @@ import {
GenerateContentRequest,
SafetySetting,
Part as GenerativeAIPart,
+ ModelParams,
+ RequestOptions,
+ type CachedContent,
} from "@google/generative-ai";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import {
@@ -180,6 +183,15 @@ export interface GoogleGenerativeAIChatInput
* @default false
*/
json?: boolean;
+
+ /**
+ * Whether or not model supports system instructions.
+ * The following models support system instructions:
+ * - All Gemini 1.5 Pro model versions
+ * - All Gemini 1.5 Flash model versions
+ * - Gemini 1.0 Pro version gemini-1.0-pro-002
+ */
+ convertSystemMessageToHumanContent?: boolean | undefined;
}
/**
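The flag interacts with the `useSystemInstruction` getter added further down: when left unset, support is inferred from the model name; setting it explicitly forces the conversion on or off. A sketch:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Force legacy behavior: fold the system message into the first human turn.
const legacy = new ChatGoogleGenerativeAI({
  model: "gemini-1.0-pro-001",
  convertSystemMessageToHumanContent: true,
});

// Left unset on a 1.5-series model, the system message is sent as a
// native systemInstruction instead.
const native = new ChatGoogleGenerativeAI({ model: "gemini-1.5-pro" });
```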
@@ -563,6 +575,8 @@ export class ChatGoogleGenerativeAI
streamUsage = true;
+ convertSystemMessageToHumanContent: boolean | undefined;
+
private client: GenerativeModel;
get _isMultimodalModel() {
@@ -651,6 +665,44 @@ export class ChatGoogleGenerativeAI
this.streamUsage = fields?.streamUsage ?? this.streamUsage;
}
+ useCachedContent(
+ cachedContent: CachedContent,
+ modelParams?: ModelParams,
+ requestOptions?: RequestOptions
+ ): void {
+ if (!this.apiKey) return;
+ this.client = new GenerativeAI(
+ this.apiKey
+ ).getGenerativeModelFromCachedContent(
+ cachedContent,
+ modelParams,
+ requestOptions
+ );
+ }
+
+ get useSystemInstruction(): boolean {
+ return typeof this.convertSystemMessageToHumanContent === "boolean"
+ ? !this.convertSystemMessageToHumanContent
+ : this.computeUseSystemInstruction;
+ }
+
+ get computeUseSystemInstruction(): boolean {
+ // This works on models from April 2024 and later
+ // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later
+ // AI Studio: gemini-1.5-pro-latest
+ if (this.modelName === "gemini-1.0-pro-001") {
+ return false;
+ } else if (this.modelName.startsWith("gemini-pro-vision")) {
+ return false;
+ } else if (this.modelName.startsWith("gemini-1.0-pro-vision")) {
+ return false;
+ } else if (this.modelName === "gemini-pro") {
+ // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001
+ return false;
+ }
+ return true;
+ }
+
getLsParams(options: this["ParsedCallOptions"]): LangSmithParams {
return {
ls_provider: "google_genai",
@@ -706,8 +758,15 @@ export class ChatGoogleGenerativeAI
): Promise<ChatResult> {
const prompt = convertBaseMessagesToContent(
messages,
- this._isMultimodalModel
+ this._isMultimodalModel,
+ this.useSystemInstruction
);
+ let actualPrompt = prompt;
+ if (prompt[0].role === "system") {
+ const [systemInstruction] = prompt;
+ this.client.systemInstruction = systemInstruction;
+ actualPrompt = prompt.slice(1);
+ }
const parameters = this.invocationParams(options);
// Handle streaming
@@ -734,7 +793,7 @@ export class ChatGoogleGenerativeAI
const res = await this.completionWithRetry({
...parameters,
- contents: prompt,
+ contents: actualPrompt,
});
let usageMetadata: UsageMetadata | undefined;
@@ -770,12 +829,19 @@ export class ChatGoogleGenerativeAI
): AsyncGenerator<ChatGenerationChunk> {
const prompt = convertBaseMessagesToContent(
messages,
- this._isMultimodalModel
+ this._isMultimodalModel,
+ this.useSystemInstruction
);
+ let actualPrompt = prompt;
+ if (prompt[0].role === "system") {
+ const [systemInstruction] = prompt;
+ this.client.systemInstruction = systemInstruction;
+ actualPrompt = prompt.slice(1);
+ }
const parameters = this.invocationParams(options);
const request = {
...parameters,
- contents: prompt,
+ contents: actualPrompt,
};
const stream = await this.caller.callWithOptions(
{ signal: options?.signal },
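End to end, these `_generate`/`_streamResponseChunks` changes pull a leading system message out of the converted prompt and attach it as `client.systemInstruction`. The user-facing effect, sketched:

```typescript
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

const model = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash" });

// On models that support system instructions, this is no longer merged
// into the first user turn; it is sent as a native system instruction.
const res = await model.invoke([
  new SystemMessage("You are a terse assistant."),
  new HumanMessage("What's the weather like in New York?"),
]);
```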
diff --git a/libs/langchain-google-genai/src/tests/chat_models-extended.int.test.ts b/libs/langchain-google-genai/src/tests/chat_models-extended.int.test.ts
new file mode 100644
index 000000000000..935346531999
--- /dev/null
+++ b/libs/langchain-google-genai/src/tests/chat_models-extended.int.test.ts
@@ -0,0 +1,141 @@
+/* eslint-disable no-process-env */
+import { test, expect } from "@jest/globals";
+import { z } from "zod";
+import { ChatGoogleGenerativeAI } from "../chat_models.js";
+
+const baseSchema = z.object({
+ name: z.string(),
+ age: z.number(),
+});
+
+test("Google AI - Generate structured output without errors", async () => {
+ const model = new ChatGoogleGenerativeAI({
+ model: "gemini-1.5-flash",
+ temperature: 0.7,
+ });
+ const structuredLlm = model.withStructuredOutput(baseSchema);
+ const request = "Generate a structured response for a user.";
+ const result = await structuredLlm.invoke(request);
+ console.log("Valid Schema Result:", result);
+ expect(result).toBeDefined();
+ expect(result).toHaveProperty("name");
+ expect(result).toHaveProperty("age");
+});
+
+test("Google AI - Validate nested schema structures", async () => {
+ const schema = z.object({
+ name: z.string(),
+ details: z.object({
+ age: z.number(),
+ address: z.string(),
+ }),
+ });
+ const model = new ChatGoogleGenerativeAI({
+ model: "gemini-1.5-flash",
+ temperature: 0.7,
+ });
+ const structuredLlm = model.withStructuredOutput(schema);
+ const request = "Generate structured data with nested schema.";
+ const result = await structuredLlm.invoke(request);
+ console.log("Nested Schema Result:", result);
+ expect(result).toBeDefined();
+ expect(result.details).toHaveProperty("age");
+ expect(result.details).toHaveProperty("address");
+});
+
+test("Google AI - Handle optional fields in schema", async () => {
+ const schema = z.object({
+ name: z.string(),
+ age: z.number(),
+ email: z.string().optional(),
+ });
+ const model = new ChatGoogleGenerativeAI({
+ model: "gemini-1.5-flash",
+ temperature: 0.7,
+ });
+ const structuredLlm = model.withStructuredOutput(schema);
+ const request = "Generate structured data with optional fields.";
+ const result = await structuredLlm.invoke(request);
+ console.log("Optional Fields Result:", result);
+ expect(result).toBeDefined();
+ expect(result).toHaveProperty("name");
+ expect(result).toHaveProperty("age");
+ expect(result).toHaveProperty("email");
+});
+
+test("Google AI - Validate schema with large payloads", async () => {
+ const schema = z.object({
+ name: z.string(),
+ age: z.number(),
+ address: z.string(),
+ phone: z.string(),
+ email: z.string(),
+ });
+ const model = new ChatGoogleGenerativeAI({
+ model: "gemini-1.5-flash",
+ temperature: 0.7,
+ });
+ const structuredLlm = model.withStructuredOutput(schema);
+ const request = "Generate structured data for a user with many fields.";
+ const result = await structuredLlm.invoke(request);
+ console.log("Large Payload Result:", result);
+ expect(result).toBeDefined();
+ expect(result).toHaveProperty("name");
+ expect(result).toHaveProperty("age");
+ expect(result).toHaveProperty("address");
+ expect(result).toHaveProperty("phone");
+ expect(result).toHaveProperty("email");
+});
+
+test("Google AI - Handle schema with deeply nested structures", async () => {
+ const schema = z.object({
+ user: z.object({
+ id: z.string(),
+ profile: z.object({
+ details: z.object({
+ name: z.string(),
+ age: z.number(),
+ preferences: z.object({
+ favoriteColor: z.string(),
+ hobbies: z.array(z.string()),
+ }),
+ }),
+ }),
+ }),
+ });
+ const model = new ChatGoogleGenerativeAI({
+ model: "gemini-1.5-flash",
+ temperature: 0.7,
+ });
+ const structuredLlm = model.withStructuredOutput(schema);
+ const request = "Generate a deeply nested user profile structure.";
+ const result = await structuredLlm.invoke(request);
+ console.log("Deeply Nested Schema Result:", result);
+ expect(result).toBeDefined();
+ expect(result.user.profile.details.preferences).toHaveProperty(
+ "favoriteColor"
+ );
+ expect(Array.isArray(result.user.profile.details.preferences.hobbies)).toBe(
+ true
+ );
+});
+
+test("Google AI - Handle schema with enum fields", async () => {
+ const schema = z.object({
+ name: z.string(),
+ role: z.enum(["admin", "editor", "viewer"]),
+ });
+ const model = new ChatGoogleGenerativeAI({
+ model: "gemini-1.5-flash",
+ temperature: 0.7,
+ });
+ const structuredLlm = model.withStructuredOutput(schema);
+ const request =
+ "Generate structured data with a name and a role (admin, editor, or viewer).";
+ const result = await structuredLlm.invoke(request);
+ console.log("Enum Fields Result:", result);
+ expect(result).toBeDefined();
+ expect(result).toHaveProperty("name");
+ expect(result).toHaveProperty("role");
+ expect(["admin", "editor", "viewer"]).toContain(result.role);
+});
diff --git a/libs/langchain-google-genai/src/tests/chat_models.test.ts b/libs/langchain-google-genai/src/tests/chat_models.test.ts
index 97015725fc7e..73cc321abd7c 100644
--- a/libs/langchain-google-genai/src/tests/chat_models.test.ts
+++ b/libs/langchain-google-genai/src/tests/chat_models.test.ts
@@ -253,3 +253,181 @@ test("convertBaseMessagesToContent correctly creates properly formatted content"
},
]);
});
+
+test("Input has single system message followed by one user message, convert system message is false", async () => {
+ const messages = [
+ new SystemMessage("You are a helpful assistant"),
+ new HumanMessage("What's the weather like in new york?"),
+ ];
+ const messagesAsGoogleContent = convertBaseMessagesToContent(
+ messages,
+ false,
+ false
+ );
+
+ expect(messagesAsGoogleContent).toEqual([
+ {
+ role: "user",
+ parts: [
+ { text: "You are a helpful assistant" },
+ { text: "What's the weather like in new york?" },
+ ],
+ },
+ ]);
+});
+
+test("Input has a system message that is not the first message, convert system message is false", async () => {
+ const messages = [
+ new HumanMessage("What's the weather like in new york?"),
+ new SystemMessage("You are a helpful assistant"),
+ ];
+ expect(() => {
+ convertBaseMessagesToContent(messages, false, false);
+ }).toThrow("System message should be the first one");
+});
+
+test("Input has multiple system messages, convert system message is false", async () => {
+ const messages = [
+ new SystemMessage("You are a helpful assistant"),
+ new SystemMessage("You are not a helpful assistant"),
+ ];
+ expect(() => {
+ convertBaseMessagesToContent(messages, false, false);
+ }).toThrow("System message should be the first one");
+});
+
+test("Input has no system message and one user message, convert system message is false", async () => {
+ const messages = [new HumanMessage("What's the weather like in new york?")];
+ const messagesAsGoogleContent = convertBaseMessagesToContent(
+ messages,
+ false,
+ false
+ );
+
+ expect(messagesAsGoogleContent).toEqual([
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in new york?" }],
+ },
+ ]);
+});
+
+test("Input has no system message and multiple user message, convert system message is false", async () => {
+ const messages = [
+ new HumanMessage("What's the weather like in new york?"),
+ new HumanMessage("What's the weather like in toronto?"),
+ new HumanMessage("What's the weather like in los angeles?"),
+ ];
+ const messagesAsGoogleContent = convertBaseMessagesToContent(
+ messages,
+ false,
+ false
+ );
+
+ expect(messagesAsGoogleContent).toEqual([
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in new york?" }],
+ },
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in toronto?" }],
+ },
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in los angeles?" }],
+ },
+ ]);
+});
+
+test("Input has single system message followed by one user message, convert system message is true", async () => {
+ const messages = [
+ new SystemMessage("You are a helpful assistant"),
+ new HumanMessage("What's the weather like in new york?"),
+ ];
+
+ const messagesAsGoogleContent = convertBaseMessagesToContent(
+ messages,
+ false,
+ true
+ );
+
+ expect(messagesAsGoogleContent).toEqual([
+ {
+ role: "system",
+ parts: [{ text: "You are a helpful assistant" }],
+ },
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in new york?" }],
+ },
+ ]);
+});
+
+test("Input has single system message that is not the first message, convert system message is true", async () => {
+ const messages = [
+ new HumanMessage("What's the weather like in new york?"),
+ new SystemMessage("You are a helpful assistant"),
+ ];
+
+ expect(() => convertBaseMessagesToContent(messages, false, true)).toThrow(
+ "System message should be the first one"
+ );
+});
+
+test("Input has multiple system message, convert system message is true", async () => {
+ const messages = [
+ new SystemMessage("What's the weather like in new york?"),
+ new SystemMessage("You are a helpful assistant"),
+ ];
+
+ expect(() => convertBaseMessagesToContent(messages, false, true)).toThrow(
+ "System message should be the first one"
+ );
+});
+
+test("Input has no system message and one user message, convert system message is true", async () => {
+ const messages = [new HumanMessage("What's the weather like in new york?")];
+
+ const messagesAsGoogleContent = convertBaseMessagesToContent(
+ messages,
+ false,
+ true
+ );
+
+ expect(messagesAsGoogleContent).toEqual([
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in new york?" }],
+ },
+ ]);
+});
+
+test("Input has no system message and multiple user messages, convert system message is true", async () => {
+ const messages = [
+ new HumanMessage("What's the weather like in new york?"),
+ new HumanMessage("Will it rain today?"),
+ new HumanMessage("How about next week?"),
+ ];
+
+ const messagesAsGoogleContent = convertBaseMessagesToContent(
+ messages,
+ false,
+ true
+ );
+
+ expect(messagesAsGoogleContent).toEqual([
+ {
+ role: "user",
+ parts: [{ text: "What's the weather like in new york?" }],
+ },
+ {
+ role: "user",
+ parts: [{ text: "Will it rain today?" }],
+ },
+ {
+ role: "user",
+ parts: [{ text: "How about next week?" }],
+ },
+ ]);
+});
diff --git a/libs/langchain-google-genai/src/tests/context_caching.int.test.ts b/libs/langchain-google-genai/src/tests/context_caching.int.test.ts
new file mode 100644
index 000000000000..5f007929fc12
--- /dev/null
+++ b/libs/langchain-google-genai/src/tests/context_caching.int.test.ts
@@ -0,0 +1,84 @@
+/* eslint-disable no-process-env */
+
+import { test } from "@jest/globals";
+
+import { fileURLToPath } from "node:url";
+import * as path from "node:path";
+
+import {
+ FileState,
+ UploadFileResponse,
+ GoogleAIFileManager,
+ GoogleAICacheManager,
+} from "@google/generative-ai/server";
+import { ChatGoogleGenerativeAI } from "../chat_models.js";
+
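+// A single shared model instance; the cached content handle is attached to it
+// in beforeAll before the test runs.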
+const model = new ChatGoogleGenerativeAI({});
+let fileResult: UploadFileResponse;
+
+beforeAll(async () => {
+  // Requires the video file to be downloaded into src/tests/data first:
+ // curl -O https://storage.googleapis.com/generativeai-downloads/data/Sherlock_Jr_FullMovie.mp4
+ const displayName = "Sherlock Jr. video";
+
+ const filename = fileURLToPath(import.meta.url);
+ const dirname = path.dirname(filename);
+ const pathToVideoFile = path.join(dirname, "/data/Sherlock_Jr_FullMovie.mp4");
+
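+  // The cache and file managers authenticate with the same API key the chat
+  // model uses.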
+ const contextCache = new GoogleAICacheManager(
+ process.env.GOOGLE_API_KEY || ""
+ );
+ const fileCache = new GoogleAIFileManager(process.env.GOOGLE_API_KEY || "");
+ fileResult = await fileCache.uploadFile(pathToVideoFile, {
+ displayName,
+ mimeType: "video/mp4",
+ });
+
+ const { name } = fileResult.file;
+
+ // Poll getFile() on a set interval (2 seconds here) to check file state.
+ let file = await fileCache.getFile(name);
+ while (file.state === FileState.PROCESSING) {
+ // Sleep for 2 seconds
+ await new Promise((resolve) => {
+ setTimeout(resolve, 2_000);
+ });
+ file = await fileCache.getFile(name);
+ }
+
+ const systemInstruction =
+ "You are an expert video analyzer, and your job is to answer " +
+ "the user's query based on the video file you have access to.";
+ const cachedContent = await contextCache.create({
+ model: "models/gemini-1.5-flash-001",
+ displayName: "gettysburg audio",
+ systemInstruction,
+ contents: [
+ {
+ role: "user",
+ parts: [
+ {
+ fileData: {
+ mimeType: fileResult.file.mimeType,
+ fileUri: fileResult.file.uri,
+ },
+ },
+ ],
+ },
+ ],
+ ttlSeconds: 300,
+ });
+
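+  // Point the chat model at the cached content so the invocation below reuses
+  // the uploaded video instead of resending it.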
+ model.useCachedContent(cachedContent);
+}, 10 * 60 * 1000); // Allow up to 10 minutes for the upload and file processing
+
+test("Test Google AI", async () => {
+ const res = await model.invoke(
+ "Introduce different characters in the movie by describing " +
+ "their personality, looks, and names. Also list the " +
+ "timestamps they were introduced for the first time."
+ );
+
+ console.log(res);
+ expect(res).toBeTruthy();
+});
diff --git a/libs/langchain-google-genai/src/utils/common.ts b/libs/langchain-google-genai/src/utils/common.ts
index 2670760c7115..e2e07f3c61df 100644
--- a/libs/langchain-google-genai/src/utils/common.ts
+++ b/libs/langchain-google-genai/src/utils/common.ts
@@ -61,6 +61,7 @@ export function convertAuthorToRole(
case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
return "model";
case "system":
+ return "system";
case "human":
return "user";
case "tool":
@@ -179,7 +180,8 @@ export function convertMessageContentToParts(
export function convertBaseMessagesToContent(
messages: BaseMessage[],
- isMultimodalModel: boolean
+ isMultimodalModel: boolean,
+ convertSystemMessageToHumanContent: boolean = false
) {
return messages.reduce<{
content: Content[];
@@ -223,7 +225,10 @@ export function convertBaseMessagesToContent(
};
}
let actualRole = role;
- if (actualRole === "function") {
+ if (
+ actualRole === "function" ||
+ (actualRole === "system" && !convertSystemMessageToHumanContent)
+ ) {
// GenerativeAI API will throw an error if the role is not "user" or "model."
actualRole = "user";
}
@@ -232,7 +237,8 @@ export function convertBaseMessagesToContent(
parts,
};
return {
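+      // When the system message is not converted, flag the accumulator so the
+      // next message's content is merged into it instead of emitting a
+      // standalone system entry.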
- mergeWithPreviousContent: author === "system",
+ mergeWithPreviousContent:
+ author === "system" && !convertSystemMessageToHumanContent,
content: [...acc.content, content],
};
},
diff --git a/yarn.lock b/yarn.lock
index 5e6b6ed60a57..1a6472c33e59 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -6032,6 +6032,17 @@ __metadata:
languageName: node
linkType: hard
+"@azure/core-auth@npm:^1.7.1, @azure/core-auth@npm:^1.8.0, @azure/core-auth@npm:^1.9.0":
+ version: 1.9.0
+ resolution: "@azure/core-auth@npm:1.9.0"
+ dependencies:
+ "@azure/abort-controller": ^2.0.0
+ "@azure/core-util": ^1.11.0
+ tslib: ^2.6.2
+ checksum: 4050112188db093c5e01caca0175708c767054c0cea4202430ff43ee42a16430235752ccc0002caea1796c8f01b4f6369c878762bf4c1b2f61af1b7ac13182fc
+ languageName: node
+ linkType: hard
+
"@azure/core-client@npm:^1.3.0":
version: 1.7.3
resolution: "@azure/core-client@npm:1.7.3"
@@ -6132,19 +6143,19 @@ __metadata:
languageName: node
linkType: hard
-"@azure/core-rest-pipeline@npm:^1.2.0":
- version: 1.16.2
- resolution: "@azure/core-rest-pipeline@npm:1.16.2"
+"@azure/core-rest-pipeline@npm:^1.15.1, @azure/core-rest-pipeline@npm:^1.17.0":
+ version: 1.18.0
+ resolution: "@azure/core-rest-pipeline@npm:1.18.0"
dependencies:
"@azure/abort-controller": ^2.0.0
- "@azure/core-auth": ^1.4.0
+ "@azure/core-auth": ^1.8.0
"@azure/core-tracing": ^1.0.1
- "@azure/core-util": ^1.9.0
+ "@azure/core-util": ^1.11.0
"@azure/logger": ^1.0.0
http-proxy-agent: ^7.0.0
https-proxy-agent: ^7.0.0
tslib: ^2.6.2
- checksum: b30bfdb7c49435c5f7c9493be8cd39d2d7a5bc24de4e7a772336f14f607517866d4bd0c97d15672f6aa2b630d27bd22b3561933cde1c415aa2e63ba6c18289b1
+ checksum: 4c8e6572938fd693494ec44477b58afa7c16aed7ea8ef061fcc0cf8a8e602d7ea07676f46b8c850d38e04e5ac4ab10888f88bce8ffac6db1bd3b77bf07a07f29
languageName: node
linkType: hard
@@ -6176,6 +6187,15 @@ __metadata:
languageName: node
linkType: hard
+"@azure/core-tracing@npm:^1.1.1":
+ version: 1.2.0
+ resolution: "@azure/core-tracing@npm:1.2.0"
+ dependencies:
+ tslib: ^2.6.2
+ checksum: 202ebf411a3076bd2c48b7a4c1b63335f53be6dd97f7d53500e3191b7ed0fdad25de219f422e777fde824031fd5c67087654de0304a5c0cd67c38cdcab96117c
+ languageName: node
+ linkType: hard
+
"@azure/core-util@npm:^1.0.0, @azure/core-util@npm:^1.1.0, @azure/core-util@npm:^1.3.0, @azure/core-util@npm:^1.4.0":
version: 1.6.1
resolution: "@azure/core-util@npm:1.6.1"
@@ -6196,7 +6216,17 @@ __metadata:
languageName: node
linkType: hard
-"@azure/core-util@npm:^1.6.1, @azure/core-util@npm:^1.9.0":
+"@azure/core-util@npm:^1.11.0, @azure/core-util@npm:^1.8.1":
+ version: 1.11.0
+ resolution: "@azure/core-util@npm:1.11.0"
+ dependencies:
+ "@azure/abort-controller": ^2.0.0
+ tslib: ^2.6.2
+ checksum: 91e3ec329d9eddaa66be5efb1785dad68dcb48dd779fca36e39db041673230510158ff5ca9ccef9f19c3e4d8e9af29f66a367cfc31a7b94d2541f80ef94ec797
+ languageName: node
+ linkType: hard
+
+"@azure/core-util@npm:^1.6.1":
version: 1.9.2
resolution: "@azure/core-util@npm:1.9.2"
dependencies:
@@ -6206,28 +6236,25 @@ __metadata:
languageName: node
linkType: hard
-"@azure/cosmos@npm:4.0.1-beta.3":
- version: 4.0.1-beta.3
- resolution: "@azure/cosmos@npm:4.0.1-beta.3"
+"@azure/cosmos@npm:^4.2.0":
+ version: 4.2.0
+ resolution: "@azure/cosmos@npm:4.2.0"
dependencies:
- "@azure/abort-controller": ^1.0.0
- "@azure/core-auth": ^1.3.0
- "@azure/core-rest-pipeline": ^1.2.0
- "@azure/core-tracing": ^1.0.0
- debug: ^4.1.1
+ "@azure/abort-controller": ^2.0.0
+ "@azure/core-auth": ^1.7.1
+ "@azure/core-rest-pipeline": ^1.15.1
+ "@azure/core-tracing": ^1.1.1
+ "@azure/core-util": ^1.8.1
fast-json-stable-stringify: ^2.1.0
- jsbi: ^3.1.3
- node-abort-controller: ^3.0.0
+ jsbi: ^4.3.0
priorityqueuejs: ^2.0.0
- semaphore: ^1.0.5
- tslib: ^2.2.0
- universal-user-agent: ^6.0.0
- uuid: ^8.3.0
- checksum: 5223ba77195030898a3aa201f7dbf2c5d99be4f63cefa93c3542c4122d1ad36f3bab22a4113dba961b3c878d7b2b63ee52a269ada35473ebcd2c42c7643ca5a8
+ semaphore: ^1.1.0
+ tslib: ^2.6.2
+ checksum: b571f5a99b12520a2128b8ed0eb61cd66c432e21f533e778cd54a508e89b8bd57e8e05eedc1dcfdb4417c91a675bdb63d6c1cfcd9a21895d444e51de80288f33
languageName: node
linkType: hard
-"@azure/identity@npm:^4.2.0, @azure/identity@npm:^4.2.1":
+"@azure/identity@npm:^4.2.1":
version: 4.4.1
resolution: "@azure/identity@npm:4.4.1"
dependencies:
@@ -6249,6 +6276,28 @@ __metadata:
languageName: node
linkType: hard
+"@azure/identity@npm:^4.5.0":
+ version: 4.5.0
+ resolution: "@azure/identity@npm:4.5.0"
+ dependencies:
+ "@azure/abort-controller": ^2.0.0
+ "@azure/core-auth": ^1.9.0
+ "@azure/core-client": ^1.9.2
+ "@azure/core-rest-pipeline": ^1.17.0
+ "@azure/core-tracing": ^1.0.0
+ "@azure/core-util": ^1.11.0
+ "@azure/logger": ^1.0.0
+ "@azure/msal-browser": ^3.26.1
+ "@azure/msal-node": ^2.15.0
+ events: ^3.0.0
+ jws: ^4.0.0
+ open: ^8.0.0
+ stoppable: ^1.1.0
+ tslib: ^2.2.0
+ checksum: 07d15898f194a220376d8d9c0ee891c93c6da188e44e76810fb781bf3bb7424498a6c1fa5b92c5a4d31f62b7398953f8a5bcf0f0ed57ed72239ce1c4f594b355
+ languageName: node
+ linkType: hard
+
"@azure/logger@npm:^1.0.0, @azure/logger@npm:^1.0.3":
version: 1.0.4
resolution: "@azure/logger@npm:1.0.4"
@@ -6267,6 +6316,15 @@ __metadata:
languageName: node
linkType: hard
+"@azure/msal-browser@npm:^3.26.1":
+ version: 3.27.0
+ resolution: "@azure/msal-browser@npm:3.27.0"
+ dependencies:
+ "@azure/msal-common": 14.16.0
+ checksum: 22c7d087380405f87139a7dfa579b8a49a17d5493e748e1e609f5733bb7549dd5b8558d709f81500f8faa3feebbc2245f8978adc96dc2ce84c54825b37301465
+ languageName: node
+ linkType: hard
+
"@azure/msal-common@npm:14.14.0":
version: 14.14.0
resolution: "@azure/msal-common@npm:14.14.0"
@@ -6274,6 +6332,24 @@ __metadata:
languageName: node
linkType: hard
+"@azure/msal-common@npm:14.16.0":
+ version: 14.16.0
+ resolution: "@azure/msal-common@npm:14.16.0"
+ checksum: 01ec26e22243c5c435b97db085e96f5488733336c142b65a118ee6e523a548d3f17d013147810948cceaee7bdc339362bb9b2799fc9ea53c9d4c9aa10d8987e3
+ languageName: node
+ linkType: hard
+
+"@azure/msal-node@npm:^2.15.0":
+ version: 2.16.2
+ resolution: "@azure/msal-node@npm:2.16.2"
+ dependencies:
+ "@azure/msal-common": 14.16.0
+ jsonwebtoken: ^9.0.0
+ uuid: ^8.3.0
+ checksum: 3676972cf7e1e91ea60773d7054275534239d209989da4c4c1aa790790ba309a2da58d6c593b6465feb1c7028772fce77757227e7ac9631b3a79e4f5a0a81aab
+ languageName: node
+ linkType: hard
+
"@azure/msal-node@npm:^2.9.2":
version: 2.12.0
resolution: "@azure/msal-node@npm:2.12.0"
@@ -10048,6 +10124,13 @@ __metadata:
languageName: node
linkType: hard
+"@faker-js/faker@npm:8.4.1, @faker-js/faker@npm:^8.4.1":
+ version: 8.4.1
+ resolution: "@faker-js/faker@npm:8.4.1"
+ checksum: d802d531f8929562715adc279cfec763c9a4bc596ec67b0ce43fd0ae61b285d2b0eec6f1f4aa852452a63721a842fe7e81926dce7bd92acca94b01e2a1f55f5a
+ languageName: node
+ linkType: hard
+
"@faker-js/faker@npm:^7.6.0":
version: 7.6.0
resolution: "@faker-js/faker@npm:7.6.0"
@@ -10069,13 +10152,6 @@ __metadata:
languageName: node
linkType: hard
-"@faker-js/faker@npm:^8.4.1":
- version: 8.4.1
- resolution: "@faker-js/faker@npm:8.4.1"
- checksum: d802d531f8929562715adc279cfec763c9a4bc596ec67b0ce43fd0ae61b285d2b0eec6f1f4aa852452a63721a842fe7e81926dce7bd92acca94b01e2a1f55f5a
- languageName: node
- linkType: hard
-
"@fastify/busboy@npm:^1.2.1":
version: 1.2.1
resolution: "@fastify/busboy@npm:1.2.1"
@@ -11471,8 +11547,8 @@ __metadata:
version: 0.0.0-use.local
resolution: "@langchain/azure-cosmosdb@workspace:libs/langchain-azure-cosmosdb"
dependencies:
- "@azure/cosmos": 4.0.1-beta.3
- "@azure/identity": ^4.2.0
+ "@azure/cosmos": ^4.2.0
+ "@azure/identity": ^4.5.0
"@jest/globals": ^29.5.0
"@langchain/core": "workspace:*"
"@langchain/openai": "workspace:^"
@@ -11492,7 +11568,7 @@ __metadata:
eslint-plugin-prettier: ^4.2.1
jest: ^29.5.0
jest-environment-node: ^29.6.4
- mongodb: ^6.8.0
+ mongodb: ^6.10.0
prettier: ^2.8.3
release-it: ^15.10.1
rollup: ^4.5.2
@@ -11709,7 +11785,7 @@ __metadata:
"@cloudflare/workers-types": ^4.20230922.0
"@datastax/astra-db-ts": ^1.0.1
"@elastic/elasticsearch": ^8.4.0
- "@faker-js/faker": ^7.6.0
+ "@faker-js/faker": 8.4.1
"@getmetal/metal-sdk": ^4.0.0
"@getzep/zep-cloud": ^1.0.6
"@getzep/zep-js": ^0.9.0
@@ -11829,7 +11905,7 @@ __metadata:
jsdom: ^22.1.0
jsonwebtoken: ^9.0.2
langchain: ">=0.2.3 <0.3.0 || >=0.3.4 <0.4.0"
- langsmith: ^0.2.0
+ langsmith: ^0.2.8
llmonitor: ^0.5.9
lodash: ^4.17.21
lunary: ^0.7.10
@@ -12263,7 +12339,7 @@ __metadata:
jest: ^29.5.0
jest-environment-node: ^29.6.4
js-tiktoken: ^1.0.12
- langsmith: ^0.2.0
+ langsmith: ^0.2.8
ml-matrix: ^6.10.4
mustache: ^4.2.0
p-queue: ^6.6.2
@@ -27675,7 +27751,7 @@ __metadata:
ioredis: ^5.3.2
js-yaml: ^4.1.0
langchain: "workspace:*"
- langsmith: ^0.2.0
+ langsmith: ^0.2.8
mongodb: ^6.3.0
pg: ^8.11.0
pickleparser: ^0.2.1
@@ -32857,10 +32933,10 @@ __metadata:
languageName: node
linkType: hard
-"jsbi@npm:^3.1.3":
- version: 3.2.5
- resolution: "jsbi@npm:3.2.5"
- checksum: 642d1bb139ad1c1e96c4907eb159565e980a0d168487626b493d0d0b7b341da0e43001089d3b21703fe17b18a7a6c0f42c92026f71d54471ed0a0d1b3015ec0f
+"jsbi@npm:^4.3.0":
+ version: 4.3.0
+ resolution: "jsbi@npm:4.3.0"
+ checksum: 27c4f178eb7fd9d1756144066fdebc62f4a0176e877f55e646e8ce84075c13551bd575a316b9959ccdcca9d5dc05a81c9907cfa09f0cfeb43c9777797e36b0e9
languageName: node
linkType: hard
@@ -33329,7 +33405,7 @@ __metadata:
js-tiktoken: ^1.0.12
js-yaml: ^4.1.0
jsonpointer: ^5.0.1
- langsmith: ^0.2.0
+ langsmith: ^0.2.8
openai: ^4.41.1
openapi-types: ^12.1.3
p-retry: 4
@@ -33408,9 +33484,9 @@ __metadata:
languageName: unknown
linkType: soft
-"langsmith@npm:^0.2.0":
- version: 0.2.0
- resolution: "langsmith@npm:0.2.0"
+"langsmith@npm:^0.2.8":
+ version: 0.2.8
+ resolution: "langsmith@npm:0.2.8"
dependencies:
"@types/uuid": ^10.0.0
commander: ^10.0.1
@@ -33423,7 +33499,7 @@ __metadata:
peerDependenciesMeta:
openai:
optional: true
- checksum: 0cd92d0e31526d309af197a3502c93a00ac8c09f6b2864161a18a5c1e8b95b0e8203bad2dfe3b4beb26055fc815a8d70730592a58c9af7e202917b13d01f695c
+ checksum: 8695df08a09b9885b0308c66fbf9802edbe20e286fec3db8faa75ed1893a7aafae014441e311677bb60abb33af49da7f7d8404f55fffbdad5aec61cf65215fc8
languageName: node
linkType: hard
@@ -34972,12 +35048,12 @@ __metadata:
languageName: node
linkType: hard
-"mongodb@npm:^6.3.0":
- version: 6.3.0
- resolution: "mongodb@npm:6.3.0"
+"mongodb@npm:^6.10.0":
+ version: 6.10.0
+ resolution: "mongodb@npm:6.10.0"
dependencies:
- "@mongodb-js/saslprep": ^1.1.0
- bson: ^6.2.0
+ "@mongodb-js/saslprep": ^1.1.5
+ bson: ^6.7.0
mongodb-connection-string-url: ^3.0.0
peerDependencies:
"@aws-sdk/credential-providers": ^3.188.0
@@ -35002,16 +35078,16 @@ __metadata:
optional: true
socks:
optional: true
- checksum: ebc5d9dbd1299321b6873e86eb4ea635316f97450644811db24ce2b01432b1c641def864facf2eab6f0c0c5c360c318108ea5555142f55177ca4c33991c6d7c4
+ checksum: b8e7ab9fb84181cb020b5fef5fedd90a5fc12140e688fa12ba588d523a958bb9f8790bfaceeca9f594171794eda0f56be855d7d0588705db82b3de7bf5e2352c
languageName: node
linkType: hard
-"mongodb@npm:^6.8.0":
- version: 6.8.0
- resolution: "mongodb@npm:6.8.0"
+"mongodb@npm:^6.3.0":
+ version: 6.3.0
+ resolution: "mongodb@npm:6.3.0"
dependencies:
- "@mongodb-js/saslprep": ^1.1.5
- bson: ^6.7.0
+ "@mongodb-js/saslprep": ^1.1.0
+ bson: ^6.2.0
mongodb-connection-string-url: ^3.0.0
peerDependencies:
"@aws-sdk/credential-providers": ^3.188.0
@@ -35036,7 +35112,7 @@ __metadata:
optional: true
socks:
optional: true
- checksum: 5a744e9bf0f21a6f639d935b807ea4c4502f6c38719413e7c6dbed2323786c347a877e905bfd711259f552b21774a5d9d8a9271c97ed1634804f97f10addd440
+ checksum: ebc5d9dbd1299321b6873e86eb4ea635316f97450644811db24ce2b01432b1c641def864facf2eab6f0c0c5c360c318108ea5555142f55177ca4c33991c6d7c4
languageName: node
linkType: hard
@@ -35331,13 +35407,6 @@ __metadata:
languageName: node
linkType: hard
-"node-abort-controller@npm:^3.0.0":
- version: 3.1.1
- resolution: "node-abort-controller@npm:3.1.1"
- checksum: 2c340916af9710328b11c0828223fc65ba320e0d082214a211311bf64c2891028e42ef276b9799188c4ada9e6e1c54cf7a0b7c05dd9d59fcdc8cd633304c8047
- languageName: node
- linkType: hard
-
"node-addon-api@npm:^3.0.0":
version: 3.2.1
resolution: "node-addon-api@npm:3.2.1"
@@ -40028,7 +40097,7 @@ __metadata:
languageName: node
linkType: hard
-"semaphore@npm:^1.0.5":
+"semaphore@npm:^1.1.0":
version: 1.1.0
resolution: "semaphore@npm:1.1.0"
checksum: d2445d232ad9959048d4748ef54eb01bc7b60436be2b42fb7de20c4cffacf70eafeeecd3772c1baf408cfdce3805fa6618a4389590335671f18cde54ef3cfae4