From 51441b561f264e9f0dbbf698c7889331d5b3076e Mon Sep 17 00:00:00 2001 From: Predrag Gruevski <2348618+obi1kenobi@users.noreply.github.com> Date: Mon, 16 Oct 2023 16:48:52 -0400 Subject: [PATCH 1/4] Mark the docs scripts files as executable. (#2932) --- docs/build.sh | 0 docs/dev.sh | 0 docs/nodemon.sh | 0 docs/serve.sh | 0 4 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 docs/build.sh mode change 100644 => 100755 docs/dev.sh mode change 100644 => 100755 docs/nodemon.sh mode change 100644 => 100755 docs/serve.sh diff --git a/docs/build.sh b/docs/build.sh old mode 100644 new mode 100755 diff --git a/docs/dev.sh b/docs/dev.sh old mode 100644 new mode 100755 diff --git a/docs/nodemon.sh b/docs/nodemon.sh old mode 100644 new mode 100755 diff --git a/docs/serve.sh b/docs/serve.sh old mode 100644 new mode 100755 From 5f53963a3220444ff6a809cc515dab2e71db6f11 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 16 Oct 2023 14:20:24 -0700 Subject: [PATCH 2/4] Add more memory to linter (#2934) --- langchain/package.json | 2 +- package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/langchain/package.json b/langchain/package.json index a5fe2d503ed8..bddf00193a98 100644 --- a/langchain/package.json +++ b/langchain/package.json @@ -701,7 +701,7 @@ "build:esm": "tsc --outDir dist/ && rimraf dist/tests dist/**/tests", "build:cjs": "tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rimraf dist-cjs", "build:watch": "node scripts/create-entrypoints.js && tsc --outDir dist/ --watch", - "lint": "eslint src && dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", + "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint src && dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint:fix": "yarn lint --fix", "precommit": "lint-staged", "clean": "rimraf dist/ && node scripts/create-entrypoints.js pre", diff --git a/package.json b/package.json index 8dd5f2445016..0771a6178c93 100644 --- a/package.json +++ b/package.json @@ -19,7 +19,7 @@ "build": "turbo run build --filter=\"!docs_skeleton\" --filter=\"!test-exports-*\"", "format": "turbo run format", "format:check": "turbo run format:check", - "lint": "turbo run lint --filter=\"!docs_skeleton\"", + "lint": "turbo run lint --filter=\"!docs_skeleton\" --concurrency 1", "lint:fix": "yarn lint -- --fix", "test": "yarn test:unit && yarn workspace langchain build && yarn test:exports:docker", "test:unit": "turbo run test --filter langchain", From 1f734dbc8f7f3cb66ac8438c01d6403c4dedd587 Mon Sep 17 00:00:00 2001 From: Predrag Gruevski <2348618+obi1kenobi@users.noreply.github.com> Date: Mon, 16 Oct 2023 17:53:09 -0400 Subject: [PATCH 3/4] Update repo path to point to `langchain-ai` GitHub org. 
(#2933) --- .devcontainer/README.md | 4 ++-- .github/contributing/INTEGRATIONS.md | 20 +++++++++---------- .../integrations/DOCUMENT_LOADERS.md | 4 ++-- .github/contributing/integrations/LLMS.md | 8 ++++---- .github/contributing/integrations/MEMORY.md | 6 +++--- .../integrations/MESSAGE_STORES.md | 8 ++++---- .github/contributing/integrations/TOOLS.md | 4 ++-- .../integrations/VECTOR_STORES.md | 4 ++-- .github/pull_request_template.md | 6 +++--- CONTRIBUTING.md | 10 +++++----- README.md | 6 +++--- docs/docs_skeleton/docs/community.md | 6 +++--- docs/docs_skeleton/docusaurus.config.js | 4 ++-- .../vectorstores/integrations/opensearch.md | 6 +++--- .../use_cases/autonomous_agents/sales_gpt.mdx | 2 +- docs/snippets/get_started/introduction.mdx | 2 +- .../model_io/models/chat/how_to/streaming.mdx | 4 ++-- .../models/llms/how_to/streaming_llm.mdx | 2 +- examples/src/document_loaders/github.ts | 2 +- .../document_loaders/github_ignore_paths.ts | 2 +- .../src/document_loaders/github_submodules.ts | 2 +- langchain/README.md | 8 ++++---- .../document_loaders/tests/github.int.test.ts | 6 +++--- .../src/document_loaders/tests/github.test.ts | 4 ++-- langchain/src/prompts/prompt.ts | 2 +- langchain/src/util/axios-fetch-adapter.js | 2 +- langchain/src/util/env.ts | 2 +- langchain/src/util/event-source-parse.ts | 2 +- 28 files changed, 69 insertions(+), 69 deletions(-) diff --git a/.devcontainer/README.md b/.devcontainer/README.md index 91568809b45a..65d6aad3ea31 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -7,14 +7,14 @@ You can use the dev container configuration in this folder to build and run the ## GitHub Codespaces [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/hwchase17/langchainjs) You may use the button above, or follow these steps to open this repo in a Codespace: -1. Click the **Code** drop-down menu at the top of https://github.com/hwchase17/langchainjs. +1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchainjs. 1. Click on the **Codespaces** tab. 1. Click **Create codespace on main** . For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace). ## VS Code Dev Containers -[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchainjs) +[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs) If you already have VS Code and Docker installed, you can use the button above to get started. This will cause VS Code to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin up a dev container for use. 
diff --git a/.github/contributing/INTEGRATIONS.md b/.github/contributing/INTEGRATIONS.md index 888de9659379..69dfcaa3d401 100644 --- a/.github/contributing/INTEGRATIONS.md +++ b/.github/contributing/INTEGRATIONS.md @@ -1,6 +1,6 @@ # Contributing Integrations to LangChain -In addition to the [general contribution guidelines](https://github.com/hwchase17/langchainjs/blob/main/CONTRIBUTING.md), there are a few extra things to consider when contributing third-party integrations to LangChain that will be covered here. The goal of this page is to help you draft PRs that take these considerations into account, and can therefore be merged sooner. +In addition to the [general contribution guidelines](https://github.com/langchain-ai/langchainjs/blob/main/CONTRIBUTING.md), there are a few extra things to consider when contributing third-party integrations to LangChain that will be covered here. The goal of this page is to help you draft PRs that take these considerations into account, and can therefore be merged sooner. Integrations tend to fall into a set number of categories, each of which will have their own section below. Please read the [general guidelines](#general-concepts), then see the [integration-specific guidelines and example PRs](#integration-specific-guidelines-and-example-prs) section at the end of this page for additional information and examples. @@ -10,7 +10,7 @@ The following guidelines apply broadly to all type of integrations: ### Creating a separate entrypoint -You should generally not export your new module from an `index.ts` file that contains many other exports. Instead, you should add a separate entrypoint for your integration in [`langchain/scripts/create-entrypoints.js`](https://github.com/hwchase17/langchainjs/blob/main/langchain/scripts/create-entrypoints.js) within the `entrypoints` object: +You should generally not export your new module from an `index.ts` file that contains many other exports. Instead, you should add a separate entrypoint for your integration in [`langchain/scripts/create-entrypoints.js`](https://github.com/langchain-ai/langchainjs/blob/main/langchain/scripts/create-entrypoints.js) within the `entrypoints` object: ```js import * as fs from "fs"; @@ -59,13 +59,13 @@ A user would then import your new vector store as `import { LangCoVectorStore } ### Third-party dependencies -You may use third-party dependencies in new integrations, but they should be added as `peerDependencies` and `devDependencies` with an entry under `peerDependenciesMeta` in [`langchain/package.json`](https://github.com/hwchase17/langchainjs/blob/main/langchain/package.json), **not under any core `dependencies` list**. This keeps the overall package size small, as only people who are using your integration will need to install, and allows us to support a wider range of runtimes. +You may use third-party dependencies in new integrations, but they should be added as `peerDependencies` and `devDependencies` with an entry under `peerDependenciesMeta` in [`langchain/package.json`](https://github.com/langchain-ai/langchainjs/blob/main/langchain/package.json), **not under any core `dependencies` list**. This keeps the overall package size small, as only people who are using your integration will need to install, and allows us to support a wider range of runtimes. 
We suggest using caret syntax (`^`) for peer dependencies to support a wider range of people trying to use them as well as to be somewhat tolerant to non-major version updates, which should (theoretically) be the only breaking ones. Please make sure all introduced dependencies are permissively licensed (MIT is recommended) and well-supported and maintained. -You must also add your new entrypoint under `requiresOptionalDependency` in the [`create-entrypoints.js`](https://github.com/hwchase17/langchainjs/blob/main/langchain/scripts/create-entrypoints.js) file to avoid breaking the build: +You must also add your new entrypoint under `requiresOptionalDependency` in the [`create-entrypoints.js`](https://github.com/langchain-ai/langchainjs/blob/main/langchain/scripts/create-entrypoints.js) file to avoid breaking the build: ```js // Entrypoints in this list require an optional dependency to be installed. @@ -146,13 +146,13 @@ As with all contributions, make sure you run `yarn lint` and `yarn format` so th Below are links to guides with advice and tips for specific types of integrations: -- [LLM providers](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/LLMS.md) (e.g. OpenAI's GPT-3) +- [LLM providers](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/LLMS.md) (e.g. OpenAI's GPT-3) - Chat model providers (TODO) (e.g. Anthropic's Claude, OpenAI's GPT-4) -- [Memory](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/MEMORY.md) (used to give an LLM or chat model context of past conversations, e.g. Motörhead) -- [Vector stores](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/VECTOR_STORES.md) (e.g. Pinecone) -- [Persistent message stores](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/MESSAGE_STORES.md) (used to persistently store and load raw chat histories, e.g. Redis) -- [Document loaders](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/DOCUMENT_LOADERS.md) (used to load documents for later storage into vector stores, e.g. Apify) +- [Memory](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/MEMORY.md) (used to give an LLM or chat model context of past conversations, e.g. Motörhead) +- [Vector stores](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/VECTOR_STORES.md) (e.g. Pinecone) +- [Persistent message stores](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/MESSAGE_STORES.md) (used to persistently store and load raw chat histories, e.g. Redis) +- [Document loaders](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/DOCUMENT_LOADERS.md) (used to load documents for later storage into vector stores, e.g. Apify) - Embeddings (TODO) (e.g. Cohere) -- [Tools](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/TOOLS.md) (used for agents, e.g. the SERP API tool) +- [Tools](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/TOOLS.md) (used for agents, e.g. the SERP API tool) This is a living document, so please make a pull request if we're missing anything useful! 
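As a quick illustration of the entrypoint and optional-dependency guideline above, a new integration's wiring might look roughly like the sketch below. The `langco` entrypoint name and the `@langco/client` package are hypothetical placeholders invented for illustration; only the general shape (an entry in `entrypoints`, an entry in `requiresOptionalDependency`, and an optional peer dependency in `langchain/package.json`) reflects the guideline.

```ts
// Sketch only: a hypothetical "langco" vector store integration.
// In langchain/scripts/create-entrypoints.js, map the public entrypoint to its source module:
const entrypoints = {
  // ...existing entrypoints...
  "vectorstores/langco": "vectorstores/langco", // exposes `langchain/vectorstores/langco`
};

// Entrypoints that need an optional peer dependency must also be listed here,
// so builds do not fail for users who never install that dependency:
const requiresOptionalDependency = [
  // ...existing entries...
  "vectorstores/langco",
];

// In langchain/package.json, the client SDK is declared as an optional peer dependency
// (plus a devDependency for tests), never as a core dependency:
// "peerDependencies":     { "@langco/client": "^1.0.0" },
// "peerDependenciesMeta": { "@langco/client": { "optional": true } },
// "devDependencies":      { "@langco/client": "^1.0.0" }
```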
diff --git a/.github/contributing/integrations/DOCUMENT_LOADERS.md b/.github/contributing/integrations/DOCUMENT_LOADERS.md index 549364da3059..b53e5cb625a1 100644 --- a/.github/contributing/integrations/DOCUMENT_LOADERS.md +++ b/.github/contributing/integrations/DOCUMENT_LOADERS.md @@ -4,8 +4,8 @@ This page contains some specific guidelines and examples for contributing integr Document loaders are classes that pull in text from a given source and load them into chunks called **documents** for later use in queryable vector stores. Some example sources include PDFs, websites, and Notion docs. -**Make sure you read the [general guidelines page](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** +**Make sure you read the [general guidelines page](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** ## Example PR -You can take a look at this PR adding Apify Datasets as an example when creating your own document loader integrations: https://github.com/hwchase17/langchainjs/pull/1271 +You can take a look at this PR adding Apify Datasets as an example when creating your own document loader integrations: https://github.com/langchain-ai/langchainjs/pull/1271 diff --git a/.github/contributing/integrations/LLMS.md b/.github/contributing/integrations/LLMS.md index 0aeab7294ef8..d90f9cc9e89b 100644 --- a/.github/contributing/integrations/LLMS.md +++ b/.github/contributing/integrations/LLMS.md @@ -2,21 +2,21 @@ This page contains some specific guidelines and examples for contributing integrations with third-party LLM providers. -**Make sure you read the [general guidelines page](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** +**Make sure you read the [general guidelines page](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** ## Example PR -We'll be referencing this PR adding Amazon SageMaker endpoints as an example: https://github.com/hwchase17/langchainjs/pull/1267 +We'll be referencing this PR adding Amazon SageMaker endpoints as an example: https://github.com/langchain-ai/langchainjs/pull/1267 ## General ideas The general idea for adding new third-party LLMs is to subclass the `LLM` class and implement the `_call` method. As the name suggests, this method should call the LLM with the given prompt and transform the LLM response into some generated string output. -The example PR for Amazon SageMaker is an interesting example of this because SageMaker endpoints can host a wide variety of models with non-standard input and output formats. Therefore, the contributor added a [simple abstract class](https://github.com/hwchase17/langchainjs/pull/1267/files#diff-4496012d30c03b969546b14039f8deee1b5ba9152a86222100d76c4da77f060cR35) that a user can implement depending on which specific model they are hosting that transforms input from LangChain into a format expected by the model and output into a plain string. +The example PR for Amazon SageMaker is an interesting example of this because SageMaker endpoints can host a wide variety of models with non-standard input and output formats. 
Therefore, the contributor added a [simple abstract class](https://github.com/langchain-ai/langchainjs/pull/1267/files#diff-4496012d30c03b969546b14039f8deee1b5ba9152a86222100d76c4da77f060cR35) that a user can implement depending on which specific model they are hosting that transforms input from LangChain into a format expected by the model and output into a plain string. Other third-party providers like OpenAI and Anthropic will have a defined input and output format, and in those cases, the input and output transformations should happen within the `_call` method. ## Wrap LLM requests in this.caller -The base LLM class contains an instance property called `caller` that will automatically handle retries, errors, timeouts, and more. You should wrap calls to the LLM in `this.caller.call` [as shown here](https://github.com/hwchase17/langchainjs/pull/1267/files#diff-4496012d30c03b969546b14039f8deee1b5ba9152a86222100d76c4da77f060cR148) +The base LLM class contains an instance property called `caller` that will automatically handle retries, errors, timeouts, and more. You should wrap calls to the LLM in `this.caller.call` [as shown here](https://github.com/langchain-ai/langchainjs/pull/1267/files#diff-4496012d30c03b969546b14039f8deee1b5ba9152a86222100d76c4da77f060cR148) diff --git a/.github/contributing/integrations/MEMORY.md b/.github/contributing/integrations/MEMORY.md index 2800338617d9..dd2dc2ab3f2e 100644 --- a/.github/contributing/integrations/MEMORY.md +++ b/.github/contributing/integrations/MEMORY.md @@ -2,17 +2,17 @@ This page contains some specific guidelines and examples for contributing integrations with third-party memory providers. -In LangChain, memory differs from [message stores](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/MESSAGE_STORES.md) in that memory does not actually handle persistently storing messages, but acts as a representation of the LLM or chat model's awareness of past conversations, while message stores handle the actual message data persistence. For example, memory may perform other transformations on the messages, like summarization, or may emphasize specific pieces of pertinent information. Memory may rely on message stores as a backing class. +In LangChain, memory differs from [message stores](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/MESSAGE_STORES.md) in that memory does not actually handle persistently storing messages, but acts as a representation of the LLM or chat model's awareness of past conversations, while message stores handle the actual message data persistence. For example, memory may perform other transformations on the messages, like summarization, or may emphasize specific pieces of pertinent information. Memory may rely on message stores as a backing class. Another key difference is that message stores are only used with chat models. Before getting started, think about whether your planned integration would be more suited as a message store or as memory! 
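Returning to the LLM-provider guideline earlier in this patch (subclass `LLM`, implement `_call`, and route network requests through `this.caller.call`), a bare-bones sketch might look like the following. The "ExampleCo" provider, its endpoint URL, and its response shape are invented for illustration; only the class structure follows the guideline.

```ts
import { LLM, type BaseLLMParams } from "langchain/llms/base";

// Sketch of a hypothetical third-party LLM wrapper; "ExampleCo" and its API are invented.
export interface ExampleCoInput extends BaseLLMParams {
  apiKey?: string;
}

export class ExampleCo extends LLM {
  apiKey?: string;

  constructor(fields?: ExampleCoInput) {
    super(fields ?? {});
    this.apiKey = fields?.apiKey;
  }

  _llmType(): string {
    return "exampleco";
  }

  async _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string> {
    // Wrap the network request in `this.caller.call` so retries and timeouts are handled for you.
    const json = await this.caller.call(async () => {
      const res = await fetch("https://api.example.com/v1/complete", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.apiKey}`,
        },
        body: JSON.stringify({ prompt }),
      });
      if (!res.ok) throw new Error(`ExampleCo request failed: ${res.status}`);
      return res.json();
    });
    // Transform the provider-specific response (assumed field name) into a plain generated string.
    return json.completion as string;
  }
}
```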
-**Make sure you read the [general guidelines page](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** +**Make sure you read the [general guidelines page](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** ## Example PR -You can use this PR adding Motorhead memory as a reference: https://github.com/hwchase17/langchainjs/pull/598 +You can use this PR adding Motorhead memory as a reference: https://github.com/langchain-ai/langchainjs/pull/598 ## General ideas diff --git a/.github/contributing/integrations/MESSAGE_STORES.md b/.github/contributing/integrations/MESSAGE_STORES.md index fc70634c3e99..9beca495ce12 100644 --- a/.github/contributing/integrations/MESSAGE_STORES.md +++ b/.github/contributing/integrations/MESSAGE_STORES.md @@ -2,18 +2,18 @@ This page contains some specific guidelines and examples for contributing integrations with third-party message stores. -In LangChain, message stores differ from [memory](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/integrations/MEMORY.md) in that they simply serialize and persistently store chat messages, while memory, despite its name, does not actually handle persistently storing messages, but acts as a representation of the LLM or chat model's awareness of past conversations. For example, memory may perform other transformations on the messages, like summarization, or may emphasize specific pieces of pertinent information. Memory may rely on message stores as a backing class. +In LangChain, message stores differ from [memory](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/integrations/MEMORY.md) in that they simply serialize and persistently store chat messages, while memory, despite its name, does not actually handle persistently storing messages, but acts as a representation of the LLM or chat model's awareness of past conversations. For example, memory may perform other transformations on the messages, like summarization, or may emphasize specific pieces of pertinent information. Memory may rely on message stores as a backing class. Another key difference is that message stores are only used with chat models. Before getting started, think about whether your planned integration would be more suited as a message store or as memory! -**Make sure you read the [general guidelines page](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** +**Make sure you read the [general guidelines page](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** ## Example PR -We'll be referencing this PR adding a Redis-backed message store as an example: https://github.com/hwchase17/langchainjs/pull/951 +We'll be referencing this PR adding a Redis-backed message store as an example: https://github.com/langchain-ai/langchainjs/pull/951 ## Serializing and deserializing chat messages -LangChain messages implement a `BaseMessage` class that contains information like the message's content and role of the speaker. In order to provide a standard way to map these messages to a storable JSON format, you should use the utility `mapChatMessagesToStoredMessages` and `mapStoredMessagesToChatMessages` functions as [shown here](https://github.com/hwchase17/langchainjs/pull/951/files#diff-4c638d231a5e5bb29a149c6fb7d8f4b24aaf1b6fcc2cc2a728346eaebb6c9c47R17). 
+LangChain messages implement a `BaseMessage` class that contains information like the message's content and role of the speaker. In order to provide a standard way to map these messages to a storable JSON format, you should use the utility `mapChatMessagesToStoredMessages` and `mapStoredMessagesToChatMessages` functions as [shown here](https://github.com/langchain-ai/langchainjs/pull/951/files#diff-4c638d231a5e5bb29a149c6fb7d8f4b24aaf1b6fcc2cc2a728346eaebb6c9c47R17). diff --git a/.github/contributing/integrations/TOOLS.md b/.github/contributing/integrations/TOOLS.md index 1fa999237db8..4acd23a2af56 100644 --- a/.github/contributing/integrations/TOOLS.md +++ b/.github/contributing/integrations/TOOLS.md @@ -2,11 +2,11 @@ This page contains some specific guidelines and examples for contributing integrations with third-party APIs within tools. -**Make sure you read the [general guidelines page](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** +**Make sure you read the [general guidelines page](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** ## Example PR -You can use this PR adding an AWSLambda tool as a reference when creating your own tools (minus the dynamic import!): https://github.com/hwchase17/langchainjs/pull/727 +You can use this PR adding an AWSLambda tool as a reference when creating your own tools (minus the dynamic import!): https://github.com/langchain-ai/langchainjs/pull/727 ## Guidelines diff --git a/.github/contributing/integrations/VECTOR_STORES.md b/.github/contributing/integrations/VECTOR_STORES.md index 3b3fb68fbbf0..dbf788ed8da9 100644 --- a/.github/contributing/integrations/VECTOR_STORES.md +++ b/.github/contributing/integrations/VECTOR_STORES.md @@ -2,8 +2,8 @@ This page contains some specific guidelines and examples for contributing integrations with third-party vector store providers. -**Make sure you read the [general guidelines page](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** +**Make sure you read the [general guidelines page](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) first!** ## Example PR -You can use this PR adding Faiss as a reference when creating your own vector store integration: https://github.com/hwchase17/langchainjs/pull/685 +You can use this PR adding Faiss as a reference when creating your own vector store integration: https://github.com/langchain-ai/langchainjs/pull/685 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 6a5f3c08ef87..fbe852ecaa7a 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -2,10 +2,10 @@ Thank you for contributing to LangChainJS! Your PR will appear in our next release under the title you set above. Please make sure it highlights your valuable contribution. To help streamline the review process, please make sure you read our contribution guidelines: -https://github.com/hwchase17/langchainjs/blob/main/CONTRIBUTING.md +https://github.com/langchain-ai/langchainjs/blob/main/CONTRIBUTING.md If you are adding an integration (e.g. 
a new LLM, vector store, or memory), please also read our additional guidelines for integrations: -https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md +https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md Replace this block with a description of the change, the issue it fixes (if applicable), and relevant context. @@ -14,4 +14,4 @@ Finally, we'd love to show appreciation for your contribution - if you'd like us -Fixes # (issue) \ No newline at end of file +Fixes # (issue) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2e7484c71c63..71533c809bfe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,7 +12,7 @@ To contribute to this project, please follow a ["fork and pull request"](https:/ If you are not sure what to work on, we have a few suggestions: -- Look at the issues with the [help wanted](https://github.com/hwchase17/langchainjs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label. These are issues that we think are good targets for contributors. If you are interested in working on one of these, please comment on the issue so that we can assign it to you. And if you have any questions let us know, we're happy to guide you! +- Look at the issues with the [help wanted](https://github.com/langchain-ai/langchainjs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label. These are issues that we think are good targets for contributors. If you are interested in working on one of these, please comment on the issue so that we can assign it to you. And if you have any questions let us know, we're happy to guide you! - At the moment our main focus is reaching parity with the Python version for features and base functionality. If you are interested in working on a specific integration or feature, please let us know and we can help you get started. ### New abstractions @@ -21,9 +21,9 @@ We aim to keep the same APIs between the Python and JS versions of LangChain, wh ### Want to add a specific integration? -LangChain supports several different types of integrations with third-party providers and frameworks, including LLM providers (e.g. [OpenAI](https://github.com/hwchase17/langchainjs/blob/main/langchain/src/llms/openai.ts)), vector stores (e.g. [FAISS](https://github.com/ewfian/langchainjs/blob/main/langchain/src/vectorstores/faiss.ts)), document loaders (e.g. [Apify](https://github.com/hwchase17/langchainjs/blob/main/langchain/src/document_loaders/web/apify_dataset.ts)) persistent message history stores (e.g. [Redis](https://github.com/hwchase17/langchainjs/blob/main/langchain/src/stores/message/redis.ts)), and more. +LangChain supports several different types of integrations with third-party providers and frameworks, including LLM providers (e.g. [OpenAI](https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/llms/openai.ts)), vector stores (e.g. [FAISS](https://github.com/ewfian/langchainjs/blob/main/langchain/src/vectorstores/faiss.ts)), document loaders (e.g. [Apify](https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/document_loaders/web/apify_dataset.ts)) persistent message history stores (e.g. [Redis](https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/stores/message/redis.ts)), and more. 
-We welcome such contributions, but ask that you read our dedicated [integration contribution guide](https://github.com/hwchase17/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) for specific details and patterns to consider before opening a pull request. +We welcome such contributions, but ask that you read our dedicated [integration contribution guide](https://github.com/langchain-ai/langchainjs/blob/main/.github/contributing/INTEGRATIONS.md) for specific details and patterns to consider before opening a pull request. ### Want to add a feature that's already in Python? @@ -39,7 +39,7 @@ https://langchain-translator.vercel.app/ ### 🚩 GitHub Issues -Our [issues](https://github.com/hwchase17/langchainjs/issues) page contains +Our [issues](https://github.com/langchain-ai/langchainjs/issues) page contains with bugs, improvements, and feature requests. If you start working on an issue, please assign it to yourself. @@ -170,7 +170,7 @@ If you add support for a new external API, please add a new integration test. Integration tests should be called `*.int.test.ts`. Note that most integration tests require credentials or other setup. You will likely need to set up a `langchain/.env` file -like the example [here](https://github.com/hwchase17/langchainjs/blob/main/langchain/.env.example). +like the example [here](https://github.com/langchain-ai/langchainjs/blob/main/langchain/.env.example). We generally recommend only running integration tests with `yarn test:single`, but if you want to run all integration tests, run: diff --git a/README.md b/README.md index 15e7774a36bd..d99e4e0be312 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,13 @@ ⚡ Building applications with LLMs through composability ⚡ -[![CI](https://github.com/hwchase17/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/hwchase17/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchainjs) +[![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs) [](https://codespaces.new/hwchase17/langchainjs) Looking for the Python version? 
Check out [LangChain](https://github.com/hwchase17/langchain). -To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com). -[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications. +To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com). +[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications. Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to get off the waitlist or speak with our sales team ## ⚡️ Quick Install diff --git a/docs/docs_skeleton/docs/community.md b/docs/docs_skeleton/docs/community.md index 8b80dfaa4356..b8d478e5dcee 100644 --- a/docs/docs_skeleton/docs/community.md +++ b/docs/docs_skeleton/docs/community.md @@ -17,8 +17,8 @@ Whether you’re new to LangChain, looking to go deeper, or just want to get mor LangChain is the product of over 5,000+ contributions by 1,500+ contributors, and there is ******still****** so much to do together. Here are some ways to get involved: -- **[Open a pull request](https://github.com/hwchase17/langchainjs/issues):** we’d appreciate all forms of contributions–new features, infrastructure improvements, better documentation, bug fixes, etc. If you have an improvement or an idea, we’d love to work on it with you. -- **[Read our contributor guidelines:](https://github.com/hwchase17/langchainjs/blob/main/CONTRIBUTING.md)** We ask contributors to follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow, run a few local checks for formatting, linting, and testing before submitting, and follow certain documentation and testing conventions. +- **[Open a pull request](https://github.com/langchain-ai/langchainjs/issues):** we’d appreciate all forms of contributions–new features, infrastructure improvements, better documentation, bug fixes, etc. If you have an improvement or an idea, we’d love to work on it with you. +- **[Read our contributor guidelines:](https://github.com/langchain-ai/langchainjs/blob/main/CONTRIBUTING.md)** We ask contributors to follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow, run a few local checks for formatting, linting, and testing before submitting, and follow certain documentation and testing conventions. - **Become an expert:** our experts help the community by answering product questions in Discord. If that’s a role you’d like to play, we’d be so grateful! (And we have some special experts-only goodies/perks we can tell you more about). Send us an email to introduce yourself at hello@langchain.dev and we’ll take it from there! - **Integrate with LangChain:** if your product integrates with LangChain–or aspires to–we want to help make sure the experience is as smooth as possible for you and end users. Send us an email at hello@langchain.dev and tell us what you’re working on. - **Become an Integration Maintainer:** Partner with our team to ensure your integration stays up-to-date and talk directly with users (and answer their inquiries) in our Discord. Introduce yourself at hello@langchain.dev if you’d like to explore this role. 
@@ -48,5 +48,5 @@ Here’s where our team hangs out, talks shop, spotlights cool work, and shares - **[Twitter](https://twitter.com/LangChainAI):** we post about what we’re working on and what cool things we’re seeing in the space. If you tag @langchainai in your post, we’ll almost certainly see it, and can snow you some love! - **[Discord](https://discord.gg/6adMQxSpJS):** connect with with >30k developers who are building with LangChain -- **[GitHub](https://github.com/hwchase17/langchainjs):** open pull requests, contribute to a discussion, and/or contribute +- **[GitHub](https://github.com/langchain-ai/langchainjs):** open pull requests, contribute to a discussion, and/or contribute - **[Subscribe to our bi-weekly Release Notes](https://6w1pwbss0py.typeform.com/to/KjZB1auB):** a twice/month email roundup of the coolest things going on in our orbit diff --git a/docs/docs_skeleton/docusaurus.config.js b/docs/docs_skeleton/docusaurus.config.js index 7f07aaba9a52..24381d357345 100644 --- a/docs/docs_skeleton/docusaurus.config.js +++ b/docs/docs_skeleton/docusaurus.config.js @@ -182,7 +182,7 @@ const config = { }, // Please keep GitHub link to the right for consistency. { - href: "https://github.com/hwchase17/langchainjs", + href: "https://github.com/langchain-ai/langchainjs", className: "header-github-link", position: "right", "aria-label": "GitHub repository", @@ -214,7 +214,7 @@ const config = { }, { label: "JS/TS", - href: "https://github.com/hwchase17/langchainjs", + href: "https://github.com/langchain-ai/langchainjs", }, ], }, diff --git a/docs/extras/modules/data_connection/vectorstores/integrations/opensearch.md b/docs/extras/modules/data_connection/vectorstores/integrations/opensearch.md index 6612e2681be2..b506e3f70837 100644 --- a/docs/extras/modules/data_connection/vectorstores/integrations/opensearch.md +++ b/docs/extras/modules/data_connection/vectorstores/integrations/opensearch.md @@ -18,7 +18,7 @@ Langchain.js accepts [@opensearch-project/opensearch](https://opensearch.org/doc npm install -S @opensearch-project/opensearch ``` -You'll also need to have an OpenSearch instance running. You can use the [official Docker image](https://opensearch.org/docs/latest/opensearch/install/docker/) to get started. You can also find an example docker-compose file [here](https://github.com/hwchase17/langchainjs/blob/main/examples/src/indexes/vector_stores/opensearch/docker-compose.yml). +You'll also need to have an OpenSearch instance running. You can use the [official Docker image](https://opensearch.org/docs/latest/opensearch/install/docker/) to get started. You can also find an example docker-compose file [here](https://github.com/langchain-ai/langchainjs/blob/main/examples/src/indexes/vector_stores/opensearch/docker-compose.yml). ## Index docs @@ -96,7 +96,7 @@ const chain = VectorDBQAChain.fromLLM(model, vectorStore, { const response = await chain.call({ query: "What is opensearch?" 
}); console.log(JSON.stringify(response, null, 2)); -/* +/* { "text": " Opensearch is a collection of technologies that allow search engines to publish search results in a standard format, making it easier for users to search across multiple sites.", "sourceDocuments": [ @@ -107,6 +107,6 @@ console.log(JSON.stringify(response, null, 2)); } } ] - } + } */ ``` diff --git a/docs/extras/use_cases/autonomous_agents/sales_gpt.mdx b/docs/extras/use_cases/autonomous_agents/sales_gpt.mdx index 6d6f9223435f..747b1f48a869 100644 --- a/docs/extras/use_cases/autonomous_agents/sales_gpt.mdx +++ b/docs/extras/use_cases/autonomous_agents/sales_gpt.mdx @@ -21,7 +21,7 @@ Additionally, the AI Sales agent has access to tools, which allow it to interact Here, we show how the AI Sales Agent can use a **Product Knowledge Base** to speak about a particular's company offerings, hence increasing relevance and reducing hallucinations. -We leverage the [`langchain`](https://github.com/hwchase17/langchainjs) library in this implementation, specifically [Custom Agent Configuration](https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval) and are inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) architecture. +We leverage the [`langchain`](https://github.com/langchain-ai/langchainjs) library in this implementation, specifically [Custom Agent Configuration](https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval) and are inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) architecture. ## Import Libraries and Set Up Your Environment diff --git a/docs/snippets/get_started/introduction.mdx b/docs/snippets/get_started/introduction.mdx index 0f6b8db354da..c78b13c7452c 100644 --- a/docs/snippets/get_started/introduction.mdx +++ b/docs/snippets/get_started/introduction.mdx @@ -14,7 +14,7 @@ Off-the-shelf chains make it easy to get started. For more complex applications We recommend following our [Quickstart](/docs/get_started/quickstart) guide to familiarize yourself with the framework by building your first LangChain application. -_**Note**: These docs are for the LangChain [JS/TS package](https://github.com/hwchase17/langchainjs). For documentation on [the Python version](https://github.com/hwchase17/langchain), [head here](https://python.langchain.com/docs)._ +_**Note**: These docs are for the LangChain [JS/TS package](https://github.com/langchain-ai/langchainjs). 
For documentation on [the Python version](https://github.com/hwchase17/langchain), [head here](https://python.langchain.com/docs)._ ## Modules diff --git a/docs/snippets/modules/model_io/models/chat/how_to/streaming.mdx b/docs/snippets/modules/model_io/models/chat/how_to/streaming.mdx index 7dc1adaf8452..7d690d8e2b13 100644 --- a/docs/snippets/modules/model_io/models/chat/how_to/streaming.mdx +++ b/docs/snippets/modules/model_io/models/chat/how_to/streaming.mdx @@ -25,8 +25,8 @@ import BytesExample from "@examples/prompts/bytes_output_parser.ts"; ## Using a callback handler -You can also use a [`CallbackHandler`](https://github.com/hwchase17/langchainjs/blob/main/langchain/src/callbacks/base.ts) like so: +You can also use a [`CallbackHandler`](https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/callbacks/base.ts) like so: import StreamingExample from "@examples/models/chat/chat_streaming.ts"; -{StreamingExample} \ No newline at end of file +{StreamingExample} diff --git a/docs/snippets/modules/model_io/models/llms/how_to/streaming_llm.mdx b/docs/snippets/modules/model_io/models/llms/how_to/streaming_llm.mdx index 2c1f832f1ca4..56251c94ee94 100644 --- a/docs/snippets/modules/model_io/models/llms/how_to/streaming_llm.mdx +++ b/docs/snippets/modules/model_io/models/llms/how_to/streaming_llm.mdx @@ -12,7 +12,7 @@ For models that do not support streaming, the entire response will be returned a ## Using a callback handler -You can also use a [`CallbackHandler`](https://github.com/hwchase17/langchainjs/blob/main/langchain/src/callbacks/base.ts) like so: +You can also use a [`CallbackHandler`](https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/callbacks/base.ts) like so: import StreamingExample from "@examples/models/llm/llm_streaming.ts"; diff --git a/examples/src/document_loaders/github.ts b/examples/src/document_loaders/github.ts index b7df3d92be1f..fd5b9d51ae34 100644 --- a/examples/src/document_loaders/github.ts +++ b/examples/src/document_loaders/github.ts @@ -2,7 +2,7 @@ import { GithubRepoLoader } from "langchain/document_loaders/web/github"; export const run = async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: false, diff --git a/examples/src/document_loaders/github_ignore_paths.ts b/examples/src/document_loaders/github_ignore_paths.ts index 94ccc652cec8..254271ab216e 100644 --- a/examples/src/document_loaders/github_ignore_paths.ts +++ b/examples/src/document_loaders/github_ignore_paths.ts @@ -2,7 +2,7 @@ import { GithubRepoLoader } from "langchain/document_loaders/web/github"; export const run = async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: false, unknown: "warn", ignorePaths: ["*.md"] } ); const docs = await loader.load(); diff --git a/examples/src/document_loaders/github_submodules.ts b/examples/src/document_loaders/github_submodules.ts index 1ee5f62fda36..54b726a40fa7 100644 --- a/examples/src/document_loaders/github_submodules.ts +++ b/examples/src/document_loaders/github_submodules.ts @@ -2,7 +2,7 @@ import { GithubRepoLoader } from "langchain/document_loaders/web/github"; export const run = async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: true, diff --git a/langchain/README.md 
b/langchain/README.md index df7ea538ddbe..926111dfa321 100644 --- a/langchain/README.md +++ b/langchain/README.md @@ -2,13 +2,13 @@ ⚡ Building applications with LLMs through composability ⚡ -[![CI](https://github.com/hwchase17/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/hwchase17/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchainjs) +[![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs) [](https://codespaces.new/hwchase17/langchainjs) Looking for the Python version? Check out [LangChain](https://github.com/hwchase17/langchain). -To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com). -[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications. +To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com). +[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications. Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to get off the waitlist or speak with our sales team ## Quick Install @@ -53,4 +53,4 @@ The [LangChainHub](https://github.com/hwchase17/langchain-hub) is a central plac As an open source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infra, or better documentation. -Check out [our contributing guidelines](https://github.com/hwchase17/langchainjs/blob/main/CONTRIBUTING.md) for instructions on how to contribute. +Check out [our contributing guidelines](https://github.com/langchain-ai/langchainjs/blob/main/CONTRIBUTING.md) for instructions on how to contribute. 
diff --git a/langchain/src/document_loaders/tests/github.int.test.ts b/langchain/src/document_loaders/tests/github.int.test.ts index faab69643422..19a2466e8615 100644 --- a/langchain/src/document_loaders/tests/github.int.test.ts +++ b/langchain/src/document_loaders/tests/github.int.test.ts @@ -3,7 +3,7 @@ import { GithubRepoLoader } from "../web/github.js"; test("Test GithubRepoLoader", async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: false, unknown: "warn" } ); const documents = await loader.load(); @@ -20,7 +20,7 @@ test("Test GithubRepoLoader", async () => { test("Test ignorePaths with GithubRepoLoader", async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: false, @@ -42,7 +42,7 @@ test("Test ignorePaths with GithubRepoLoader", async () => { test("Test ignorePaths with GithubRepoLoader", async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: false, diff --git a/langchain/src/document_loaders/tests/github.test.ts b/langchain/src/document_loaders/tests/github.test.ts index 09197f708a2b..50b5f2a46c05 100644 --- a/langchain/src/document_loaders/tests/github.test.ts +++ b/langchain/src/document_loaders/tests/github.test.ts @@ -31,7 +31,7 @@ describe("GithubRepoLoader recursion", () => { test("Test recursion with GithubRepoLoader", async () => { const loader = new GithubRepoLoader( - "https://github.com/hwchase17/langchainjs", + "https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: true, @@ -51,7 +51,7 @@ describe("GithubRepoLoader recursion", () => { test("Expect an error if processSubmodules set without recursive with GithubRepoLoader", async () => { expect( () => - new GithubRepoLoader("https://github.com/hwchase17/langchainjs", { + new GithubRepoLoader("https://github.com/langchain-ai/langchainjs", { branch: "main", recursive: false, processSubmodules: true, diff --git a/langchain/src/prompts/prompt.ts b/langchain/src/prompts/prompt.ts index 966038f23ac0..6c45b84ddaf0 100644 --- a/langchain/src/prompts/prompt.ts +++ b/langchain/src/prompts/prompt.ts @@ -69,7 +69,7 @@ type ExtractTemplateParamsRecursive< Result extends string[] = [] > = T extends `${string}{${infer Param}}${infer Rest}` ? Param extends `${NonAlphanumeric}${string}` - ? ExtractTemplateParamsRecursive // for non-template variables that look like template variables e.g. see https://github.com/hwchase17/langchainjs/blob/main/langchain/src/chains/query_constructor/prompt.ts + ? ExtractTemplateParamsRecursive // for non-template variables that look like template variables e.g. see https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/chains/query_constructor/prompt.ts : ExtractTemplateParamsRecursive : Result; diff --git a/langchain/src/util/axios-fetch-adapter.js b/langchain/src/util/axios-fetch-adapter.js index 53f17f835a42..b5df0540fba9 100644 --- a/langchain/src/util/axios-fetch-adapter.js +++ b/langchain/src/util/axios-fetch-adapter.js @@ -334,7 +334,7 @@ function createRequest(config) { } // Some `fetch` implementations will override the Content-Type to text/plain // when body is a string. 
- // See https://github.com/hwchase17/langchainjs/issues/1010 + // See https://github.com/langchain-ai/langchainjs/issues/1010 if (typeof options.body === "string") { options.body = new TextEncoder().encode(options.body); } diff --git a/langchain/src/util/env.ts b/langchain/src/util/env.ts index 6356d0960174..15639cb471a3 100644 --- a/langchain/src/util/env.ts +++ b/langchain/src/util/env.ts @@ -76,7 +76,7 @@ export async function getRuntimeEnvironment(): Promise { export function getEnvironmentVariable(name: string): string | undefined { // Certain Deno setups will throw an error if you try to access environment variables - // https://github.com/hwchase17/langchainjs/issues/1412 + // https://github.com/langchain-ai/langchainjs/issues/1412 try { return typeof process !== "undefined" ? // eslint-disable-next-line no-process-env diff --git a/langchain/src/util/event-source-parse.ts b/langchain/src/util/event-source-parse.ts index a00dd1037fa9..dab076c03f8a 100644 --- a/langchain/src/util/event-source-parse.ts +++ b/langchain/src/util/event-source-parse.ts @@ -63,7 +63,7 @@ export async function getBytes( // This change is essential to ensure that we capture every last piece of information from streams, // such as those from Azure OpenAI, which may not terminate with a blank line. Without this // mechanism, we risk ignoring a possibly significant last message. - // See https://github.com/hwchase17/langchainjs/issues/1299 for details. + // See https://github.com/langchain-ai/langchainjs/issues/1299 for details. // eslint-disable-next-line no-constant-condition while (true) { const result = await reader.read(); From aa9b618786757c301e93caeb3253c56b56d05602 Mon Sep 17 00:00:00 2001 From: "C. J. Tantay" Date: Mon, 16 Oct 2023 14:54:07 -0700 Subject: [PATCH 4/4] fix: update llm_streaming_stream_method.ts doc (#2929) * fix: update llm_streaming_stream_method.ts doc Removes reference to `handleLLMNewToken` since `handleLLMNewToken` does not exist within this example * Adds `streaming: true` to docs Adds missing property in the expected configuration for streaming * Update llm_streaming_stream_method.ts Remove unneeded comments and unnecessary code - `.stream()` works without the need to add `streaming: true` as part of the LLM constructor --- examples/src/models/llm/llm_streaming_stream_method.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/src/models/llm/llm_streaming_stream_method.ts b/examples/src/models/llm/llm_streaming_stream_method.ts index 04310c190d23..574e87127f47 100644 --- a/examples/src/models/llm/llm_streaming_stream_method.ts +++ b/examples/src/models/llm/llm_streaming_stream_method.ts @@ -1,7 +1,5 @@ import { OpenAI } from "langchain/llms/openai"; -// To enable streaming, we pass in `streaming: true` to the LLM constructor. -// Additionally, we pass in a handler for the `handleLLMNewToken` event. const model = new OpenAI({ maxTokens: 25, });
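The commit message above notes that `.stream()` works without `streaming: true` in the LLM constructor, which is why the comments were removed from the example. A short usage sketch of that pattern (the prompt string is arbitrary):

```ts
import { OpenAI } from "langchain/llms/openai";

const model = new OpenAI({
  maxTokens: 25,
});

// `.stream()` returns an async iterable of partial outputs; no `streaming: true`
// constructor flag and no `handleLLMNewToken` callback are required.
const stream = await model.stream("Tell me a joke.");

for await (const chunk of stream) {
  console.log(chunk);
}
```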