diff --git a/docs/api_refs/vercel.json b/docs/api_refs/vercel.json index 6b252413ddb2..fe54a520726c 100644 --- a/docs/api_refs/vercel.json +++ b/docs/api_refs/vercel.json @@ -3,8 +3,480 @@ "trailingSlash": false, "redirects": [ { - "source": "/:path*/langchain_:rest", - "destination": "/:path*/_langchain_:rest" + "source": "/:path*/langchain_load:rest", + "destination": "/:path*/langchain.load:rest" + }, + { + "source": "/:path*/langchain_agents:rest", + "destination": "/:path*/langchain.agents:rest" + }, + { + "source": "/:path*/langchain_tools:rest", + "destination": "/:path*/langchain.tools:rest" + }, + { + "source": "/:path*/langchain_chains:rest", + "destination": "/:path*/langchain.chains:rest" + }, + { + "source": "/:path*/langchain_chat_models:rest", + "destination": "/:path*/langchain.chat_models:rest" + }, + { + "source": "/:path*/langchain_embeddings:rest", + "destination": "/:path*/langchain.embeddings:rest" + }, + { + "source": "/:path*/langchain_vectorstores:rest", + "destination": "/:path*/langchain.vectorstores:rest" + }, + { + "source": "/:path*/langchain_text_splitter:rest", + "destination": "/:path*/langchain.text_splitter:rest" + }, + { + "source": "/:path*/langchain_memory:rest", + "destination": "/:path*/langchain.memory:rest" + }, + { + "source": "/:path*/langchain_document:rest", + "destination": "/:path*/langchain.document:rest" + }, + { + "source": "/:path*/langchain_document_loaders:rest", + "destination": "/:path*/langchain.document_loaders:rest" + }, + { + "source": "/:path*/langchain_document_transformers:rest", + "destination": "/:path*/langchain.document_transformers:rest" + }, + { + "source": "/:path*/langchain_sql_db:rest", + "destination": "/:path*/langchain.sql_db:rest" + }, + { + "source": "/:path*/langchain_callbacks:rest", + "destination": "/:path*/langchain.callbacks:rest" + }, + { + "source": "/:path*/langchain_output_parsers:rest", + "destination": "/:path*/langchain.output_parsers:rest" + }, + { + "source": 
"/:path*/langchain_retrievers:rest", + "destination": "/:path*/langchain.retrievers:rest" + }, + { + "source": "/:path*/langchain_cache:rest", + "destination": "/:path*/langchain.cache:rest" + }, + { + "source": "/:path*/langchain_stores:rest", + "destination": "/:path*/langchain.stores:rest" + }, + { + "source": "/:path*/langchain_storage:rest", + "destination": "/:path*/langchain.storage:rest" + }, + { + "source": "/:path*/langchain_hub:rest", + "destination": "/:path*/langchain.hub:rest" + }, + { + "source": "/:path*/langchain_util:rest", + "destination": "/:path*/langchain.util:rest" + }, + { + "source": "/:path*/langchain_experimental:rest", + "destination": "/:path*/langchain.experimental:rest" + }, + { + "source": "/:path*/langchain_evaluation:rest", + "destination": "/:path*/langchain.evaluation:rest" + }, + { + "source": "/:path*/langchain_smith:rest", + "destination": "/:path*/langchain.smith:rest" + }, + { + "source": "/:path*/langchain_runnables:rest", + "destination": "/:path*/langchain.runnables:rest" + }, + { + "source": "/:path*/langchain_indexes:rest", + "destination": "/:path*/langchain.indexes:rest" + }, + { + "source": "/:path*/langchain_schema:rest", + "destination": "/:path*/langchain.schema:rest" + }, + { + "source": "/:path*/langchain_core_:rest", + "destination": "/:path*/_langchain_core.:rest" + }, + { + "source": "/:path*/langchain_anthropic_experimental(_|\\.):rest", + "destination": "/:path*/_langchain_anthropic.experimental.:rest" + }, + { + "source": "/:path*/langchain_anthropic.ChatAnthropic.:rest", + "destination": "/:path*/_langchain_anthropic.index.ChatAnthropic.:rest" + }, + { + "source": "/:path*/langchain_anthropic.ChatAnthropicMessages.:rest", + "destination": "/:path*/_langchain_anthropic.index.ChatAnthropicMessages.:rest" + }, + { + "source": "/:path*/langchain_anthropic.AnthropicInput.:rest", + "destination": "/:path*/_langchain_anthropic.index.AnthropicInput.:rest" + }, + { + "source": 
"/:path*/langchain_anthropic.ChatAnthropicCallOptions.:rest", + "destination": "/:path*/_langchain_anthropic.index.ChatAnthropicCallOptions.:rest" + }, + { + "source": "/:path*/langchain_aws(_|\\.):rest", + "destination": "/:path*/_langchain_aws.:rest" + }, + { + "source": "/:path*/langchain_azure_cosmosdb(_|\\.):rest", + "destination": "/:path*/_langchain_azure_cosmosdb.:rest" + }, + { + "source": "/:path*/langchain_azure_dynamic_sessions(_|\\.):rest", + "destination": "/:path*/_langchain_azure_dynamic_sessions.:rest" + }, + { + "source": "/:path*/langchain_baidu_qianfan(_|\\.):rest", + "destination": "/:path*/_langchain_baidu_qianfan.:rest" + }, + { + "source": "/:path*/langchain_cloudflare_langgraph_checkpointers(_|\\.):rest", + "destination": "/:path*/_langchain_cloudflare.langgraph_checkpointers.:rest" + }, + { + "source": "/:path*/langchain_cloudflare.ChatCloudflareWorkersAI:rest", + "destination": "/:path*/_langchain_cloudflare.index.ChatCloudflareWorkersAI:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareD1MessageHistory:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareD1MessageHistory:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareKVCache:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareKVCache:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareVectorizeStore:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareVectorizeStore:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareWorkersAI:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareWorkersAI:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareWorkersAIEmbeddings:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareWorkersAIEmbeddings:rest" + }, + { + "source": "/:path*/langchain_cloudflare.ChatCloudflareWorkersAICallOptions:rest", + "destination": 
"/:path*/_langchain_cloudflare.index.ChatCloudflareWorkersAICallOptions:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareWorkersAIEmbeddingsParams:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareWorkersAIEmbeddingsParams:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareWorkersAIInput:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareWorkersAIInput:rest" + }, + { + "source": "/:path*/langchain_cloudflare.VectorizeLibArgs:rest", + "destination": "/:path*/_langchain_cloudflare.index.VectorizeLibArgs:rest" + }, + { + "source": "/:path*/langchain_cloudflare.CloudflareD1MessageHistoryInput:rest", + "destination": "/:path*/_langchain_cloudflare.index.CloudflareD1MessageHistoryInput:rest" + }, + { + "source": "/:path*/langchain_cloudflare.VectorizeDeleteParams:rest", + "destination": "/:path*/_langchain_cloudflare.index.VectorizeDeleteParams:rest" + }, + { + "source": "/:path*/langchain_cohere(_|\\.):rest", + "destination": "/:path*/_langchain_cohere.:rest" + }, + { + "source": "/:path*/langchain_community_:rest", + "destination": "/:path*/_langchain_community.:rest" + }, + { + "source": "/:path*/langchain_exa(_|\\.):rest", + "destination": "/:path*/_langchain_exa.:rest" + }, + { + "source": "/:path*/langchain_google_common_types(_|\\.):rest", + "destination": "/:path*/_langchain_google_common.types.:rest" + }, + { + "source": "/:path*/langchain_google_common_utils(_|\\.):rest", + "destination": "/:path*/_langchain_google_common.utils.:rest" + }, + { + "source": "/:path*/langchain_google_common.AbstractGoogleLLMConnection.:rest", + "destination": "/:path*/_langchain_google_common.index.AbstractGoogleLLMConnection.:rest" + }, + { + "source": "/:path*/langchain_google_common.ApiKeyGoogleAuth.:rest", + "destination": "/:path*/_langchain_google_common.index.ApiKeyGoogleAuth.:rest" + }, + { + "source": "/:path*/langchain_google_common.BaseGoogleEmbeddings.:rest", + "destination": 
"/:path*/_langchain_google_common.index.BaseGoogleEmbeddings.:rest" + }, + { + "source": "/:path*/langchain_google_common.ChatGoogleBase.:rest", + "destination": "/:path*/_langchain_google_common.index.ChatGoogleBase.:rest" + }, + { + "source": "/:path*/langchain_google_common.ComplexJsonStream.:rest", + "destination": "/:path*/_langchain_google_common.index.ComplexJsonStream.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleAIConnection.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleAIConnection.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleAbstractedFetchClient.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleAbstractedFetchClient.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleBaseLLM.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleBaseLLM.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleConnection.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleConnection.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleHostConnection.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleHostConnection.:rest" + }, + { + "source": "/:path*/langchain_google_common.JsonStream.:rest", + "destination": "/:path*/_langchain_google_common.index.JsonStream.:rest" + }, + { + "source": "/:path*/langchain_google_common.ReadableJsonStream.:rest", + "destination": "/:path*/_langchain_google_common.index.ReadableJsonStream.:rest" + }, + { + "source": "/:path*/langchain_google_common.BaseGoogleEmbeddingsOptions.:rest", + "destination": "/:path*/_langchain_google_common.index.BaseGoogleEmbeddingsOptions.:rest" + }, + { + "source": "/:path*/langchain_google_common.BaseGoogleEmbeddingsParams.:rest", + "destination": "/:path*/_langchain_google_common.index.BaseGoogleEmbeddingsParams.:rest" + }, + { + "source": "/:path*/langchain_google_common.ChatGoogleBaseInput.:rest", + "destination": 
"/:path*/_langchain_google_common.index.ChatGoogleBaseInput.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleAbstractedClient.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleAbstractedClient.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleEmbeddingsInstance.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleEmbeddingsInstance.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleEmbeddingsResponse.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleEmbeddingsResponse.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleAbstractedClientOps.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleAbstractedClientOps.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleAbstractedClientOpsMethod.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleAbstractedClientOpsMethod.:rest" + }, + { + "source": "/:path*/langchain_google_common.GoogleAbstractedClientOpsResponseType.:rest", + "destination": "/:path*/_langchain_google_common.index.GoogleAbstractedClientOpsResponseType.:rest" + }, + { + "source": "/:path*/langchain_google_common.aiPlatformScope.:rest", + "destination": "/:path*/_langchain_google_common.index.aiPlatformScope.:rest" + }, + { + "source": "/:path*/langchain_google_common.complexValue.:rest", + "destination": "/:path*/_langchain_google_common.index.complexValue.:rest" + }, + { + "source": "/:path*/langchain_google_common.convertToGeminiTools.:rest", + "destination": "/:path*/_langchain_google_common.index.convertToGeminiTools.:rest" + }, + { + "source": "/:path*/langchain_google_common.copyAIModelParams.:rest", + "destination": "/:path*/_langchain_google_common.index.copyAIModelParams.:rest" + }, + { + "source": "/:path*/langchain_google_common.copyAIModelParamsInto.:rest", + "destination": "/:path*/_langchain_google_common.index.copyAIModelParamsInto.:rest" + }, + { + "source": 
"/:path*/langchain_google_common.copyAndValidateModelParamsInto.:rest", + "destination": "/:path*/_langchain_google_common.index.copyAndValidateModelParamsInto.:rest" + }, + { + "source": "/:path*/langchain_google_common.ensureAuthOptionScopes.:rest", + "destination": "/:path*/_langchain_google_common.index.ensureAuthOptionScopes.:rest" + }, + { + "source": "/:path*/langchain_google_common.jsonSchemaToGeminiParameters.:rest", + "destination": "/:path*/_langchain_google_common.index.jsonSchemaToGeminiParameters.:rest" + }, + { + "source": "/:path*/langchain_google_common.modelToFamily.:rest", + "destination": "/:path*/_langchain_google_common.index.modelToFamily.:rest" + }, + { + "source": "/:path*/langchain_google_common.removeAdditionalProperties.:rest", + "destination": "/:path*/_langchain_google_common.index.removeAdditionalProperties.:rest" + }, + { + "source": "/:path*/langchain_google_common.simpleValue.:rest", + "destination": "/:path*/_langchain_google_common.index.simpleValue.:rest" + }, + { + "source": "/:path*/langchain_google_common.validateModelParams.:rest", + "destination": "/:path*/_langchain_google_common.index.validateModelParams.:rest" + }, + { + "source": "/:path*/langchain_google_common.zodToGeminiParameters.:rest", + "destination": "/:path*/_langchain_google_common.index.zodToGeminiParameters.:rest" + }, + { + "source": "/:path*/langchain_google_genai(_|\\.):rest", + "destination": "/:path*/_langchain_google_genai.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_types(_|\\.):rest", + "destination": "/:path*/_langchain_google_vertexai.types.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_utils(_|\\.):rest", + "destination": "/:path*/_langchain_google_vertexai.utils.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai.ChatVertexAI.:rest", + "destination": "/:path*/_langchain_google_vertexai.index.ChatVertexAI.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai.VertexAI.:rest", + "destination": 
"/:path*/_langchain_google_vertexai.index.VertexAI.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai.VertexAIEmbeddings.:rest", + "destination": "/:path*/_langchain_google_vertexai.index.VertexAIEmbeddings.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai.ChatVertexAIInput.:rest", + "destination": "/:path*/_langchain_google_vertexai.index.ChatVertexAIInput.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai.GoogleVertexAIEmbeddingsInput.:rest", + "destination": "/:path*/_langchain_google_vertexai.index.GoogleVertexAIEmbeddingsInput.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai.VertexAIInput.:rest", + "destination": "/:path*/_langchain_google_vertexai.index.VertexAIInput.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web_types(_|\\.):rest", + "destination": "/:path*/_langchain_google_vertexai_web.types.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web_utils(_|\\.):rest", + "destination": "/:path*/_langchain_google_vertexai_web.utils.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web.ChatVertexAI.:rest", + "destination": "/:path*/_langchain_google_vertexai_web.index.ChatVertexAI.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web.VertexAI.:rest", + "destination": "/:path*/_langchain_google_vertexai_web.index.VertexAI.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web.VertexAIEmbeddings.:rest", + "destination": "/:path*/_langchain_google_vertexai_web.index.VertexAIEmbeddings.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web.ChatVertexAIInput.:rest", + "destination": "/:path*/_langchain_google_vertexai_web.index.ChatVertexAIInput.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web.GoogleVertexAIEmbeddingsInput.:rest", + "destination": "/:path*/_langchain_google_vertexai_web.index.GoogleVertexAIEmbeddingsInput.:rest" + }, + { + "source": "/:path*/langchain_google_vertexai_web.VertexAIInput.:rest", + 
"destination": "/:path*/_langchain_google_vertexai_web.index.VertexAIInput.:rest" + }, + { + "source": "/:path*/langchain_groq(_|\\.):rest", + "destination": "/:path*/_langchain_groq.:rest" + }, + { + "source": "/:path*/langchain_mistralai(_|\\.):rest", + "destination": "/:path*/_langchain_mistralai.:rest" + }, + { + "source": "/:path*/langchain_mixedbread_ai(_|\\.):rest", + "destination": "/:path*/_langchain_mixedbread_ai.:rest" + }, + { + "source": "/:path*/langchain_mongodb(_|\\.):rest", + "destination": "/:path*/_langchain_mongodb.:rest" + }, + { + "source": "/:path*/langchain_nomic(_|\\.):rest", + "destination": "/:path*/_langchain_nomic.:rest" + }, + { + "source": "/:path*/langchain_ollama(_|\\.):rest", + "destination": "/:path*/_langchain_ollama.:rest" + }, + { + "source": "/:path*/langchain_openai(_|\\.):rest", + "destination": "/:path*/_langchain_openai.:rest" + }, + { + "source": "/:path*/langchain_pinecone(_|\\.):rest", + "destination": "/:path*/_langchain_pinecone.:rest" + }, + { + "source": "/:path*/langchain_qdrant(_|\\.):rest", + "destination": "/:path*/_langchain_qdrant.:rest" + }, + { + "source": "/:path*/langchain_redis(_|\\.):rest", + "destination": "/:path*/_langchain_redis.:rest" + }, + { + "source": "/:path*/langchain_textsplitters(_|\\.):rest", + "destination": "/:path*/_langchain_textsplitters.:rest" + }, + { + "source": "/:path*/langchain_weaviate(_|\\.):rest", + "destination": "/:path*/_langchain_weaviate.:rest" + }, + { + "source": "/:path*/langchain_yandex(_|\\.):rest", + "destination": "/:path*/_langchain_yandex.:rest" } ] } diff --git a/docs/core_docs/docs/concepts.mdx b/docs/core_docs/docs/concepts.mdx index 97392c244d03..5a42901e8a08 100644 --- a/docs/core_docs/docs/concepts.mdx +++ b/docs/core_docs/docs/concepts.mdx @@ -112,7 +112,7 @@ With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.sm -To make it as easy as possible to create custom chains, we've implemented a 
["Runnable"](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html) protocol. +To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below. This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. @@ -394,14 +394,14 @@ LangChain has many different types of output parsers. This is a list of output p | Name | Supports Streaming | Input Type | Output Type | Description | | ----------------------------------------------------------------------------------------------------------------- | ------------------ | ------------------------- | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [JSON](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.JsonOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Returns a JSON object as specified. You can specify a Zod schema and it will return JSON for that model. | -| [XML](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.XMLOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Returns a object of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). | -| [CSV](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.CommaSeparatedListOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Array[string]` | Returns an array of comma separated values. 
| -| [Structured](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.StructuredOutputParser.html) | | `string` \| `BaseMessage` | `Promise>` | Parse structured JSON from an LLM response. | -| [HTTP](https://v02.api.js.langchain.com/classes/langchain_output_parsers.HttpResponseOutputParser.html) | ✅ | `string` | `Promise` | Parse an LLM response to then send over HTTP(s). Useful when invoking the LLM on the server/edge, and then sending the content/stream back to the client. | -| [Bytes](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.BytesOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Parse an LLM response to then send over HTTP(s). Useful for streaming LLM responses from the server/edge to the client. | -| [Datetime](https://v02.api.js.langchain.com/classes/langchain_output_parsers.DatetimeOutputParser.html) | | `string` | `Promise` | Parses response into a `Date`. | -| [Regex](https://v02.api.js.langchain.com/classes/langchain_output_parsers.RegexParser.html) | | `string` | `Promise>` | Parses the given text using the regex pattern and returns a object with the parsed output. | +| [JSON](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Returns a JSON object as specified. You can specify a Zod schema and it will return JSON for that model. | +| [XML](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.XMLOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Returns a object of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). | +| [CSV](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.CommaSeparatedListOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Array[string]` | Returns an array of comma separated values. 
| +| [Structured](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html) | | `string` \| `BaseMessage` | `Promise>` | Parse structured JSON from an LLM response. | +| [HTTP](https://v02.api.js.langchain.com/classes/langchain.output_parsers.HttpResponseOutputParser.html) | ✅ | `string` | `Promise` | Parse an LLM response to then send over HTTP(s). Useful when invoking the LLM on the server/edge, and then sending the content/stream back to the client. | +| [Bytes](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.BytesOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Parse an LLM response to then send over HTTP(s). Useful for streaming LLM responses from the server/edge to the client. | +| [Datetime](https://v02.api.js.langchain.com/classes/langchain.output_parsers.DatetimeOutputParser.html) | | `string` | `Promise` | Parses response into a `Date`. | +| [Regex](https://v02.api.js.langchain.com/classes/langchain.output_parsers.RegexParser.html) | | `string` | `Promise>` | Parses the given text using the regex pattern and returns a object with the parsed output. | For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers). @@ -517,7 +517,7 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d For some techniques, such as [indexing and retrieval with multiple vectors per document](/docs/how_to/multi_vector/), having some sort of key-value (KV) storage is helpful. -LangChain includes a [`BaseStore`](https://api.js.langchain.com/classes/langchain_core_stores.BaseStore.html) interface, +LangChain includes a [`BaseStore`](https://api.js.langchain.com/classes/langchain_core.stores.BaseStore.html) interface, which allows for storage of arbitrary data. 
However, LangChain components that require KV-storage accept a more specific `BaseStore` instance that stores binary data (referred to as a `ByteStore`), and internally take care of encoding and decoding data for their specific needs. @@ -526,7 +526,7 @@ This means that as a user, you only need to think about one type of store rather #### Interface -All [`BaseStores`](https://api.js.langchain.com/classes/langchain_core_stores.BaseStore.html) support the following interface. Note that the interface allows +All [`BaseStores`](https://api.js.langchain.com/classes/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows for modifying **multiple** key-value pairs at once: - `mget(keys: string[]): Promise<(undefined | Uint8Array)[]>`: get the contents of multiple keys, returning `None` if the key does not exist @@ -723,7 +723,7 @@ You can subscribe to these events by using the `callbacks` argument available th #### Callback handlers -`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.js.langchain.com/interfaces/langchain_core_callbacks_base.CallbackHandlerMethods.html) interface, which has a method for each event that can be subscribed to. +`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) interface, which has a method for each event that can be subscribed to. The `CallbackManager` will call the appropriate method on each handler when the event is triggered. #### Passing callbacks @@ -793,7 +793,7 @@ For models (or other components) that don't support streaming natively, this ite you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode without the need to provide additional config. 
-The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.js.langchain.com/classes/langchain_core_messages.AIMessageChunk.html). +The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.js.langchain.com/classes/langchain_core.messages.AIMessageChunk.html). Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language), you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform each yielded chunk. @@ -849,10 +849,10 @@ or [this guide](/docs/how_to/callbacks_custom_events) for how to stream custom e #### Callbacks The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a -callback handler that handles the [`handleLLMNewToken`](https://api.js.langchain.com/interfaces/langchain_core_callbacks_base.CallbackHandlerMethods.html#handleLLMNewToken) event into LangChain components. When that component is invoked, any +callback handler that handles the [`handleLLMNewToken`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html#handleLLMNewToken) event into LangChain components. When that component is invoked, any [LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. a HTTP response. -You can also handle the [`handleLLMEnd`](https://api.js.langchain.com/interfaces/langchain_core_callbacks_base.CallbackHandlerMethods.html#handleLLMEnd) event to perform any necessary cleanup. 
+You can also handle the [`handleLLMEnd`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html#handleLLMEnd) event to perform any necessary cleanup. You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks. @@ -1242,7 +1242,7 @@ Two approaches can address this tension: (1) [Multi Vector](/docs/how_to/multi_v Fifth, consider ways to improve the quality of your similarity search itself. Embedding models compress text into fixed-length (vector) representations that capture the semantic content of the document. This compression is useful for search / retrieval, but puts a heavy burden on that single vector representation to capture the semantic nuance / detail of the document. In some cases, irrelevant or redundant content can dilute the semantic usefulness of the embedding. -There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](docs/integrations/retrievers/supabase-hybrid/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://api.js.langchain.com/interfaces/langchain_core_vectorstores.VectorStoreInterface.html#maxMarginalRelevanceSearch), which attempts to diversify the results of a search to avoid returning similar and redundant documents. +There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. 
Many [vector stores](docs/integrations/retrievers/supabase-hybrid/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://api.js.langchain.com/interfaces/langchain_core.vectorstores.VectorStoreInterface.html#maxMarginalRelevanceSearch), which attempts to diversify the results of a search to avoid returning similar and redundant documents. | Name | When to use | Description | | ------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | diff --git a/docs/core_docs/docs/how_to/assign.ipynb b/docs/core_docs/docs/how_to/assign.ipynb index 72ca850404fe..ef0b5dc505c4 100644 --- a/docs/core_docs/docs/how_to/assign.ipynb +++ b/docs/core_docs/docs/how_to/assign.ipynb @@ -27,7 +27,7 @@ "\n", ":::\n", "\n", - "An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnablePassthrough.html#assign-2) static method takes an input value and adds the extra arguments passed to the assign function.\n", + "An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. 
The [`RunnablePassthrough.assign()`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html#assign-2) static method takes an input value and adds the extra arguments passed to the assign function.\n", "\n", "This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n", "\n", diff --git a/docs/core_docs/docs/how_to/binding.ipynb b/docs/core_docs/docs/how_to/binding.ipynb index 2a0bbf212ee4..3375fedb84a7 100644 --- a/docs/core_docs/docs/how_to/binding.ipynb +++ b/docs/core_docs/docs/how_to/binding.ipynb @@ -27,7 +27,7 @@ "\n", ":::\n", "\n", - "Sometimes we want to invoke a [`Runnable`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html) within a [RunnableSequence](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#bind) method to set these arguments ahead of time.\n", + "Sometimes we want to invoke a [`Runnable`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) within a [RunnableSequence](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. 
We can use the [`Runnable.bind()`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#bind) method to set these arguments ahead of time.\n", "\n", "## Binding stop sequences\n", "\n", diff --git a/docs/core_docs/docs/how_to/callbacks_attach.ipynb b/docs/core_docs/docs/how_to/callbacks_attach.ipynb index f87ad9c550eb..3d3ac02bf7f0 100644 --- a/docs/core_docs/docs/how_to/callbacks_attach.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_attach.ipynb @@ -16,9 +16,9 @@ "\n", ":::\n", "\n", - "If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.withConfig()`](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#withConfig) method. This saves you the need to pass callbacks in each time you invoke the chain.\n", + "If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.withConfig()`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#withConfig) method. This saves you the need to pass callbacks in each time you invoke the chain.\n", "\n", - "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core_tracers_console.ConsoleCallbackHandler.html):" + "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" ] }, { diff --git a/docs/core_docs/docs/how_to/callbacks_backgrounding.ipynb b/docs/core_docs/docs/how_to/callbacks_backgrounding.ipynb index 67812ba1e136..45af5b58b4da 100644 --- a/docs/core_docs/docs/how_to/callbacks_backgrounding.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_backgrounding.ipynb @@ -16,7 +16,7 @@ "\n", "By default, LangChain.js callbacks are blocking. 
This means that execution will wait for the callback to either return or timeout before continuing. This is to help ensure that if you are running code in [serverless environments](https://en.wikipedia.org/wiki/Serverless_computing) such as [AWS Lambda](https://aws.amazon.com/pm/lambda/) or [Cloudflare Workers](https://workers.cloudflare.com/), these callbacks always finish before the execution context ends.\n", "\n", - "However, this can add unnecessary latency if you are running in traditional stateful environments. If desired, you can set your callbacks to run in the background to avoid this additional latency by setting the `LANGCHAIN_CALLBACKS_BACKGROUND` environment variable to `\"true\"`. You can then import the global [`awaitAllCallbacks`](https://api.js.langchain.com/functions/langchain_core_callbacks_promises.awaitAllCallbacks.html) method to ensure all callbacks finish if necessary.\n", + "However, this can add unnecessary latency if you are running in traditional stateful environments. If desired, you can set your callbacks to run in the background to avoid this additional latency by setting the `LANGCHAIN_CALLBACKS_BACKGROUND` environment variable to `\"true\"`. You can then import the global [`awaitAllCallbacks`](https://api.js.langchain.com/functions/langchain_core.callbacks_promises.awaitAllCallbacks.html) method to ensure all callbacks finish if necessary.\n", "\n", "To illustrate this, we'll create a [custom callback handler](/docs/how_to/custom_callbacks) that takes some time to resolve, and show the timing with and without `LANGCHAIN_CALLBACKS_BACKGROUND` set. 
Here it is without the variable set:" ] diff --git a/docs/core_docs/docs/how_to/callbacks_constructor.ipynb b/docs/core_docs/docs/how_to/callbacks_constructor.ipynb index 53b7e614945a..b653161deada 100644 --- a/docs/core_docs/docs/how_to/callbacks_constructor.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_constructor.ipynb @@ -16,7 +16,7 @@ "\n", "Most LangChain modules allow you to pass `callbacks` directly into the constructor. In this case, the callbacks will only be called for that instance (and any nested runs).\n", "\n", - "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core_tracers_console.ConsoleCallbackHandler.html):" + "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" ] }, { diff --git a/docs/core_docs/docs/how_to/callbacks_runtime.ipynb b/docs/core_docs/docs/how_to/callbacks_runtime.ipynb index be9918d5f71e..7f41ac7f403a 100644 --- a/docs/core_docs/docs/how_to/callbacks_runtime.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_runtime.ipynb @@ -14,9 +14,9 @@ "\n", ":::\n", "\n", - "In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.js.langchain.com/interfaces/langchain_core_callbacks_base.CallbackHandlerMethods.html) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n", + "In many cases, it is advantageous to pass in handlers instead when running the object. 
When we pass through [`CallbackHandlers`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) using the `callbacks` keyword arg when executing a run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n", "\n", - "This prevents us from having to manually attach the handlers to each individual nested object. Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core_tracers_console.ConsoleCallbackHandler.html):" + "This prevents us from having to manually attach the handlers to each individual nested object. Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" ] }, { diff --git a/docs/core_docs/docs/how_to/character_text_splitter.ipynb b/docs/core_docs/docs/how_to/character_text_splitter.ipynb index 850544b5e681..44bbe800d25e 100644 --- a/docs/core_docs/docs/how_to/character_text_splitter.ipynb +++ b/docs/core_docs/docs/how_to/character_text_splitter.ipynb @@ -22,7 +22,7 @@ "\n", "To obtain the string content directly, use `.splitText()`.\n", "\n", - "To create LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core_documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments()`." + "To create LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments()`."
] }, { diff --git a/docs/core_docs/docs/how_to/chat_models_universal_init.mdx b/docs/core_docs/docs/how_to/chat_models_universal_init.mdx index 3b03eae02b36..46010ff67f61 100644 --- a/docs/core_docs/docs/how_to/chat_models_universal_init.mdx +++ b/docs/core_docs/docs/how_to/chat_models_universal_init.mdx @@ -24,7 +24,7 @@ This guide assumes familiarity with the following concepts: `initChatModel` requires `langchain>=0.2.11`. See [this guide](/docs/how_to/installation/#installing-integration-packages) for some considerations to take when upgrading. -See the [initChatModel()](https://api.js.langchain.com/functions/langchain_chat_models_universal.initChatModel.html) API reference for a full list of supported integrations. +See the [initChatModel()](https://api.js.langchain.com/functions/langchain.chat_models_universal.initChatModel.html) API reference for a full list of supported integrations. Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `@langchain/openai` installed to init an OpenAI model. ::: @@ -38,7 +38,7 @@ import BasicExample from "@examples/models/chat/configurable/basic.ts"; ## Inferring model provider For common and distinct model names `initChatModel()` will attempt to infer the model provider. -See the [API reference](https://api.js.langchain.com/functions/langchain_chat_models_universal.initChatModel.html) for a full list of inference behavior. +See the [API reference](https://api.js.langchain.com/functions/langchain.chat_models_universal.initChatModel.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`. 
import InferringProviderExample from "@examples/models/chat/configurable/inferring_model_provider.ts"; diff --git a/docs/core_docs/docs/how_to/chat_streaming.ipynb b/docs/core_docs/docs/how_to/chat_streaming.ipynb index 1fa1a98212ba..292a3b11e7c7 100644 --- a/docs/core_docs/docs/how_to/chat_streaming.ipynb +++ b/docs/core_docs/docs/how_to/chat_streaming.ipynb @@ -17,7 +17,7 @@ "source": [ "# How to stream chat model responses\n", - "All [chat models](https://api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html) implement the [Runnable interface](https://.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html), which comes with **default** implementations of standard runnable methods (i.e. `invoke`, `batch`, `stream`, `streamEvents`). This guide covers how to use these methods to stream output from chat models.\n", + "All [chat models](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html) implement the [Runnable interface](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html), which comes with **default** implementations of standard runnable methods (i.e. `invoke`, `batch`, `stream`, `streamEvents`).
This guide covers how to use these methods to stream output from chat models.\n", "\n", ":::{.callout-tip}\n", "\n", @@ -215,7 +215,7 @@ "source": [ "## Stream events\n", "\n", - "Chat models also support the standard [`streamEvents()`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#streamEvents) method to stream more granular events from within chains.\n", + "Chat models also support the standard [`streamEvents()`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#streamEvents) method to stream more granular events from within chains.\n", "\n", "This method is useful if you're streaming output from a larger LLM application that contains multiple steps (e.g., a chain composed of a prompt, chat model and parser):" ] diff --git a/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx b/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx index 343bf749a245..ac8c6c728395 100644 --- a/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx +++ b/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx @@ -18,7 +18,7 @@ This notebook goes over how to track your token usage for specific calls. A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model. -LangChain `AIMessage` objects include a [`usage_metadata`](https://api.js.langchain.com/classes/langchain_core_messages.AIMessage.html#usage_metadata) attribute for supported providers. When populated, this attribute will be an object with standard keys (e.g., "input_tokens" and "output_tokens"). +LangChain `AIMessage` objects include a [`usage_metadata`](https://api.js.langchain.com/classes/langchain_core.messages.AIMessage.html#usage_metadata) attribute for supported providers. When populated, this attribute will be an object with standard keys (e.g., "input_tokens" and "output_tokens"). 
#### OpenAI diff --git a/docs/core_docs/docs/how_to/chatbots_tools.ipynb b/docs/core_docs/docs/how_to/chatbots_tools.ipynb index 27cb36437b7a..113155899e9b 100644 --- a/docs/core_docs/docs/how_to/chatbots_tools.ipynb +++ b/docs/core_docs/docs/how_to/chatbots_tools.ipynb @@ -91,7 +91,7 @@ "\n", "```{=mdx}\n", ":::tip\n", - "As of `langchain` version `0.2.8`, the `createOpenAIToolsAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core_language_models_base.ToolDefinition.html).\n", + "As of `langchain` version `0.2.8`, the `createOpenAIToolsAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html).\n", ":::\n", "```\n" ] diff --git a/docs/core_docs/docs/how_to/convert_runnable_to_tool.ipynb b/docs/core_docs/docs/how_to/convert_runnable_to_tool.ipynb index 022e6e17de50..2fdae686438e 100644 --- a/docs/core_docs/docs/how_to/convert_runnable_to_tool.ipynb +++ b/docs/core_docs/docs/how_to/convert_runnable_to_tool.ipynb @@ -21,7 +21,7 @@ "\n", "```\n", "\n", - "For convenience, `Runnables` that accept a string or object input can be converted to tools using the [`asTool`](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#asTool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n", + "For convenience, `Runnables` that accept a string or object input can be converted to tools using the [`asTool`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#asTool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n", "\n", "Here we will demonstrate how to use this method to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n", "\n", diff --git a/docs/core_docs/docs/how_to/custom_callbacks.ipynb 
b/docs/core_docs/docs/how_to/custom_callbacks.ipynb index b9349378900c..f8f98349ea04 100644 --- a/docs/core_docs/docs/how_to/custom_callbacks.ipynb +++ b/docs/core_docs/docs/how_to/custom_callbacks.ipynb @@ -16,7 +16,7 @@ "\n", "LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n", "\n", - "To create a custom callback handler, we need to determine the [event(s)](https://api.js.langchain.com/interfaces/langchain_core_callbacks_base.CallbackHandlerMethods.html) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n", + "To create a custom callback handler, we need to determine the [event(s)](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n", "\n", "An easy way to construct a custom callback handler is to initialize it as an object whose keys are functions with names matching the events we want to handle. Here's an example that only handles the start of a chat model and streamed tokens from the model run:" ] @@ -112,7 +112,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can see [this reference page](https://api.js.langchain.com/interfaces/langchain_core_callbacks_base.CallbackHandlerMethods.html) for a list of events you can handle. 
Note that the `handleChain*` events run for most LCEL runnables.\n", + "You can see [this reference page](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) for a list of events you can handle. Note that the `handleChain*` events run for most LCEL runnables.\n", "\n", "## Next steps\n", "\n", diff --git a/docs/core_docs/docs/how_to/custom_chat.ipynb b/docs/core_docs/docs/how_to/custom_chat.ipynb index 12bf6887b2b4..c0e102bd57e9 100644 --- a/docs/core_docs/docs/how_to/custom_chat.ipynb +++ b/docs/core_docs/docs/how_to/custom_chat.ipynb @@ -22,14 +22,14 @@ "\n", "This notebook goes over how to create a custom chat model wrapper, in case you want to use your own chat model or a different wrapper than one that is directly supported in LangChain.\n", "\n", - "There are a few required things that a chat model needs to implement after extending the [`SimpleChatModel` class](https://v02.api.js.langchain.com/classes/langchain_core_language_models_chat_models.SimpleChatModel.html):\n", + "There are a few required things that a chat model needs to implement after extending the [`SimpleChatModel` class](https://v02.api.js.langchain.com/classes/langchain_core.language_models_chat_models.SimpleChatModel.html):\n", "\n", "- A `_call` method that takes in a list of messages and call options (which includes things like `stop` sequences), and returns a string.\n", "- A `_llmType` method that returns a string. Used for logging purposes only.\n", "\n", "You can also implement the following optional method:\n", "\n", - "- A `_streamResponseChunks` method that returns an `AsyncGenerator` and yields [`ChatGenerationChunks`](https://v02.api.js.langchain.com/classes/langchain_core_outputs.ChatGenerationChunk.html). 
This allows the LLM to support streaming outputs.\n", + "- A `_streamResponseChunks` method that returns an `AsyncGenerator` and yields [`ChatGenerationChunks`](https://v02.api.js.langchain.com/classes/langchain_core.outputs.ChatGenerationChunk.html). This allows the LLM to support streaming outputs.\n", "\n", "Let's implement a very simple custom chat model that just echoes back the first `n` characters of the input." ] @@ -268,7 +268,7 @@ "source": [ "## Richer outputs\n", "\n", - "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseChatModel`](https://v02.api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html) class and implement the lower level\n", + "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseChatModel`](https://v02.api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html) class and implement the lower level\n", "`_generate` method. 
It also takes a list of `BaseMessage`s as input, but requires you to construct and return a `ChatGeneration` object that permits additional metadata.\n", "Here's an example:" ] diff --git a/docs/core_docs/docs/how_to/custom_llm.ipynb b/docs/core_docs/docs/how_to/custom_llm.ipynb index f2972a295e3a..900a90e49f4a 100644 --- a/docs/core_docs/docs/how_to/custom_llm.ipynb +++ b/docs/core_docs/docs/how_to/custom_llm.ipynb @@ -22,14 +22,14 @@ "\n", "This notebook goes over how to create a custom LLM wrapper, in case you want to use your own LLM or a different wrapper than one that is directly supported in LangChain.\n", "\n", - "There are a few required things that a custom LLM needs to implement after extending the [`LLM` class](https://v02.api.js.langchain.com/classes/langchain_core_language_models_llms.LLM.html):\n", + "There are a few required things that a custom LLM needs to implement after extending the [`LLM` class](https://v02.api.js.langchain.com/classes/langchain_core.language_models_llms.LLM.html):\n", "\n", "- A `_call` method that takes in a string and call options (which includes things like `stop` sequences), and returns a string.\n", "- A `_llmType` method that returns a string. Used for logging purposes only.\n", "\n", "You can also implement the following optional method:\n", "\n", - "- A `_streamResponseChunks` method that returns an `AsyncIterator` and yields [`GenerationChunks`](https://v02.api.js.langchain.com/classes/langchain_core_outputs.GenerationChunk.html). This allows the LLM to support streaming outputs.\n", + "- A `_streamResponseChunks` method that returns an `AsyncIterator` and yields [`GenerationChunks`](https://v02.api.js.langchain.com/classes/langchain_core.outputs.GenerationChunk.html). This allows the LLM to support streaming outputs.\n", "\n", "Let's implement a very simple custom LLM that just echoes back the first `n` characters of the input." 
] @@ -151,7 +151,7 @@ "source": [ "## Richer outputs\n", "\n", - "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseLLM`](https://v02.api.js.langchain.com/classes/langchain_core_language_models_llms.BaseLLM.html) class and implement the lower level\n", + "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseLLM`](https://v02.api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html) class and implement the lower level\n", "`_generate` method. Rather than taking a single string as input and a single string output, it can take multiple input strings and map each to multiple string outputs.\n", "Additionally, it returns a `Generation` output with fields for additional metadata rather than just a string." ] diff --git a/docs/core_docs/docs/how_to/custom_retriever.mdx b/docs/core_docs/docs/how_to/custom_retriever.mdx index 4e3c93e0abe3..2f81302acbf1 100644 --- a/docs/core_docs/docs/how_to/custom_retriever.mdx +++ b/docs/core_docs/docs/how_to/custom_retriever.mdx @@ -8,7 +8,7 @@ This guide assumes familiarity with the following concepts: ::: -To create your own retriever, you need to extend the [`BaseRetriever`](https://v02.api.js.langchain.com/classes/langchain_core_retrievers.BaseRetriever.html) class +To create your own retriever, you need to extend the [`BaseRetriever`](https://v02.api.js.langchain.com/classes/langchain_core.retrievers.BaseRetriever.html) class and implement a `_getRelevantDocuments` method that takes a `string` as its first parameter (and an optional `runManager` for tracing). This method should return an array of `Document`s fetched from some source. This process can involve calls to a database, to the web using `fetch`, or any other source. Note the underscore before `_getRelevantDocuments()`. 
The base class wraps the non-prefixed version in order to automatically handle tracing of the original call. diff --git a/docs/core_docs/docs/how_to/custom_tools.ipynb b/docs/core_docs/docs/how_to/custom_tools.ipynb index 8186e3a2b1c6..0d38c5d92d6a 100644 --- a/docs/core_docs/docs/how_to/custom_tools.ipynb +++ b/docs/core_docs/docs/how_to/custom_tools.ipynb @@ -49,13 +49,13 @@ "```\n", "\n", "\n", - "The [`tool`](https://api.js.langchain.com/classes/langchain_core_tools.Tool.html) wrapper function is a convenience method for turning a JavaScript function into a tool. It requires the function itself along with some additional arguments that define your tool. The most important are:\n", + "The [`tool`](https://api.js.langchain.com/classes/langchain_core.tools.Tool.html) wrapper function is a convenience method for turning a JavaScript function into a tool. It requires the function itself along with some additional arguments that define your tool. The most important are:\n", "\n", "- The tool's `name`, which the LLM will use as context as well as to reference the tool\n", "- An optional, but recommended `description`, which the LLM will use as context to know when to use the tool\n", "- A `schema`, which defines the shape of the tool's input\n", "\n", - "The `tool` function will return an instance of the [`StructuredTool`](https://api.js.langchain.com/classes/langchain_core_tools.StructuredTool.html) class, so it is compatible with all the existing tool calling infrastructure in the LangChain library." + "The `tool` function will return an instance of the [`StructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.StructuredTool.html) class, so it is compatible with all the existing tool calling infrastructure in the LangChain library." 
] }, { @@ -102,7 +102,7 @@ "source": [ "## `DynamicStructuredTool`\n", "\n", - "You can also use the [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core_tools.DynamicStructuredTool.html) class to declare tools. Here's an example - note that tools must always return strings!" + "You can also use the [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) class to declare tools. Here's an example - note that tools must always return strings!" ] }, { @@ -148,7 +148,7 @@ "source": [ "## `DynamicTool`\n", "\n", - "For older agents that require tools which accept only a single input, you can pass the relevant parameters to the [`DynamicTool`](https://api.js.langchain.com/classes/langchain_core_tools.DynamicTool.html) class. This is useful when working with older agents that only support tools that accept a single input. In this case, no schema is required:" + "For older agents that require tools which accept only a single input, you can pass the relevant parameters to the [`DynamicTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicTool.html) class. This is useful when working with older agents that only support tools that accept a single input. In this case, no schema is required:" ] }, { @@ -203,7 +203,7 @@ "\n", "- Set the `response_format` parameter to `\"content_and_artifact\"` when defining the tool.\n", "- Make sure that we return a tuple of `[content, artifact]`.\n", - "- Call the tool with a a [`ToolCall`](https://api.js.langchain.com/types/langchain_core_messages_tool.ToolCall.html) (like the ones generated by tool-calling models) rather than with the required schema directly.\n", + "- Call the tool with a [`ToolCall`](https://api.js.langchain.com/types/langchain_core.messages_tool.ToolCall.html) (like the ones generated by tool-calling models) rather than with the required schema directly.\n", "\n", "Here's an example of what this looks like. 
First, create a new tool:" ] diff --git a/docs/core_docs/docs/how_to/debugging.mdx b/docs/core_docs/docs/how_to/debugging.mdx index dd92e4b15587..09d073c34640 100644 --- a/docs/core_docs/docs/how_to/debugging.mdx +++ b/docs/core_docs/docs/how_to/debugging.mdx @@ -3524,5 +3524,5 @@ MacBook-Pro-4:examples jacoblee$ yarn start examples/src/guides/debugging/simple `Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. -There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [`ConsoleCallbackHandler`](https://v02.api.js.langchain.com/classes/langchain_core_tracers_console.ConsoleCallbackHandler.html). +There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [`ConsoleCallbackHandler`](https://v02.api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html). You can also implement your own callbacks to execute custom functionality. 
diff --git a/docs/core_docs/docs/how_to/document_loader_html.ipynb b/docs/core_docs/docs/how_to/document_loader_html.ipynb index ee037f3b937e..4c38f6c8923f 100644 --- a/docs/core_docs/docs/how_to/document_loader_html.ipynb +++ b/docs/core_docs/docs/how_to/document_loader_html.ipynb @@ -9,7 +9,7 @@ "\n", "The HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser.\n", "\n", - "This covers how to load `HTML` documents into a LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core_documents.Document.html) objects that we can use downstream.\n", + "This covers how to load `HTML` documents into LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core.documents.Document.html) objects that we can use downstream.\n", "\n", "Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/). 
Head over to the integrations page to find integrations with additional services, such as [FireCrawl](/docs/integrations/document_loaders/web_loaders/firecrawl).\n", "\n", diff --git a/docs/core_docs/docs/how_to/document_loader_markdown.ipynb b/docs/core_docs/docs/how_to/document_loader_markdown.ipynb index d34914280fbc..c84a92f18508 100644 --- a/docs/core_docs/docs/how_to/document_loader_markdown.ipynb +++ b/docs/core_docs/docs/how_to/document_loader_markdown.ipynb @@ -9,14 +9,14 @@ "\n", "[Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor.\n", "\n", - "Here we cover how to load `Markdown` documents into LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core_documents.Document.html) objects that we can use downstream.\n", + "Here we cover how to load `Markdown` documents into LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core.documents.Document.html) objects that we can use downstream.\n", "\n", "We will cover:\n", "\n", "- Basic usage;\n", "- Parsing of Markdown into elements such as titles, list items, and text.\n", "\n", - "LangChain implements an [UnstructuredLoader](https://v02.api.js.langchain.com/classes/langchain_document_loaders_fs_unstructured.UnstructuredLoader.html) class.\n", + "LangChain implements an [UnstructuredLoader](https://v02.api.js.langchain.com/classes/langchain.document_loaders_fs_unstructured.UnstructuredLoader.html) class.\n", "\n", ":::info Prerequisites\n", "\n", diff --git a/docs/core_docs/docs/how_to/ensemble_retriever.mdx b/docs/core_docs/docs/how_to/ensemble_retriever.mdx index 6eaf23871a33..218a76bcff8c 100644 --- a/docs/core_docs/docs/how_to/ensemble_retriever.mdx +++ b/docs/core_docs/docs/how_to/ensemble_retriever.mdx @@ -9,13 +9,13 @@ This guide assumes familiarity with the following concepts: ::: -The 
[EnsembleRetriever](https://api.js.langchain.com/classes/langchain_retrievers_ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.js.langchain.com/classes/langchain_core_retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm. +The [EnsembleRetriever](https://api.js.langchain.com/classes/langchain.retrievers_ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.js.langchain.com/classes/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm. By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. One useful pattern is to combine a keyword matching retriever with a dense retriever (like embedding similarity), because their strengths are complementary. This can be considered a form of "hybrid search". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity. -Below we demonstrate ensembling of a [simple custom retriever](/docs/how_to/custom_retriever/) that simply returns documents that directly contain the input query with a retriever derived from a [demo, in-memory, vector store](https://api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html). 
+Below we demonstrate ensembling of a [simple custom retriever](/docs/how_to/custom_retriever/) that simply returns documents that directly contain the input query with a retriever derived from a [demo, in-memory, vector store](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html). import CodeBlock from "@theme/CodeBlock"; import Example from "@examples/retrievers/ensemble_retriever.ts"; diff --git a/docs/core_docs/docs/how_to/few_shot_examples.ipynb b/docs/core_docs/docs/how_to/few_shot_examples.ipynb index c2d480dcc8fc..2725480eb258 100644 --- a/docs/core_docs/docs/how_to/few_shot_examples.ipynb +++ b/docs/core_docs/docs/how_to/few_shot_examples.ipynb @@ -19,7 +19,7 @@ "\n", "In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", "\n", - "A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://v02.api.js.langchain.com/classes/langchain_core_example_selectors.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n", + "A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://v02.api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n", "\n", "This guide will cover few-shotting with string prompt templates. 
For a guide on few-shotting with chat messages for chat models, see [here](/docs/how_to/few_shot_examples_chat/).\n", "\n", @@ -127,7 +127,7 @@ "source": [ "### Pass the examples and formatter to `FewShotPromptTemplate`\n", "\n", - "Finally, create a [`FewShotPromptTemplate`](https://v02.api.js.langchain.com/classes/langchain_core_prompts.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `examplePrompt`, then and adds them to the final prompt before `suffix`:" + "Finally, create a [`FewShotPromptTemplate`](https://v02.api.js.langchain.com/classes/langchain_core.prompts.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `examplePrompt`, then adds them to the final prompt before `suffix`:" ] }, { @@ -219,7 +219,7 @@ "source": [ "## Using an example selector\n", "\n", - "We will reuse the example set and the formatter from the previous section. However, instead of feeding the examples directly into the `FewShotPromptTemplate` object, we will feed them into an implementation of `ExampleSelector` called [`SemanticSimilarityExampleSelector`](https://v02.api.js.langchain.com/classes/langchain_core_example_selectors.SemanticSimilarityExampleSelector.html) instance. This class selects few-shot examples from the initial set based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few-shot examples, as well as a vector store to perform the nearest neighbor search.\n", + "We will reuse the example set and the formatter from the previous section. 
However, instead of feeding the examples directly into the `FewShotPromptTemplate` object, we will feed them into an implementation of `ExampleSelector` called [`SemanticSimilarityExampleSelector`](https://v02.api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html) instance. This class selects few-shot examples from the initial set based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few-shot examples, as well as a vector store to perform the nearest neighbor search.\n", "\n", "To show what it looks like, let's initialize an instance and call it in isolation:" ] diff --git a/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb b/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb index a3917f4b5ffa..564736436c25 100644 --- a/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb +++ b/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb @@ -19,7 +19,7 @@ "\n", "This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", "\n", - "There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://v02.api.js.langchain.com/classes/langchain_core_prompts.FewShotChatMessagePromptTemplate.html) as a flexible starting point, and you can modify or replace them as you see fit.\n", + "There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. 
Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://v02.api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html) as a flexible starting point, and you can modify or replace them as you see fit.\n", "\n", "The goal of few-shot prompt templates are to dynamically select examples based on an input, and then format the examples in a final prompt to provide for the model.\n", "\n", @@ -50,7 +50,7 @@ "\n", "The basic components of the template are:\n", "- `examples`: An array of object examples to include in the final prompt.\n", - "- `examplePrompt`: converts each example into 1 or more messages through its [`formatMessages`](https://v02.api.js.langchain.com/classes/langchain_core_prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", + "- `examplePrompt`: converts each example into 1 or more messages through its [`formatMessages`](https://v02.api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", "\n", "Below is a simple demonstration. First, define the examples you'd like to include:" ] @@ -261,8 +261,8 @@ "\n", "Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `exampleSelector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n", "\n", - "- `exampleSelector`: responsible for selecting few-shot examples (and the order in which they are returned) for a given input. 
These implement the [BaseExampleSelector](https://v02.api.js.langchain.com/classes/langchain_core_example_selectors.BaseExampleSelector.html) interface. A common example is the vectorstore-backed [SemanticSimilarityExampleSelector](https://v02.api.js.langchain.com/classes/langchain_core_example_selectors.SemanticSimilarityExampleSelector.html)\n", - "- `examplePrompt`: convert each example into 1 or more messages through its [`formatMessages`](https://v02.api.js.langchain.com/classes/langchain_core_prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", + "- `exampleSelector`: responsible for selecting few-shot examples (and the order in which they are returned) for a given input. These implement the [BaseExampleSelector](https://v02.api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) interface. A common example is the vectorstore-backed [SemanticSimilarityExampleSelector](https://v02.api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html)\n", + "- `examplePrompt`: convert each example into 1 or more messages through its [`formatMessages`](https://v02.api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. 
A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", "\n", "These once again can be composed with other messages and chat templates to assemble your final prompt.\n", "\n", diff --git a/docs/core_docs/docs/how_to/filter_messages.ipynb b/docs/core_docs/docs/how_to/filter_messages.ipynb index 06eb9c3a169f..a2edd021d477 100644 --- a/docs/core_docs/docs/how_to/filter_messages.ipynb +++ b/docs/core_docs/docs/how_to/filter_messages.ipynb @@ -386,7 +386,7 @@ "source": [ "## API reference\n", "\n", - "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core_messages.filterMessages.html)." + "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core.messages.filterMessages.html)." ] } ], diff --git a/docs/core_docs/docs/how_to/functions.ipynb b/docs/core_docs/docs/how_to/functions.ipynb index 7abfcdb03845..ec2a5b4f2e04 100644 --- a/docs/core_docs/docs/how_to/functions.ipynb +++ b/docs/core_docs/docs/how_to/functions.ipynb @@ -26,7 +26,7 @@ "\n", ":::\n", "\n", - "You can use arbitrary functions as [Runnables](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnableLambda.html).\n", + "You can use arbitrary functions as [Runnables](https://v02.api.js.langchain.com/classes/langchain_core.runnables.Runnable.html). 
This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnableLambda.html).\n", "\n", "Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single dict input and unpacks it into multiple argument.\n", "\n", @@ -105,7 +105,7 @@ "source": [ "## Automatic coercion in chains\n", "\n", - "When using custom functions in chains with [`RunnableSequence.from`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnableSequence.html#from) static method, you can omit the explicit `RunnableLambda` creation and rely on coercion.\n", + "When using custom functions in chains with [`RunnableSequence.from`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html#from) static method, you can omit the explicit `RunnableLambda` creation and rely on coercion.\n", "\n", "Here's a simple example with a function that takes the output from the model and returns the first five letters of it:" ] @@ -152,7 +152,7 @@ "\n", "## Passing run metadata\n", "\n", - "Runnable lambdas can optionally accept a [RunnableConfig](https://v02.api.js.langchain.com/interfaces/langchain_core_runnables.RunnableConfig.html) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs." + "Runnable lambdas can optionally accept a [RunnableConfig](https://v02.api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs." 
] }, { diff --git a/docs/core_docs/docs/how_to/generative_ui.mdx b/docs/core_docs/docs/how_to/generative_ui.mdx index d32d3185d38d..8a43e663cf75 100644 --- a/docs/core_docs/docs/how_to/generative_ui.mdx +++ b/docs/core_docs/docs/how_to/generative_ui.mdx @@ -52,7 +52,7 @@ export const agentExecutor = new AgentExecutor({ ``` :::tip -As of `langchain` version `0.2.8`, the `createToolCallingAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core_language_models_base.ToolDefinition.html). +As of `langchain` version `0.2.8`, the `createToolCallingAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html). ::: ```tsx agent.tsx diff --git a/docs/core_docs/docs/how_to/graph_prompting.ipynb b/docs/core_docs/docs/how_to/graph_prompting.ipynb index 6d3a442d279e..209990169bf9 100644 --- a/docs/core_docs/docs/how_to/graph_prompting.ipynb +++ b/docs/core_docs/docs/how_to/graph_prompting.ipynb @@ -281,7 +281,7 @@ "\n", "If we have enough examples, we may want to only include the most relevant ones in the prompt, either because they don't fit in the model's context window or because the long tail of examples distracts the model. And specifically, given any input we want to include the examples most relevant to that input.\n", "\n", - "We can do just this using an ExampleSelector. In this case we'll use a [SemanticSimilarityExampleSelector](https://v02.api.js.langchain.com/classes/langchain_core_example_selectors.SemanticSimilarityExampleSelector.html), which will store the examples in the vector database of our choosing. At runtime it will perform a similarity search between the input and our examples, and return the most semantically similar ones: " + "We can do just this using an ExampleSelector. 
In this case we'll use a [SemanticSimilarityExampleSelector](https://v02.api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html), which will store the examples in the vector database of our choosing. At runtime it will perform a similarity search between the input and our examples, and return the most semantically similar ones: " ] }, { diff --git a/docs/core_docs/docs/how_to/index.mdx b/docs/core_docs/docs/how_to/index.mdx index 77587c33ce60..a0459f3a2e21 100644 --- a/docs/core_docs/docs/how_to/index.mdx +++ b/docs/core_docs/docs/how_to/index.mdx @@ -26,7 +26,7 @@ This highlights functionality that is core to using LangChain. ## LangChain Expression Language (LCEL) -LangChain Expression Language is a way to create arbitrary custom chains. It is built on the [`Runnable`](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html) protocol. +LangChain Expression Language is a way to create arbitrary custom chains. It is built on the [`Runnable`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) protocol. [**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives. diff --git a/docs/core_docs/docs/how_to/lcel_cheatsheet.ipynb b/docs/core_docs/docs/how_to/lcel_cheatsheet.ipynb index ccf1473da901..331dfe28f0ea 100644 --- a/docs/core_docs/docs/how_to/lcel_cheatsheet.ipynb +++ b/docs/core_docs/docs/how_to/lcel_cheatsheet.ipynb @@ -7,10 +7,10 @@ "source": [ "# LangChain Expression Language Cheatsheet\n", "\n", - "This is a quick reference for all the most important LCEL primitives. For more advanced usage see the [LCEL how-to guides](/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html).\n", + "This is a quick reference for all the most important LCEL primitives. 
For more advanced usage see the [LCEL how-to guides](/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html).\n", "\n", "### Invoke a runnable\n", - "#### [runnable.invoke()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#invoke)" + "#### [runnable.invoke()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#invoke)" ] }, { @@ -44,7 +44,7 @@ "metadata": {}, "source": [ "### Batch a runnable\n", - "#### [runnable.batch()](hhttps://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#batch)" + "#### [runnable.batch()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#batch)" ] }, { @@ -78,7 +78,7 @@ "metadata": {}, "source": [ "### Stream a runnable\n", - "#### [runnable.stream()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#stream)" + "#### [runnable.stream()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#stream)" ] }, { @@ -129,7 +129,7 @@ "metadata": {}, "source": [ "### Compose runnables\n", - "#### [runnable.pipe()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#pipe)" + "#### [runnable.pipe()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#pipe)" ] }, { @@ -168,7 +168,7 @@ "id": "889be087", "metadata": {}, "source": [ - "#### [RunnableSequence.from()](https://api.js.langchain.com/classes/langchain_core_runnables.RunnableSequence.html#from)" + "#### [RunnableSequence.from()](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html#from)" ] }, { @@ -211,7 +211,7 @@ "metadata": {}, "source": [ "### Invoke runnables in parallel\n", - "#### [RunnableParallel](https://api.js.langchain.com/classes/langchain_core_runnables.RunnableParallel.html)" + "#### 
[RunnableParallel](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableParallel.html)" ] }, { @@ -254,7 +254,7 @@ "metadata": {}, "source": [ "### Turn a function into a runnable\n", - "#### [RunnableLambda](https://api.js.langchain.com/classes/langchain_core_runnables.RunnableLambda.html)" + "#### [RunnableLambda](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableLambda.html)" ] }, { @@ -292,7 +292,7 @@ "metadata": {}, "source": [ "### Merge input and output dicts\n", - "#### [RunnablePassthrough.assign()](https://api.js.langchain.com/classes/langchain_core_runnables.RunnablePassthrough.html#assign)" + "#### [RunnablePassthrough.assign()](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html#assign)" ] }, { @@ -333,7 +333,7 @@ "source": [ "### Include input dict in output dict\n", "\n", - "#### [RunnablePassthrough](https://api.js.langchain.com/classes/langchain_core_runnables.RunnablePassthrough.html)" + "#### [RunnablePassthrough](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html)" ] }, { @@ -379,7 +379,7 @@ "source": [ "### Add default invocation args\n", "\n", - "#### [runnable.bind()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#bind)" + "#### [runnable.bind()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#bind)" ] }, { @@ -422,7 +422,7 @@ "source": [ "### Add fallbacks\n", "\n", - "#### [runnable.withFallbacks()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#withFallbacks)" + "#### [runnable.withFallbacks()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#withFallbacks)" ] }, { @@ -462,7 +462,7 @@ "metadata": {}, "source": [ "### Add retries\n", - "#### [runnable.withRetry()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#withRetry)" + "#### 
[runnable.withRetry()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#withRetry)" ] }, { @@ -529,7 +529,7 @@ "source": [ "### Configure runnable execution\n", "\n", - "#### [RunnableConfig](https://api.js.langchain.com/interfaces/langchain_core_runnables.RunnableConfig.html)" + "#### [RunnableConfig](https://api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html)" ] }, { @@ -568,7 +568,7 @@ "source": [ "### Add default config to runnable\n", "\n", - "#### [runnable.withConfig()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#withConfig)" + "#### [runnable.withConfig()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#withConfig)" ] }, { @@ -673,7 +673,7 @@ "metadata": {}, "source": [ "### Generate a stream of internal events\n", - "#### [runnable.streamEvents()](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#streamEvents)" + "#### [runnable.streamEvents()](https://v02.api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#streamEvents)" ] }, { @@ -735,7 +735,7 @@ "source": [ "### Return a subset of keys from output object\n", "\n", - "#### [runnable.pick()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#pick)" + "#### [runnable.pick()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#pick)" ] }, { @@ -776,7 +776,7 @@ "source": [ "### Declaratively make a batched version of a runnable\n", "\n", - "#### [`runnable.map()`](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#map)" + "#### [`runnable.map()`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#map)" ] }, { @@ -814,7 +814,7 @@ "source": [ "### Get a graph representation of a runnable\n", "\n", - "#### [runnable.getGraph()](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#getGraph)" + "#### 
[runnable.getGraph()](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#getGraph)" ] }, { diff --git a/docs/core_docs/docs/how_to/merge_message_runs.ipynb b/docs/core_docs/docs/how_to/merge_message_runs.ipynb index b4ccdcb1486f..04640d936fc8 100644 --- a/docs/core_docs/docs/how_to/merge_message_runs.ipynb +++ b/docs/core_docs/docs/how_to/merge_message_runs.ipynb @@ -252,7 +252,7 @@ "source": [ "## API reference\n", "\n", - "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core_messages.mergeMessageRuns.html)." + "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core.messages.mergeMessageRuns.html)." ] } ], diff --git a/docs/core_docs/docs/how_to/migrate_agent.ipynb b/docs/core_docs/docs/how_to/migrate_agent.ipynb index 26f5e0d5136c..32468784478a 100644 --- a/docs/core_docs/docs/how_to/migrate_agent.ipynb +++ b/docs/core_docs/docs/how_to/migrate_agent.ipynb @@ -32,7 +32,7 @@ "\n", "Here we focus on how to move from legacy LangChain agents to more flexible [LangGraph](https://langchain-ai.github.io/langgraphjs/) agents.\n", "LangChain agents (the\n", - "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html)\n", + "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain.agents.AgentExecutor.html)\n", "in particular) have multiple configuration parameters. 
In this notebook we will\n", "show how those parameters map to the LangGraph\n", "react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html) prebuilt helper method.\n", @@ -80,7 +80,7 @@ ":::note\n", "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", "\n", - "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core_tools.DynamicStructuredTool.html) instead.\n", + "If you are on an older version of core, you should instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", ":::" ] }, @@ -924,7 +924,7 @@ "With LangChain's\n", "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html),\n", "you could iterate over the steps using the\n", - "[`stream`](https://api.js.langchain.com/classes/langchain_core_runnables.Runnable.html#stream) method:\n" + "[`stream`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#stream) method:\n" ] }, { diff --git a/docs/core_docs/docs/how_to/multi_vector.mdx b/docs/core_docs/docs/how_to/multi_vector.mdx index e81a96d9802f..a0feb0b7b782 100644 --- a/docs/core_docs/docs/how_to/multi_vector.mdx +++ b/docs/core_docs/docs/how_to/multi_vector.mdx @@ -11,7 +11,7 @@ This guide assumes familiarity with the following concepts: ::: Embedding different representations of an original document, then returning the original document when any of the representations result in a search hit, can allow you to -tune and improve your retrieval performance. LangChain has a base [`MultiVectorRetriever`](https://v02.api.js.langchain.com/classes/langchain_retrievers_multi_vector.MultiVectorRetriever.html) designed to do just this! +tune and improve your retrieval performance. 
LangChain has a base [`MultiVectorRetriever`](https://v02.api.js.langchain.com/classes/langchain.retrievers_multi_vector.MultiVectorRetriever.html) designed to do just this! A lot of the complexity lies in how to create the multiple vectors per document. This guide covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. diff --git a/docs/core_docs/docs/how_to/multiple_queries.ipynb b/docs/core_docs/docs/how_to/multiple_queries.ipynb index e20287378501..a69c64a70fe4 100644 --- a/docs/core_docs/docs/how_to/multiple_queries.ipynb +++ b/docs/core_docs/docs/how_to/multiple_queries.ipynb @@ -20,7 +20,7 @@ "But retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well.\n", "Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n", "\n", - "The [`MultiQueryRetriever`](https://v02.api.js.langchain.com/classes/langchain_retrievers_multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query.\n", + "The [`MultiQueryRetriever`](https://v02.api.js.langchain.com/classes/langchain.retrievers_multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query.\n", "For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents.\n", "By generating multiple perspectives on the same question, the `MultiQueryRetriever` can help overcome some of the limitations of the distance-based retrieval and get a richer set of results.\n", "\n", diff --git a/docs/core_docs/docs/how_to/output_parser_fixing.ipynb b/docs/core_docs/docs/how_to/output_parser_fixing.ipynb index 41ed030102bf..248796be393c 100644 --- 
a/docs/core_docs/docs/how_to/output_parser_fixing.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_fixing.ipynb @@ -17,11 +17,11 @@ "\n", ":::\n", "\n", - "LLMs aren't perfect, and sometimes fail to produce output that perfectly matches a the desired format. To help handle errors, we can use the [`OutputFixingParser`](https://api.js.langchain.com/classes/langchain_output_parsers.OutputFixingParser.html) This output parser wraps another output parser, and in the event that the first one fails, it calls out to another LLM in an attempt to fix any errors.\n", + "LLMs aren't perfect, and sometimes fail to produce output that perfectly matches the desired format. To help handle errors, we can use the [`OutputFixingParser`](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html). This output parser wraps another output parser, and in the event that the first one fails, it calls out to another LLM in an attempt to fix any errors.\n", "\n", "Specifically, we can pass the misformatted output, along with the formatted instructions, to the model and ask it to fix it.\n", "\n", - "For this example, we'll use the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core_output_parsers.StructuredOutputParser.html), which can validate output according to a Zod schema. Here's what happens if we pass it a result that does not comply with the schema:" + "For this example, we'll use the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html), which can validate output according to a Zod schema. Here's what happens if we pass it a result that does not comply with the schema:" ] }, { @@ -114,7 +114,7 @@ "id": "84498e02", "metadata": {}, "source": [ - "For more about different parameters and options, check out our [API reference docs](https://api.js.langchain.com/classes/langchain_output_parsers.OutputFixingParser.html)." 
+ "For more about different parameters and options, check out our [API reference docs](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html)." ] } ], diff --git a/docs/core_docs/docs/how_to/output_parser_json.ipynb b/docs/core_docs/docs/how_to/output_parser_json.ipynb index a08425097356..2784dc0e8279 100644 --- a/docs/core_docs/docs/how_to/output_parser_json.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_json.ipynb @@ -31,7 +31,7 @@ "id": "ae909b7a", "metadata": {}, "source": [ - "The [`JsonOutputParser`](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output." + "The [`JsonOutputParser`](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output." ] }, { diff --git a/docs/core_docs/docs/how_to/output_parser_structured.ipynb b/docs/core_docs/docs/how_to/output_parser_structured.ipynb index 88f3b6cd539e..c7860f637fa9 100644 --- a/docs/core_docs/docs/how_to/output_parser_structured.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_structured.ipynb @@ -32,7 +32,7 @@ "\n", "## Get started\n", "\n", - "The primary type of output parser for working with structured data in model responses is the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core_output_parsers.StructuredOutputParser.html). In the below example, we define a schema for the type of output we expect from the model using [`zod`](https://zod.dev).\n", + "The primary type of output parser for working with structured data in model responses is the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html). 
In the below example, we define a schema for the type of output we expect from the model using [`zod`](https://zod.dev).\n", "\n", "First, let's see the default formatting instructions we'll plug into the prompt:" ] @@ -271,7 +271,7 @@ "id": "a3a40f19", "metadata": {}, "source": [ - "The simpler [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core_output_parsers.JsonOutputParser.html), however, supports streaming through partial outputs:" + "The simpler [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html), however, supports streaming through partial outputs:" ] }, { diff --git a/docs/core_docs/docs/how_to/output_parser_xml.ipynb b/docs/core_docs/docs/how_to/output_parser_xml.ipynb index 9e1aae1f11e4..665bc12d2d1a 100644 --- a/docs/core_docs/docs/how_to/output_parser_xml.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_xml.ipynb @@ -20,7 +20,7 @@ "\n", "LLMs from different providers often have different strengths depending on the specific data they are trianed on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n", "\n", - "This guide shows you how to use the [`XMLOutputParser`](https://api.js.langchain.com/classes/langchain_core_output_parsers.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n", + "This guide shows you how to use the [`XMLOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.XMLOutputParser.html) to prompt models for XML output, then parse that output into a usable format.\n", "\n", ":::{.callout-note}\n", "Keep in mind that large language models are leaky abstractions! 
You'll have to use an LLM with sufficient capacity to generate well-formed XML.\n", diff --git a/docs/core_docs/docs/how_to/parallel.mdx b/docs/core_docs/docs/how_to/parallel.mdx index bf0b21a9420f..6a06f71b14a4 100644 --- a/docs/core_docs/docs/how_to/parallel.mdx +++ b/docs/core_docs/docs/how_to/parallel.mdx @@ -9,7 +9,7 @@ This guide assumes familiarity with the following concepts: ::: -The [`RunnableParallel`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnableParallel.html) (also known as a `RunnableMap`) primitive is an object whose values are runnables (or things that can be coerced to runnables, like functions). +The [`RunnableParallel`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnableParallel.html) (also known as a `RunnableMap`) primitive is an object whose values are runnables (or things that can be coerced to runnables, like functions). It runs all of its values in parallel, and each value is called with the initial input to the `RunnableParallel`. The final return value is an object with the results of each value under its appropriate key. ## Formatting with `RunnableParallels` diff --git a/docs/core_docs/docs/how_to/parent_document_retriever.mdx b/docs/core_docs/docs/how_to/parent_document_retriever.mdx index dd024b715d66..235c1d6d09d6 100644 --- a/docs/core_docs/docs/how_to/parent_document_retriever.mdx +++ b/docs/core_docs/docs/how_to/parent_document_retriever.mdx @@ -21,7 +21,7 @@ When splitting documents for retrieval, there are often conflicting desires: 1. You may want to have small documents, so that their embeddings can most accurately reflect their meaning. If documents are too long, then the embeddings can lose meaning. 2. You want to have long enough documents that the context of each chunk is retained. 
-The [`ParentDocumentRetriever`](https://v02.api.js.langchain.com/classes/langchain_retrievers_parent_document.ParentDocumentRetriever.html) strikes that balance by splitting and storing small chunks of data. During retrieval, it first fetches the small chunks but then looks up the parent ids for those chunks and returns those larger documents. +The [`ParentDocumentRetriever`](https://v02.api.js.langchain.com/classes/langchain.retrievers_parent_document.ParentDocumentRetriever.html) strikes that balance by splitting and storing small chunks of data. During retrieval, it first fetches the small chunks but then looks up the parent ids for those chunks and returns those larger documents. Note that "parent document" refers to the document that a small chunk originated from. This can either be the whole raw document OR a larger chunk. diff --git a/docs/core_docs/docs/how_to/passthrough.ipynb b/docs/core_docs/docs/how_to/passthrough.ipynb index 830c1c2c69df..878fd5235c73 100644 --- a/docs/core_docs/docs/how_to/passthrough.ipynb +++ b/docs/core_docs/docs/how_to/passthrough.ipynb @@ -30,7 +30,7 @@ ":::\n", "\n", "\n", - "When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n", + "When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. 
The [`RunnablePassthrough`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html) class allows you to do just this, and is typically used in conjunction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n", "\n", "Let's look at an example:" ] diff --git a/docs/core_docs/docs/how_to/prompts_composition.ipynb b/docs/core_docs/docs/how_to/prompts_composition.ipynb index b28411ea7083..623bc1f648ea 100644 --- a/docs/core_docs/docs/how_to/prompts_composition.ipynb +++ b/docs/core_docs/docs/how_to/prompts_composition.ipynb @@ -136,7 +136,7 @@ "metadata": {}, "source": [ "You can then easily create a pipeline combining it with other messages *or* message templates.\n", - "Use a `BaseMessage` when there are no variables to be formatted, use a `MessageTemplate` when there are variables to be formatted. You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://v02.api.js.langchain.com/classes/langchain_core_prompts.HumanMessagePromptTemplate.html).)" + "Use a `BaseMessage` when there are no variables to be formatted, use a `MessageTemplate` when there are variables to be formatted. 
You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://v02.api.js.langchain.com/classes/langchain_core.prompts.HumanMessagePromptTemplate.html).)" ] }, { diff --git a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb index db2f63d19198..e0148efb0f45 100644 --- a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -369,7 +369,7 @@ "\n", "For this we can use:\n", "\n", - "- [BaseChatMessageHistory](https://v02.api.js.langchain.com/classes/langchain_core_chat_history.BaseChatMessageHistory.html): Store chat history.\n", + "- [BaseChatMessageHistory](https://v02.api.js.langchain.com/classes/langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", "- [RunnableWithMessageHistory](/docs/how_to/message_history/): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", "\n", "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL page." diff --git a/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb b/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb index 63741f503405..86675c90ca4b 100644 --- a/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb +++ b/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb @@ -36,7 +36,7 @@ "\n", "To obtain the string content directly, use `.splitText`.\n", "\n", - "To create LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core_documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments`." 
+ "To create LangChain [Document](https://v02.api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments`." ] }, { diff --git a/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx b/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx index 218c1b868790..85e89dae1774 100644 --- a/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx +++ b/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx @@ -12,7 +12,7 @@ This guide assumes familiarity with the following concepts: ::: One way to reduce retrieval latency is through a technique called "Adaptive Retrieval". -The [`MatryoshkaRetriever`](https://v02.api.js.langchain.com/classes/langchain_retrievers_matryoshka_retriever.MatryoshkaRetriever.html) uses the +The [`MatryoshkaRetriever`](https://v02.api.js.langchain.com/classes/langchain.retrievers_matryoshka_retriever.MatryoshkaRetriever.html) uses the Matryoshka Representation Learning (MRL) technique to retrieve documents for a given query in two steps: - **First-pass**: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast, diff --git a/docs/core_docs/docs/how_to/sequence.ipynb b/docs/core_docs/docs/how_to/sequence.ipynb index d5840412346f..869a7b983c22 100644 --- a/docs/core_docs/docs/how_to/sequence.ipynb +++ b/docs/core_docs/docs/how_to/sequence.ipynb @@ -21,7 +21,7 @@ "\n", "One point about [LangChain Expression Language](/docs/concepts/#langchain-expression-language) is that any two runnables can be \"chained\" together into sequences. The output of the previous runnable's `.invoke()` call is passed as input to the next runnable. This can be done using the `.pipe()` method.\n", "\n", - "The resulting [`RunnableSequence`](https://v02.api.js.langchain.com/classes/langchain_core_runnables.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. 
Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n", + "The resulting [`RunnableSequence`](https://v02.api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n", "\n", ":::info Prerequisites\n", "\n", diff --git a/docs/core_docs/docs/how_to/sql_prompting.mdx b/docs/core_docs/docs/how_to/sql_prompting.mdx index 5cfbc2879cd2..25d2ac9b4ed1 100644 --- a/docs/core_docs/docs/how_to/sql_prompting.mdx +++ b/docs/core_docs/docs/how_to/sql_prompting.mdx @@ -46,7 +46,7 @@ import DbCheck from "@examples/use_cases/sql/db_check.ts"; ## Dialect-specific prompting One of the simplest things we can do is make our prompt specific to the SQL dialect we're using. 
-When using the built-in [`createSqlQueryChain`](https://v02.api.js.langchain.com/functions/langchain_chains_sql_db.createSqlQueryChain.html) and [`SqlDatabase`](https://v02.api.js.langchain.com/classes/langchain_sql_db.SqlDatabase.html), this is handled for you for any of the following dialects: +When using the built-in [`createSqlQueryChain`](https://v02.api.js.langchain.com/functions/langchain.chains_sql_db.createSqlQueryChain.html) and [`SqlDatabase`](https://v02.api.js.langchain.com/classes/langchain.sql_db.SqlDatabase.html), this is handled for you for any of the following dialects: import DialectExample from "@examples/use_cases/sql/prompting/list_dialects.ts"; @@ -83,7 +83,7 @@ import FewShotExample from "@examples/use_cases/sql/prompting/few_shot.ts"; If we have enough examples, we may want to only include the most relevant ones in the prompt, either because they don't fit in the model's context window or because the long tail of examples distracts the model. And specifically, given any input we want to include the examples most relevant to that input. -We can do just this using an ExampleSelector. In this case we'll use a [`SemanticSimilarityExampleSelector`](https://v02.api.js.langchain.com/classes/langchain_core_example_selectors.SemanticSimilarityExampleSelector.html), +We can do just this using an ExampleSelector. In this case we'll use a [`SemanticSimilarityExampleSelector`](https://v02.api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html), which will store the examples in the vector database of our choosing. 
At runtime it will perform a similarity search between the input and our examples, and return the most semantically similar ones: diff --git a/docs/core_docs/docs/how_to/stream_agent_client.mdx b/docs/core_docs/docs/how_to/stream_agent_client.mdx index 2a06a0e41eea..19d9875adbd6 100644 --- a/docs/core_docs/docs/how_to/stream_agent_client.mdx +++ b/docs/core_docs/docs/how_to/stream_agent_client.mdx @@ -95,7 +95,7 @@ Next, lets define our async function inside which contains the agent logic: ``` :::tip -As of `langchain` version `0.2.8`, the `createToolCallingAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core_language_models_base.ToolDefinition.html). +As of `langchain` version `0.2.8`, the `createToolCallingAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html). ::: Here you can see we're doing a few things: diff --git a/docs/core_docs/docs/how_to/streaming_llm.mdx b/docs/core_docs/docs/how_to/streaming_llm.mdx index 7fe5f75d8d91..2cd448777668 100644 --- a/docs/core_docs/docs/how_to/streaming_llm.mdx +++ b/docs/core_docs/docs/how_to/streaming_llm.mdx @@ -4,7 +4,7 @@ sidebar_position: 1 # How to stream responses from an LLM -All [`LLM`s](https://v02.api.js.langchain.com/classes/langchain_core_language_models_llms.BaseLLM.html) implement the [Runnable interface](https://v02.api.js.langchain.com/classes/langchain_core_runnables.Runnable.html), which comes with **default** implementations of standard runnable methods (i.e. `ainvoke`, `batch`, `abatch`, `stream`, `astream`, `astream_events`). +All [`LLM`s](https://v02.api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html) implement the [Runnable interface](https://v02.api.js.langchain.com/classes/langchain_core.runnables.Runnable.html), which comes with **default** implementations of standard runnable methods (i.e. 
`ainvoke`, `batch`, `abatch`, `stream`, `astream`, `astream_events`). The **default** streaming implementations provide an `AsyncGenerator` that yields a single value: the final output from the underlying chat model provider. @@ -40,7 +40,7 @@ For models that do not support streaming, the entire response will be returned as a single chunk. ## Using a callback handler -You can also use a [`CallbackHandler`](https://v02.api.js.langchain.com/classes/langchain_core_callbacks_base.BaseCallbackHandler.html) like so: +You can also use a [`CallbackHandler`](https://v02.api.js.langchain.com/classes/langchain_core.callbacks_base.BaseCallbackHandler.html) like so: import StreamingExample from "@examples/models/llm/llm_streaming.ts"; diff --git a/docs/core_docs/docs/how_to/structured_output.ipynb b/docs/core_docs/docs/how_to/structured_output.ipynb index 5278d83c20ff..b114d5a21237 100644 --- a/docs/core_docs/docs/how_to/structured_output.ipynb +++ b/docs/core_docs/docs/how_to/structured_output.ipynb @@ -328,7 +328,7 @@ "\n", "### Using `JsonOutputParser`\n", "\n", - "The following example uses the built-in [`JsonOutputParser`](https://v02.api.js.langchain.com/classes/langchain_core_output_parsers.JsonOutputParser.html) to parse the output of a chat model prompted to match a the given JSON schema. Note that we are adding `format_instructions` directly to the prompt from a method on the parser:" + "The following example uses the built-in [`JsonOutputParser`](https://v02.api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) to parse the output of a chat model prompted to match the given JSON schema. 
Note that we are adding `format_instructions` directly to the prompt from a method on the parser:" ] }, { diff --git a/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx b/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx index bec44045f35d..b218eb68bf12 100644 --- a/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx +++ b/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx @@ -10,7 +10,7 @@ This guide assumes familiarity with the following concepts: ::: -This guide covers the [`TimeWeightedVectorStoreRetriever`](https://v02.api.js.langchain.com/classes/langchain_retrievers_time_weighted.TimeWeightedVectorStoreRetriever.html), +This guide covers the [`TimeWeightedVectorStoreRetriever`](https://v02.api.js.langchain.com/classes/langchain.retrievers_time_weighted.TimeWeightedVectorStoreRetriever.html), which uses a combination of semantic similarity and a time decay. The algorithm for scoring them is: diff --git a/docs/core_docs/docs/how_to/tool_artifacts.ipynb b/docs/core_docs/docs/how_to/tool_artifacts.ipynb index 426d904987e7..44cf610980de 100644 --- a/docs/core_docs/docs/how_to/tool_artifacts.ipynb +++ b/docs/core_docs/docs/how_to/tool_artifacts.ipynb @@ -22,7 +22,7 @@ "\n", "For example if a tool returns something like a custom object or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. 
At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", "\n", - "The Tool and [ToolMessage](https://api.js.langchain.com/classes/langchain_core_messages_tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the `ToolMessage.content`) and those parts which are meant for use outside the model (`ToolMessage.artifact`).\n", + "The Tool and [ToolMessage](https://api.js.langchain.com/classes/langchain_core.messages_tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the `ToolMessage.content`) and those parts which are meant for use outside the model (`ToolMessage.artifact`).\n", "\n", "```{=mdx}\n", ":::caution Compatibility\n", diff --git a/docs/core_docs/docs/how_to/tool_calling.ipynb b/docs/core_docs/docs/how_to/tool_calling.ipynb index e4ac412604f0..4560e2d7ddbb 100644 --- a/docs/core_docs/docs/how_to/tool_calling.ipynb +++ b/docs/core_docs/docs/how_to/tool_calling.ipynb @@ -66,14 +66,14 @@ "source": [ "## Passing tools to chat models\n", "\n", - "Chat models that support tool calling features implement a [`.bindTools()`](https://api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html#bindTools) method, which \n", - "receives a list of LangChain [tool objects](https://api.js.langchain.com/classes/langchain_core_tools.StructuredTool.html)\n", + "Chat models that support tool calling features implement a [`.bindTools()`](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html#bindTools) method, which \n", + "receives a list of LangChain [tool objects](https://api.js.langchain.com/classes/langchain_core.tools.StructuredTool.html)\n", "and binds them to the chat model in its expected format. 
Subsequent invocations of the \n", "chat model will include tool schemas in its calls to the LLM.\n", "\n", "```{=mdx}\n", ":::note\n", - "As of `@langchain/core` version `0.2.9`, all chat models with tool calling capabilities now support [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core_language_models_base.ToolDefinition.html).\n", + "As of `@langchain/core` version `0.2.9`, all chat models with tool calling capabilities now support [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html).\n", ":::\n", "```\n", "\n", @@ -102,7 +102,7 @@ ":::note\n", "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", "\n", - "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core_tools.DynamicStructuredTool.html) instead.\n", + "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", ":::\n", "```" ] @@ -237,9 +237,9 @@ "## Tool calls\n", "\n", "If tool calls are included in a LLM response, they are attached to the corresponding \n", - "[message](https://api.js.langchain.com/classes/langchain_core_messages.AIMessage.html) \n", - "or [message chunk](https://api.js.langchain.com/classes/langchain_core_messages.AIMessageChunk.html) \n", - "as a list of [tool call](https://api.js.langchain.com/types/langchain_core_messages_tool.ToolCall.html) \n", + "[message](https://api.js.langchain.com/classes/langchain_core.messages.AIMessage.html) \n", + "or [message chunk](https://api.js.langchain.com/classes/langchain_core.messages.AIMessageChunk.html) \n", + "as a list of [tool call](https://api.js.langchain.com/types/langchain_core.messages_tool.ToolCall.html) \n", "objects in the `.tool_calls` attribute.\n", "\n", "A `ToolCall` 
is a typed dict that includes a \n", @@ -288,7 +288,7 @@ "The `.tool_calls` attribute should contain valid tool calls. Note that on occasion, \n", "model providers may output malformed tool calls (e.g., arguments that are not \n", "valid JSON). When parsing fails in these cases, instances \n", - "of [`InvalidToolCall`](https://api.js.langchain.com/types/langchain_core_messages_tool.InvalidToolCall.html) \n", + "of [`InvalidToolCall`](https://api.js.langchain.com/types/langchain_core.messages_tool.InvalidToolCall.html) \n", "are populated in the `.invalid_tool_calls` attribute. An `InvalidToolCall` can have \n", "a name, string arguments, identifier, and error message." ] diff --git a/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb b/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb index 1f8119ecd13d..6569208d97ec 100644 --- a/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb +++ b/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb @@ -27,7 +27,7 @@ ":::note\n", "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", "\n", - "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core_tools.DynamicStructuredTool.html) instead.\n", + "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", ":::" ] }, diff --git a/docs/core_docs/docs/how_to/tool_configure.ipynb b/docs/core_docs/docs/how_to/tool_configure.ipynb index ae8443deb09a..83e7c2944252 100644 --- a/docs/core_docs/docs/how_to/tool_configure.ipynb +++ b/docs/core_docs/docs/how_to/tool_configure.ipynb @@ -18,13 +18,13 @@ ":::\n", "```\n", "\n", - "Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. 
However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.js.langchain.com/interfaces/langchain_core_runnables.RunnableConfig.html) object.\n", + "Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html) object.\n", "\n", "This guide covers how to do this for custom tools created in different ways.\n", "\n", "## From the `tool` method\n", "\n", - "Accessing the `RunnableConfig` object for a custom tool created with the [`tool`](https://api.js.langchain.com/functions/langchain_core_tools.tool-1.html) helper method is simple - it's always the second parameter passed into your custom function. Here's an example:" + "Accessing the `RunnableConfig` object for a custom tool created with the [`tool`](https://api.js.langchain.com/functions/langchain_core.tools.tool-1.html) helper method is simple - it's always the second parameter passed into your custom function. 
Here's an example:" ] }, { diff --git a/docs/core_docs/docs/how_to/tools_prompting.ipynb b/docs/core_docs/docs/how_to/tools_prompting.ipynb index 0d4c92defd78..640511df27d7 100644 --- a/docs/core_docs/docs/how_to/tools_prompting.ipynb +++ b/docs/core_docs/docs/how_to/tools_prompting.ipynb @@ -140,7 +140,7 @@ "\n", "```{=mdx}\n", ":::tip\n", - "As of `langchain` version `0.2.8`, the `renderTextDescription` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core_language_models_base.ToolDefinition.html).\n", + "As of `langchain` version `0.2.8`, the `renderTextDescription` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html).\n", ":::\n", "```" ] diff --git a/docs/core_docs/docs/how_to/trim_messages.ipynb b/docs/core_docs/docs/how_to/trim_messages.ipynb index 7ec290346693..b52b7223a5a5 100644 --- a/docs/core_docs/docs/how_to/trim_messages.ipynb +++ b/docs/core_docs/docs/how_to/trim_messages.ipynb @@ -783,7 +783,7 @@ "source": [ "## API reference\n", "\n", - "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core_messages.trimMessages.html)." + "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core.messages.trimMessages.html)." 
] } ], diff --git a/docs/core_docs/docs/how_to/vectorstore_retriever.mdx b/docs/core_docs/docs/how_to/vectorstore_retriever.mdx index b9f44126b799..09394ed51ffa 100644 --- a/docs/core_docs/docs/how_to/vectorstore_retriever.mdx +++ b/docs/core_docs/docs/how_to/vectorstore_retriever.mdx @@ -11,7 +11,7 @@ This guide assumes familiarity with the following concepts: ::: -Vector stores can be converted into retrievers using the [`.asRetriever()`](https://v02.api.js.langchain.com/classes/langchain_core_vectorstores.VectorStore.html#asRetriever) method, which allows you to more easily compose them in chains. +Vector stores can be converted into retrievers using the [`.asRetriever()`](https://v02.api.js.langchain.com/classes/langchain_core.vectorstores.VectorStore.html#asRetriever) method, which allows you to more easily compose them in chains. Below, we show a retrieval-augmented generation (RAG) chain that performs question answering over documents using the following steps: diff --git a/docs/core_docs/docs/how_to/vectorstores.mdx b/docs/core_docs/docs/how_to/vectorstores.mdx index 95ac7f44eeea..7a1582c9e217 100644 --- a/docs/core_docs/docs/how_to/vectorstores.mdx +++ b/docs/core_docs/docs/how_to/vectorstores.mdx @@ -23,7 +23,7 @@ vectors, and then at query time to embed the unstructured query and retrieve the 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you. -This walkthrough uses a basic, unoptimized implementation called [`MemoryVectorStore`](https://v02.api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html) that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. 
+This walkthrough uses a basic, unoptimized implementation called [`MemoryVectorStore`](https://v02.api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. LangChain contains many built-in integrations - see [this section](/docs/how_to/vectorstores/#which-one-to-pick) for more, or the [full list of integrations](/docs/integrations/vectorstores/). ## Creating a new index diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb index 3d19d94677d2..f28d9aefe1f2 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb @@ -26,7 +26,7 @@ "\n", "```\n", "\n", - "This notebook provides a quick overview for getting started with `DirectoryLoader` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `DirectoryLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_document_loaders_fs_directory.DirectoryLoader.html).\n", + "This notebook provides a quick overview for getting started with `DirectoryLoader` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `DirectoryLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html).\n", "\n", "This example goes over how to load data from folders with multiple files. The second argument is a map of file extensions to loader factories. 
Each file will be passed to the matching loader, and the resulting documents will be concatenated together.\n", "\n", @@ -45,7 +45,7 @@ "\n", "| Class | Package | Compatibility | Local | PY support | \n", "| :--- | :--- | :---: | :---: | :---: |\n", - "| [DirectoryLoader](https://api.js.langchain.com/classes/langchain_document_loaders_fs_directory.DirectoryLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain_document_loaders_fs_directory.html) | Node-only | ✅ | ✅ |\n", + "| [DirectoryLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain.document_loaders_fs_directory.html) | Node-only | ✅ | ✅ |\n", "\n", "## Setup\n", "\n", @@ -160,7 +160,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all DirectoryLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_document_loaders_fs_directory.DirectoryLoader.html" + "For detailed documentation of all DirectoryLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html" ] }, { diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb index bf6c6de8d823..aea4ff33148c 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb @@ -26,14 +26,14 @@ "\n", "```\n", "\n", - "This notebook provides a quick overview for getting started with `TextLoader` [document loaders](/docs/concepts/#document-loaders). 
For detailed documentation of all `TextLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_document_loaders_fs_text.TextLoader.html).\n", + "This notebook provides a quick overview for getting started with `TextLoader` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `TextLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html).\n", "\n", "## Overview\n", "### Integration details\n", "\n", "| Class | Package | Compatibility | Local | PY support | \n", "| :--- | :--- | :---: | :---: | :---: |\n", - "| [TextLoader](https://api.js.langchain.com/classes/langchain_document_loaders_fs_text.TextLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain_document_loaders_fs_text.html) | Node-only | ✅ | ❌ |\n", + "| [TextLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain.document_loaders_fs_text.html) | Node-only | ✅ | ❌ |\n", "\n", "## Setup\n", "\n", @@ -132,7 +132,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all TextLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_document_loaders_fs_text.TextLoader.html" + "For detailed documentation of all TextLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html" ] }, { diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb index 6004fabb0f8a..c8dd6338a5e5 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb +++ 
b/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb @@ -158,7 +158,7 @@ "source": [ "## Directories\n", "\n", - "You can also load all of the files in the directory using [`UnstructuredDirectoryLoader`](https://v02.api.js.langchain.com/classes/langchain_document_loaders_fs_unstructured.UnstructuredDirectoryLoader.html), which inherits from [`DirectoryLoader`](/docs/integrations/document_loaders/file_loaders/directory):\n" + "You can also load all of the files in the directory using [`UnstructuredDirectoryLoader`](https://v02.api.js.langchain.com/classes/langchain.document_loaders_fs_unstructured.UnstructuredDirectoryLoader.html), which inherits from [`DirectoryLoader`](/docs/integrations/document_loaders/file_loaders/directory):\n" ] }, { diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/sitemap.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/sitemap.mdx index b25f229919a1..6989a2101f7b 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/sitemap.mdx +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/sitemap.mdx @@ -1,6 +1,6 @@ # Sitemap Loader -This notebook goes over how to use the [`SitemapLoader`](https://v02.api.js.langchain.com/classes/langchain_document_loaders_web_sitemap.SitemapLoader.html) class to load sitemaps into `Document`s. +This notebook goes over how to use the [`SitemapLoader`](https://v02.api.js.langchain.com/classes/langchain.document_loaders_web_sitemap.SitemapLoader.html) class to load sitemaps into `Document`s. 
## Setup diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/chroma.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/chroma.ipynb index 8fb4f47b7e5f..5ad60e41aac8 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/chroma.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/chroma.ipynb @@ -21,7 +21,7 @@ "source": [ "# Chroma\n", "\n", - "This guide will help you getting started with such a retriever backed by a [Chroma vector store](/docs/integrations/vectorstores/chroma). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [Chroma vector store](/docs/integrations/vectorstores/chroma). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -384,7 +384,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Chroma self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all Chroma self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/hnswlib.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/hnswlib.ipynb index 0b0d16400815..f1b683c0187b 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/hnswlib.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/hnswlib.ipynb @@ -21,7 +21,7 @@ "source": [ "# HNSWLib\n", "\n", - "This guide will help you getting started with such a retriever backed by a [HNSWLib vector store](/docs/integrations/vectorstores/hnswlib). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [HNSWLib vector store](/docs/integrations/vectorstores/hnswlib). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -378,7 +378,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all HNSWLib self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all HNSWLib self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/memory.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/memory.ipynb index 60c4e5cc8dea..cde1d7578d5b 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/memory.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/memory.ipynb @@ -21,7 +21,7 @@ "source": [ "# In-memory\n", "\n", - "This guide will help you getting started with such a retriever backed by an [in-memory vector store](/docs/integrations/vectorstores/memory). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by an [in-memory vector store](/docs/integrations/vectorstores/memory). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -33,7 +33,7 @@ "\n", "| Backing vector store | Self-host | Cloud offering | Package | Py support |\n", "| :--- | :--- | :---: | :---: | :---: |\n", - "[`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html) | ✅ | ❌ | [`langchain`](https://www.npmjs.com/package/langchain) | ❌ |\n", + "[`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) | ✅ | ❌ | [`langchain`](https://www.npmjs.com/package/langchain) | ❌ |\n", "\n", "## Setup\n", "\n", @@ -378,7 +378,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all in-memory self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." 
+ "For detailed documentation of all in-memory self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." ] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/pinecone.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/pinecone.ipynb index 277a194fd034..ffb7219eea1f 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/pinecone.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/pinecone.ipynb @@ -21,7 +21,7 @@ "source": [ "# Pinecone\n", "\n", - "This guide will help you getting started with such a retriever backed by a [Pinecone vector store](/docs/integrations/vectorstores/pinecone). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [Pinecone vector store](/docs/integrations/vectorstores/pinecone). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -396,7 +396,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Pinecone self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all Pinecone self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/qdrant.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/qdrant.ipynb index 154e9bd6150a..3236c436ebdb 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/qdrant.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/qdrant.ipynb @@ -21,7 +21,7 @@ "source": [ "# Qdrant\n", "\n", - "This guide will help you getting started with such a retriever backed by a [Qdrant vector store](/docs/integrations/vectorstores/qdrant). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [Qdrant vector store](/docs/integrations/vectorstores/qdrant). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -391,7 +391,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Qdrant self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all Qdrant self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/supabase.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/supabase.ipynb index b249902a7c66..8e6fac3cc87b 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/supabase.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/supabase.ipynb @@ -21,7 +21,7 @@ "source": [ "# Supabase\n", "\n", - "This guide will help you getting started with such a retriever backed by a [Supabase vector store](/docs/integrations/vectorstores/supabase). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [Supabase vector store](/docs/integrations/vectorstores/supabase). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -388,7 +388,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Supabase self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all Supabase self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/vectara.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/vectara.ipynb index 0d45945e7e5f..11635651857e 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/vectara.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/vectara.ipynb @@ -21,7 +21,7 @@ "source": [ "# Vectara\n", "\n", - "This guide will help you getting started with such a retriever backed by a [Vectara vector store](/docs/integrations/vectorstores/vectara). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [Vectara vector store](/docs/integrations/vectorstores/vectara). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -390,7 +390,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Vectara self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all Vectara self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/retrievers/self_query/weaviate.ipynb b/docs/core_docs/docs/integrations/retrievers/self_query/weaviate.ipynb index 09f3f4ff1d4e..9ccefb112d09 100644 --- a/docs/core_docs/docs/integrations/retrievers/self_query/weaviate.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/self_query/weaviate.ipynb @@ -21,7 +21,7 @@ "source": [ "# Weaviate\n", "\n", - "This guide will help you getting started with such a retriever backed by a [Weaviate vector store](/docs/integrations/vectorstores/weaviate). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html).\n", + "This guide will help you getting started with such a retriever backed by a [Weaviate vector store](/docs/integrations/vectorstores/weaviate). For detailed documentation of all features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html).\n", "\n", "## Overview\n", "\n", @@ -399,7 +399,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Weaviate self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_retrievers_self_query.SelfQueryRetriever.html)." + "For detailed documentation of all Weaviate self-query retriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.retrievers_self_query.SelfQueryRetriever.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/stores/file_system.ipynb b/docs/core_docs/docs/integrations/stores/file_system.ipynb index f7da9b71cdee..c7c7bdd9ee77 100644 --- a/docs/core_docs/docs/integrations/stores/file_system.ipynb +++ b/docs/core_docs/docs/integrations/stores/file_system.ipynb @@ -30,7 +30,7 @@ "\n", "```\n", "\n", - "This will help you get started with [LocalFileStore](/docs/concepts/#key-value-stores). For detailed documentation of all LocalFileStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_storage_file_system.LocalFileStore.html).\n", + "This will help you get started with [LocalFileStore](/docs/concepts/#key-value-stores). For detailed documentation of all LocalFileStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.storage_file_system.LocalFileStore.html).\n", "\n", "## Overview\n", "\n", @@ -52,7 +52,7 @@ "\n", "| Class | Package | Local | [PY support](https://python.langchain.com/v0.2/docs/integrations/stores/file_system/) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [LocalFileStore](https://api.js.langchain.com/classes/langchain_storage_file_system.LocalFileStore.html) | [langchain](https://api.js.langchain.com/modules/langchain_storage_file_system.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/langchain?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |\n", + "| [LocalFileStore](https://api.js.langchain.com/classes/langchain.storage_file_system.LocalFileStore.html) | [langchain](https://api.js.langchain.com/modules/langchain.storage_file_system.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/langchain?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |\n", "\n", "## Setup\n", "\n", diff 
--git a/docs/core_docs/docs/integrations/stores/in_memory.ipynb b/docs/core_docs/docs/integrations/stores/in_memory.ipynb index beef57053818..18ea964d2511 100644 --- a/docs/core_docs/docs/integrations/stores/in_memory.ipynb +++ b/docs/core_docs/docs/integrations/stores/in_memory.ipynb @@ -19,7 +19,7 @@ "source": [ "# InMemoryStore\n", "\n", - "This will help you get started with [InMemoryStore](/docs/concepts/#key-value-stores). For detailed documentation of all InMemoryStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_core_stores.InMemoryStore.html).\n", + "This will help you get started with [InMemoryStore](/docs/concepts/#key-value-stores). For detailed documentation of all InMemoryStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html).\n", "\n", "The `InMemoryStore` allows for a generic type to be assigned to the values in the store. We'll assign type `BaseMessage` as the type of our values, keeping with the theme of a chat history store.\n", "\n", @@ -29,7 +29,7 @@ "\n", "| Class | Package | Local | [PY support](https://python.langchain.com/v0.2/docs/integrations/stores/in_memory/) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [InMemoryStore](https://api.js.langchain.com/classes/langchain_core_stores.InMemoryStore.html) | [@langchain/core](https://api.js.langchain.com/modules/langchain_core_stores.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/core?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/core?style=flat-square&label=%20&) |\n", + "| [InMemoryStore](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html) | [@langchain/core](https://api.js.langchain.com/modules/langchain_core.stores.html) | ✅ | ✅ | ![NPM - 
Downloads](https://img.shields.io/npm/dm/@langchain/core?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/core?style=flat-square&label=%20&) |\n", "\n", "## Setup\n", "\n", @@ -211,7 +211,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all InMemoryStore features and configurations, head to the [API reference](https://api.js.langchain.com/classes/langchain_core_stores.InMemoryStore.html)" + "For detailed documentation of all InMemoryStore features and configurations, head to the [API reference](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html)" ] } ], diff --git a/docs/core_docs/docs/integrations/toolkits/openapi.ipynb b/docs/core_docs/docs/integrations/toolkits/openapi.ipynb index 12851b13fe99..846bc030e241 100644 --- a/docs/core_docs/docs/integrations/toolkits/openapi.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/openapi.ipynb @@ -37,7 +37,7 @@ "\n", "```\n", "\n", - "This will help you getting started with the [OpenApiToolkit](/docs/concepts/#toolkits). For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://v02.api.js.langchain.com/classes/langchain_agents.OpenApiToolkit.html).\n", + "This will help you getting started with the [OpenApiToolkit](/docs/concepts/#toolkits). For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://v02.api.js.langchain.com/classes/langchain.agents.OpenApiToolkit.html).\n", "\n", "The `OpenAPIToolkit` has access to the following tools:\n", "\n", @@ -295,7 +295,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://v02.api.js.langchain.com/classes/langchain_agents.OpenApiToolkit.html)." 
+ "For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://v02.api.js.langchain.com/classes/langchain.agents.OpenApiToolkit.html)." ] } ], diff --git a/docs/core_docs/docs/integrations/toolkits/sql.ipynb b/docs/core_docs/docs/integrations/toolkits/sql.ipynb index fa21ffd3df35..026e2f9dcfbf 100644 --- a/docs/core_docs/docs/integrations/toolkits/sql.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/sql.ipynb @@ -21,7 +21,7 @@ "source": [ "# SqlToolkit\n", "\n", - "This will help you getting started with the [SqlToolkit](/docs/concepts/#toolkits). For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_agents_toolkits_sql.SqlToolkit.html). You can also find the documentation for the Python equivalent [here](https://python.langchain.com/docs/integrations/toolkits/sql_database/).\n", + "This will help you getting started with the [SqlToolkit](/docs/concepts/#toolkits). For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html). You can also find the documentation for the Python equivalent [here](https://python.langchain.com/docs/integrations/toolkits/sql_database/).\n", "\n", "This toolkit contains a the following tools:\n", "\n", @@ -289,7 +289,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_agents_toolkits_sql.SqlToolkit.html)." + "For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb b/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb index 55b0f41ca31d..811ae4f7f756 100644 --- a/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb @@ -21,7 +21,7 @@ "source": [ "# VectorStoreToolkit\n", "\n", - "This will help you getting started with the [VectorStoreToolkit](/docs/concepts/#toolkits). For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_agents.VectorStoreToolkit.html).\n", + "This will help you getting started with the [VectorStoreToolkit](/docs/concepts/#toolkits). For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.VectorStoreToolkit.html).\n", "\n", "The `VectorStoreToolkit` is a toolkit which takes in a vector store, and converts it to a tool which can then be invoked, passed to LLMs, agents and more.\n", "\n", @@ -232,7 +232,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_agents.VectorStoreToolkit.html)." + "For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.VectorStoreToolkit.html)." ] } ], diff --git a/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx b/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx index afb9961bf64e..55c6e140bfc3 100644 --- a/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx +++ b/docs/core_docs/docs/integrations/vectorstores/googlevertexai.mdx @@ -60,7 +60,7 @@ for Matching Engine: You will also need a document store. 
While an `InMemoryDocstore` is ok for initial testing, you will want to use something like a -[GoogleCloudStorageDocstore](https://v02.api.js.langchain.com/classes/langchain_stores_doc_gcs.GoogleCloudStorageDocstore.html) to store it more permanently. +[GoogleCloudStorageDocstore](https://api.js.langchain.com/classes/_langchain_community.stores_doc_gcs.GoogleCloudStorageDocstore.html) to store it more permanently. ```typescript import { MatchingEngine } from "@langchain/community/vectorstores/googlevertexai"; diff --git a/docs/core_docs/docs/integrations/vectorstores/memory.ipynb b/docs/core_docs/docs/integrations/vectorstores/memory.ipynb index 55c821e94060..8943d85715a9 100644 --- a/docs/core_docs/docs/integrations/vectorstores/memory.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/memory.ipynb @@ -25,7 +25,7 @@ "\n", "As it is intended for demos, it does not yet support ids or deletion.\n", "\n", - "This guide provides a quick overview for getting started with in-memory [`vector stores`](/docs/concepts/#vectorstores). For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html)." + "This guide provides a quick overview for getting started with in-memory [`vector stores`](/docs/concepts/#vectorstores). For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html)." 
] }, { @@ -39,7 +39,7 @@ "\n", "| Class | Package | PY support | Package latest |\n", "| :--- | :--- | :---: | :---: |\n", - "| [`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html) | [`langchain`](https://www.npmjs.com/package/langchain) | ❌ | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |" + "| [`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) | [`langchain`](https://www.npmjs.com/package/langchain) | ❌ | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |" ] }, { @@ -335,7 +335,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_vectorstores_memory.MemoryVectorStore.html)." + "For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html)." 
] } ], diff --git a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb index 6c0c9f3e8c03..c430fd5a9a58 100644 --- a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb +++ b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb @@ -363,7 +363,7 @@ "\n", "For this we can use:\n", "\n", - "- [BaseChatMessageHistory](https://v02.api.js.langchain.com/classes/langchain_core_chat_history.BaseChatMessageHistory.html): Store chat history.\n", + "- [BaseChatMessageHistory](https://v02.api.js.langchain.com/classes/langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", "- [RunnableWithMessageHistory](/docs/how_to/message_history/): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", "\n", "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL page." diff --git a/docs/core_docs/docs/tutorials/query_analysis.ipynb b/docs/core_docs/docs/tutorials/query_analysis.ipynb index 18c879293ced..5c8da8e8f25e 100644 --- a/docs/core_docs/docs/tutorials/query_analysis.ipynb +++ b/docs/core_docs/docs/tutorials/query_analysis.ipynb @@ -289,7 +289,7 @@ "source": [ "### Query generation\n", "\n", - "To convert user questions to structured queries we'll make use of OpenAI's function-calling API. Specifically we'll use the new [ChatModel.withStructuredOutput()](https://v02.api.js.langchain.com/classes/langchain_core_language_models_base.BaseLanguageModel.html#withStructuredOutput) constructor to handle passing the schema to the model and parsing the output." + "To convert user questions to structured queries we'll make use of OpenAI's function-calling API. 
Specifically we'll use the new [ChatModel.withStructuredOutput()](https://v02.api.js.langchain.com/classes/langchain_core.language_models_base.BaseLanguageModel.html#withStructuredOutput) constructor to handle passing the schema to the model and parsing the output." ] }, { diff --git a/docs/core_docs/docs/tutorials/rag.ipynb b/docs/core_docs/docs/tutorials/rag.ipynb index 7e48f61db958..ef5e147741ea 100644 --- a/docs/core_docs/docs/tutorials/rag.ipynb +++ b/docs/core_docs/docs/tutorials/rag.ipynb @@ -265,9 +265,9 @@ "Let’s go through the above code step-by-step to really understand what’s going on.\n", "\n", "## 1. Indexing: Load\n", - "We need to first load the blog post contents. We can use [DocumentLoaders](/docs/concepts#document-loaders) for this, which are objects that load in data from a source and return a list of [Documents](https://v02.api.js.langchain.com/classes/langchain_core_documents.Document.html). A Document is an object with some pageContent (`string`) and metadata (`Record`).\n", + "We need to first load the blog post contents. We can use [DocumentLoaders](/docs/concepts#document-loaders) for this, which are objects that load in data from a source and return a list of [Documents](https://v02.api.js.langchain.com/classes/langchain_core.documents.Document.html). A Document is an object with some pageContent (`string`) and metadata (`Record`).\n", "\n", - "In this case we’ll use the [CheerioWebBaseLoader](https://v02.api.js.langchain.com/classes/langchain_document_loaders_web_cheerio.CheerioWebBaseLoader.html), which uses cheerio to load HTML form web URLs and parse it to text. We can pass custom selectors to the constructor to only parse specific elements:" + "In this case we’ll use the [CheerioWebBaseLoader](https://v02.api.js.langchain.com/classes/langchain.document_loaders_web_cheerio.CheerioWebBaseLoader.html), which uses cheerio to load HTML form web URLs and parse it to text. 
We can pass custom selectors to the constructor to only parse specific elements:" ] }, { @@ -338,7 +338,7 @@ "### Go deeper\n", "`DocumentLoader`: Class that loads data from a source as list of Documents. - [Docs](/docs/concepts#document-loaders): Detailed documentation on how to use\n", "\n", - "`DocumentLoaders`. - [Integrations](/docs/integrations/document_loaders/) - [Interface](https://v02.api.js.langchain.com/classes/langchain_document_loaders_base.BaseDocumentLoader.html): API reference for the base interface." + "`DocumentLoaders`. - [Integrations](/docs/integrations/document_loaders/) - [Interface](https://v02.api.js.langchain.com/classes/langchain.document_loaders_base.BaseDocumentLoader.html): API reference for the base interface." ] }, { @@ -430,7 +430,7 @@ "\n", "`TextSplitter`: Object that splits a list of `Document`s into smaller chunks. Subclass of `DocumentTransformers`. - Explore `Context-aware splitters`, which keep the location (“context”) of each split in the original `Document`: - [Markdown files](/docs/how_to/code_splitter/#markdown) - [Code](/docs/how_to/code_splitter/) (15+ langs) - [Interface](https://v02.api.js.langchain.com/classes/langchain_textsplitters.TextSplitter.html): API reference for the base interface.\n", "\n", - "`DocumentTransformer`: Object that performs a transformation on a list of `Document`s. - Docs: Detailed documentation on how to use `DocumentTransformer`s - [Integrations](/docs/integrations/document_transformers) - [Interface](https://v02.api.js.langchain.com/classes/langchain_core_documents.BaseDocumentTransformer.html): API reference for the base interface." + "`DocumentTransformer`: Object that performs a transformation on a list of `Document`s. 
- Docs: Detailed documentation on how to use `DocumentTransformer`s - [Integrations](/docs/integrations/document_transformers) - [Interface](https://v02.api.js.langchain.com/classes/langchain_core.documents.BaseDocumentTransformer.html): API reference for the base interface." ] }, { @@ -461,9 +461,9 @@ "source": [ "### Go deeper\n", "\n", - "`Embeddings`: Wrapper around a text embedding model, used for converting text to embeddings. - [Docs](/docs/concepts#embedding-models): Detailed documentation on how to use embeddings. - [Integrations](/docs/integrations/text_embedding): 30+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core_embeddings.Embeddings.html): API reference for the base interface.\n", + "`Embeddings`: Wrapper around a text embedding model, used for converting text to embeddings. - [Docs](/docs/concepts#embedding-models): Detailed documentation on how to use embeddings. - [Integrations](/docs/integrations/text_embedding): 30+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core.embeddings.Embeddings.html): API reference for the base interface.\n", "\n", - "`VectorStore`: Wrapper around a vector database, used for storing and querying embeddings. - [Docs](/docs/concepts#vectorstores): Detailed documentation on how to use vector stores. - [Integrations](/docs/integrations/vectorstores): 40+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core_vectorstores.VectorStore.html): API reference for the base interface.\n", + "`VectorStore`: Wrapper around a vector database, used for storing and querying embeddings. - [Docs](/docs/concepts#vectorstores): Detailed documentation on how to use vector stores. - [Integrations](/docs/integrations/vectorstores): 40+ integrations to choose from. 
- [Interface](https://v02.api.js.langchain.com/classes/langchain_core.vectorstores.VectorStore.html): API reference for the base interface.\n", "\n", "This completes the **Indexing** portion of the pipeline. At this point we have a query-able vector store containing the chunked contents of our blog post. Given a user question, we should ideally be able to return the snippets of the blog post that answer the question." ] @@ -478,7 +478,7 @@ "\n", "First we need to define our logic for searching over documents. LangChain defines a [Retriever](/docs/concepts#retrievers) interface which wraps an index that can return relevant `Document`s given a string query.\n", "\n", - "The most common type of Retriever is the [VectorStoreRetriever](https://v02.api.js.langchain.com/classes/langchain_core_vectorstores.VectorStoreRetriever.html), which uses the similarity search capabilities of a vector store to facilitate retrieval. Any `VectorStore` can easily be turned into a `Retriever` with `VectorStore.asRetriever()`:" + "The most common type of Retriever is the [VectorStoreRetriever](https://v02.api.js.langchain.com/classes/langchain_core.vectorstores.VectorStoreRetriever.html), which uses the similarity search capabilities of a vector store to facilitate retrieval. Any `VectorStore` can easily be turned into a `Retriever` with `VectorStore.asRetriever()`:" ] }, { @@ -774,9 +774,9 @@ "### Go deeper\n", "\n", "#### Choosing a model\n", - "`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages and returns a message. - [Docs](/docs/concepts/#chat-models): Detailed documentation on - [Integrations](/docs/integrations/chat/): 25+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html): API reference for the base interface.\n", + "`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages and returns a message. 
- [Docs](/docs/concepts/#chat-models): Detailed documentation on chat models. - [Integrations](/docs/integrations/chat/): 25+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html): API reference for the base interface.\n", "\n", - "`LLM`: A text-in-text-out LLM. Takes in a string and returns a string. - [Docs](/docs/concepts#llms) - [Integrations](/docs/integrations/llms/): 75+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core_language_models_llms.BaseLLM.html): API reference for the base interface.\n", + "`LLM`: A text-in-text-out LLM. Takes in a string and returns a string. - [Docs](/docs/concepts#llms) - [Integrations](/docs/integrations/llms/): 75+ integrations to choose from. - [Interface](https://v02.api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html): API reference for the base interface.\n", "\n", "See a guide on RAG with locally-running models [here](/docs/tutorials/local_rag/).\n", "\n", diff --git a/docs/core_docs/docs/tutorials/sql_qa.mdx b/docs/core_docs/docs/tutorials/sql_qa.mdx index d0e0d2001424..f362ffbcc4aa 100644 --- a/docs/core_docs/docs/tutorials/sql_qa.mdx +++ b/docs/core_docs/docs/tutorials/sql_qa.mdx @@ -69,7 +69,7 @@ Let's create a simple chain that takes a question, turns it into a SQL query, ex ### Convert question to SQL query -The first step in a SQL chain or agent is to take the user input and convert it to a SQL query. LangChain comes with a built-in chain for this: [`createSqlQueryChain`](https://v02.api.js.langchain.com/functions/langchain_chains_sql_db.createSqlQueryChain.html) +The first step in a SQL chain or agent is to take the user input and convert it to a SQL query. 
LangChain comes with a built-in chain for this: [`createSqlQueryChain`](https://v02.api.js.langchain.com/functions/langchain.chains_sql_db.createSqlQueryChain.html) import QuickstartChainExample from "@examples/use_cases/sql/quickstart_chain.ts"; @@ -94,7 +94,7 @@ This is the most dangerous part of creating a SQL chain. Consider carefully if i Minimize the database connection permissions as much as possible. Consider adding a human approval step to you chains before query execution (see below). -We can use the [`QuerySqlTool`](https://v02.api.js.langchain.com/classes/langchain_tools_sql.QuerySqlTool.html) to easily add query execution to our chain: +We can use the [`QuerySqlTool`](https://v02.api.js.langchain.com/classes/langchain.tools_sql.QuerySqlTool.html) to easily add query execution to our chain: import QuickstartExecuteExample from "@examples/use_cases/sql/quickstart_execute_sql.ts"; @@ -133,8 +133,8 @@ LangChain offers a number of tools and functions that allow you to create SQL Ag - It can recover from errors by running a generated query, catching the traceback and regenerating it correctly. - It can answer questions that require multiple dependent queries. - It will save tokens by only considering the schema from relevant tables. -- To initialize the agent, we use [`createOpenAIToolsAgent`](https://v02.api.js.langchain.com/functions/langchain_agents.createOpenAIToolsAgent.html) function. - This agent contains the [`SqlToolkit`](https://v02.api.js.langchain.com/classes/langchain_agents_toolkits_sql.SqlToolkit.html) which contains tools to: +- To initialize the agent, we use [`createOpenAIToolsAgent`](https://v02.api.js.langchain.com/functions/langchain.agents.createOpenAIToolsAgent.html) function. 
+ This agent contains the [`SqlToolkit`](https://v02.api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html) which contains tools to: - Create and execute queries - Check query syntax - Retrieve table descriptions diff --git a/langchain/src/agents/chat/index.ts b/langchain/src/agents/chat/index.ts index fa4c14b50779..de2046c4afde 100644 --- a/langchain/src/agents/chat/index.ts +++ b/langchain/src/agents/chat/index.ts @@ -44,7 +44,7 @@ export type ChatAgentInput = Optional; * Agent for the MRKL chain. * @augments Agent * - * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain_agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}. + * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain.agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}. */ export class ChatAgent extends Agent { static lc_name() { diff --git a/langchain/src/agents/chat_convo/index.ts b/langchain/src/agents/chat_convo/index.ts index 6fb9df2722f8..6a68bb5b650c 100644 --- a/langchain/src/agents/chat_convo/index.ts +++ b/langchain/src/agents/chat_convo/index.ts @@ -51,7 +51,7 @@ export type ChatConversationalAgentInput = Optional; * Agent for the MRKL chain. * @augments Agent * - * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain_agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}. + * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain.agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}. 
*/ export class ChatConversationalAgent extends Agent { static lc_name() { diff --git a/langchain/src/agents/mrkl/index.ts b/langchain/src/agents/mrkl/index.ts index f033e1831dd9..1a52b264f82e 100644 --- a/langchain/src/agents/mrkl/index.ts +++ b/langchain/src/agents/mrkl/index.ts @@ -56,7 +56,7 @@ export type ZeroShotAgentInput = Optional; * }); * ``` * - * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain_agents.createReactAgent.html | createReactAgent method instead}. + * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain.agents.createReactAgent.html | createReactAgent method instead}. */ export class ZeroShotAgent extends Agent { static lc_name() { diff --git a/langchain/src/agents/openai_functions/index.ts b/langchain/src/agents/openai_functions/index.ts index 387e156639ca..6be6fc4759f6 100644 --- a/langchain/src/agents/openai_functions/index.ts +++ b/langchain/src/agents/openai_functions/index.ts @@ -95,7 +95,7 @@ export interface OpenAIAgentCreatePromptArgs { * extends the Agent class and provides additional functionality specific * to the OpenAIAgent type. * - * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain_agents.createOpenAIFunctionsAgent.html | createOpenAIFunctionsAgent method instead}. + * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain.agents.createOpenAIFunctionsAgent.html | createOpenAIFunctionsAgent method instead}. */ export class OpenAIAgent extends Agent { static lc_name() { diff --git a/langchain/src/agents/structured_chat/index.ts b/langchain/src/agents/structured_chat/index.ts index 27b8972b7567..5549c4528b4b 100644 --- a/langchain/src/agents/structured_chat/index.ts +++ b/langchain/src/agents/structured_chat/index.ts @@ -57,7 +57,7 @@ export type StructuredChatAgentInput = Optional; /** * Agent that interoperates with Structured Tools using React logic. 
* @augments Agent - * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain_agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}. + * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain.agents.createStructuredChatAgent.html | createStructuredChatAgent method instead}. */ export class StructuredChatAgent extends Agent { static lc_name() { diff --git a/langchain/src/agents/xml/index.ts b/langchain/src/agents/xml/index.ts index cce9803b6c2c..bd169525dbb7 100644 --- a/langchain/src/agents/xml/index.ts +++ b/langchain/src/agents/xml/index.ts @@ -35,7 +35,7 @@ export interface XMLAgentInput { /** * Class that represents an agent that uses XML tags. * - * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain_agents.createXmlAgent.html | createXmlAgent method instead}. + * @deprecated Use the {@link https://v02.api.js.langchain.com/functions/langchain.agents.createXmlAgent.html | createXmlAgent method instead}. */ export class XMLAgent extends BaseSingleActionAgent implements XMLAgentInput { static lc_name() { diff --git a/langchain/src/chains/load.ts b/langchain/src/chains/load.ts index 09a3ec5e959e..362f9e52ac26 100644 --- a/langchain/src/chains/load.ts +++ b/langchain/src/chains/load.ts @@ -30,7 +30,7 @@ const loadChainFromFile: FileLoader = async ( * const chain = await loadChain("/path/to/chain.json"); * ``` * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | load method}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | load method}. 
*/ export const loadChain = async ( uri: string, diff --git a/langchain/src/chains/openai_functions/structured_output.ts b/langchain/src/chains/openai_functions/structured_output.ts index 520af1d0e7a8..02b2d487ee18 100644 --- a/langchain/src/chains/openai_functions/structured_output.ts +++ b/langchain/src/chains/openai_functions/structured_output.ts @@ -142,7 +142,7 @@ export class FunctionCallStructuredOutputParser< } /** - * @deprecated Use {@link https://v02.api.js.langchain.com/functions/langchain_chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead + * @deprecated Use {@link https://v02.api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead * Create a chain that returns output matching a JSON Schema. * @param input Object that includes all LLMChainInput fields except "outputParser" * as well as an additional required "outputSchema" JSON Schema object. @@ -187,7 +187,7 @@ export function createStructuredOutputChain< }); } -/** @deprecated Use {@link https://v02.api.js.langchain.com/functions/langchain_chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */ +/** @deprecated Use {@link https://v02.api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */ export function createStructuredOutputChainFromZod( zodSchema: T, input: Omit, "outputSchema"> diff --git a/langchain/src/chains/serde.ts b/langchain/src/chains/serde.ts index 0d4cae6e4e7b..911e35d01e2a 100644 --- a/langchain/src/chains/serde.ts +++ b/langchain/src/chains/serde.ts @@ -5,7 +5,7 @@ import { SerializedBasePromptTemplate } from "@langchain/core/prompts"; * Represents the serialized form of an LLMChain. It includes properties * such as `_type`, `llm`, and `prompt`. 
* - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedLLMChain = { _type: "llm_chain"; @@ -18,7 +18,7 @@ export type SerializedLLMChain = { * properties such as `_type`, `input_variables`, `output_variables`, and * `chains`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedSequentialChain = { _type: "sequential_chain"; @@ -31,7 +31,7 @@ export type SerializedSequentialChain = { * Represents the serialized form of a SimpleSequentialChain. It includes * properties such as `_type` and `chains`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedSimpleSequentialChain = { _type: "simple_sequential_chain"; @@ -42,7 +42,7 @@ export type SerializedSimpleSequentialChain = { * Represents the serialized form of a VectorDBQAChain. It includes * properties such as `_type`, `k`, and `combine_documents_chain`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedVectorDBQAChain = { _type: "vector_db_qa"; @@ -55,7 +55,7 @@ export type SerializedVectorDBQAChain = { * such as `_type`, `api_request_chain`, `api_answer_chain`, and * `api_docs`. 
* - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedAPIChain = { _type: "api_chain"; @@ -68,7 +68,7 @@ export type SerializedAPIChain = { * Represents the serialized form of a StuffDocumentsChain. It includes * properties such as `_type` and `llm_chain`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedStuffDocumentsChain = { _type: "stuff_documents_chain"; @@ -80,7 +80,7 @@ export type SerializedStuffDocumentsChain = { * properties such as `_type`, `k`, `combine_documents_chain`, and * `question_generator`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedChatVectorDBQAChain = { _type: "chat-vector-db"; @@ -94,7 +94,7 @@ export type SerializedChatVectorDBQAChain = { * includes properties such as `_type`, `llm_chain`, and * `combine_document_chain`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedMapReduceDocumentsChain = { _type: "map_reduce_documents_chain"; @@ -106,7 +106,7 @@ export type SerializedMapReduceDocumentsChain = { * Represents the serialized form of a RefineDocumentsChain. 
It includes * properties such as `_type`, `llm_chain`, and `refine_llm_chain`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedRefineDocumentsChain = { _type: "refine_documents_chain"; @@ -118,7 +118,7 @@ export type SerializedRefineDocumentsChain = { * Represents the serialized form of an AnalyzeDocumentChain. It includes * properties such as `_type` and `combine_document_chain`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedAnalyzeDocumentChain = { _type: "analyze_document_chain"; @@ -130,7 +130,7 @@ export type SerializedAnalyzeDocumentChain = { * includes properties such as `_type`, `critiqueRequest`, * `revisionRequest`, and `name`. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedConstitutionalPrinciple = { _type: "constitutional_principle"; @@ -158,7 +158,7 @@ export type SerializedConstitutionalChain = { * Represents the serialized form of a BaseChain. It can be one of the * above serialized chain types. * - * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain_load.load.html | serialization methods}. + * @deprecated Use newer {@link https://v02.api.js.langchain.com/functions/langchain.load.load.html | serialization methods}. */ export type SerializedBaseChain = | SerializedLLMChain