From bb2ed4615c8a2fd18beb32bd99f6a5c117a27218 Mon Sep 17 00:00:00 2001 From: Johnny Deuss Date: Thu, 12 Oct 2023 16:44:03 +0100 Subject: [PATCH] Fix typos (#11663) --- cookbook/elasticsearch_db_qa.ipynb | 2 +- cookbook/sales_agent_with_context.ipynb | 2 +- cookbook/smart_llm.ipynb | 4 +-- docs/README.md | 2 +- docs/docs/additional_resources/youtube.mdx | 2 +- .../docs/expression_language/how_to/map.ipynb | 2 +- .../guides/evaluation/comparison/custom.ipynb | 6 ++-- .../pairwise_embedding_distance.ipynb | 6 ++-- .../comparison/pairwise_string.ipynb | 6 ++-- .../evaluation/examples/comparisons.ipynb | 6 ++-- .../string/criteria_eval_chain.ipynb | 4 +-- .../guides/evaluation/string/custom.ipynb | 2 +- .../string/embedding_distance.ipynb | 6 ++-- .../evaluation/string/exact_match.ipynb | 2 +- .../evaluation/string/regex_match.ipynb | 2 +- .../evaluation/string/string_distance.ipynb | 2 +- .../guides/evaluation/trajectory/custom.ipynb | 2 +- .../trajectory/trajectory_eval.ipynb | 2 +- docs/docs/guides/langsmith/walkthrough.ipynb | 8 ++--- docs/docs/guides/local_llms.ipynb | 14 ++++----- .../presidio_data_anonymization/index.ipynb | 2 +- .../multi_language.ipynb | 4 +-- .../reversible.ipynb | 2 +- .../safety/amazon_comprehend_chain.ipynb | 11 ++++--- .../integrations/callbacks/promptlayer.ipynb | 2 +- .../integrations/chat/azure_chat_openai.ipynb | 2 +- .../chat/baidu_qianfan_endpoint.ipynb | 6 ++-- .../chat/promptlayer_chatopenai.ipynb | 2 +- .../integrations/chat_loaders/discord.ipynb | 4 +-- .../integrations/chat_loaders/facebook.ipynb | 2 +- .../integrations/chat_loaders/gmail.ipynb | 2 +- .../integrations/chat_loaders/imessage.ipynb | 2 +- .../integrations/chat_loaders/slack.ipynb | 6 ++-- .../integrations/chat_loaders/telegram.ipynb | 6 ++-- .../integrations/chat_loaders/twitter.ipynb | 2 +- .../integrations/chat_loaders/wechat.ipynb | 6 ++-- .../integrations/chat_loaders/whatsapp.ipynb | 8 ++--- .../document_loaders/apify_dataset.ipynb | 2 +- .../document_loaders/dropbox.ipynb | 4 +-- .../document_loaders/etherscan.ipynb | 8 ++--- .../integrations/document_loaders/figma.ipynb | 2 +- .../document_loaders/geopandas.ipynb | 4 +-- .../document_loaders/github.ipynb | 2 +- .../document_loaders/joplin.ipynb | 2 +- .../document_loaders/mastodon.ipynb | 2 +- .../document_loaders/microsoft_onedrive.ipynb | 2 +- .../document_loaders/source_code.ipynb | 2 +- .../document_loaders/weather.ipynb | 2 +- docs/docs/integrations/llms/azure_ml.ipynb | 2 +- .../llms/baidu_qianfan_endpoint.ipynb | 8 ++--- docs/docs/integrations/llms/ctranslate2.ipynb | 2 +- docs/docs/integrations/llms/deepinfra.ipynb | 7 +++-- docs/docs/integrations/llms/forefrontai.ipynb | 5 ++-- docs/docs/integrations/llms/gradient.ipynb | 2 +- docs/docs/integrations/llms/manifest.ipynb | 2 +- docs/docs/integrations/llms/mosaicml.ipynb | 2 +- docs/docs/integrations/llms/ollama.ipynb | 2 +- docs/docs/integrations/llms/predibase.ipynb | 2 +- .../llms/promptlayer_openai.ipynb | 2 +- .../integrations/llms/titan_takeoff.ipynb | 4 +-- .../docs/integrations/providers/chaindesk.mdx | 2 +- .../providers/clearml_tracking.ipynb | 2 +- docs/docs/integrations/providers/cnosdb.mdx | 2 +- .../docs/integrations/providers/databricks.md | 2 +- docs/docs/integrations/providers/doctran.mdx | 2 +- docs/docs/integrations/providers/helicone.mdx | 2 +- docs/docs/integrations/providers/hologres.mdx | 2 +- docs/docs/integrations/providers/log10.mdx | 2 +- .../integrations/providers/promptlayer.mdx | 2 +- docs/docs/integrations/providers/redis.mdx | 2 +- 
docs/docs/integrations/providers/sklearn.mdx | 2 +- docs/docs/integrations/providers/supabase.mdx | 2 +- docs/docs/integrations/providers/tigris.mdx | 2 +- docs/docs/integrations/providers/trulens.mdx | 2 +- .../docs/integrations/providers/typesense.mdx | 2 +- .../providers/vectara/vectara_chat.ipynb | 6 ++-- docs/docs/integrations/providers/weather.mdx | 2 +- docs/docs/integrations/providers/weaviate.mdx | 2 +- .../retrievers/azure_cognitive_search.ipynb | 2 +- .../retrievers/cohere-reranker.ipynb | 4 +-- .../retrievers/merger_retriever.ipynb | 2 +- .../retrievers/pinecone_hybrid_search.ipynb | 4 +-- .../retrievers/weaviate-hybrid.ipynb | 2 +- .../baidu_qianfan_endpoint.ipynb | 6 ++-- .../google_vertex_ai_palm.ipynb | 2 +- .../text_embedding/gradient.ipynb | 2 +- .../text_embedding/mosaicml.ipynb | 2 +- docs/docs/integrations/toolkits/clickup.ipynb | 2 +- docs/docs/integrations/toolkits/github.ipynb | 4 +-- docs/docs/integrations/toolkits/gitlab.ipynb | 4 +-- .../integrations/toolkits/sql_database.ipynb | 2 +- .../vectorstores/analyticdb.ipynb | 2 +- .../integrations/vectorstores/annoy.ipynb | 2 +- .../integrations/vectorstores/clarifai.ipynb | 2 +- .../integrations/vectorstores/hologres.ipynb | 2 +- .../integrations/vectorstores/llm_rails.ipynb | 2 +- .../integrations/vectorstores/marqo.ipynb | 2 +- .../integrations/vectorstores/myscale.ipynb | 4 +-- .../integrations/vectorstores/pgvector.ipynb | 2 +- .../integrations/vectorstores/qdrant.ipynb | 2 +- .../integrations/vectorstores/sklearn.ipynb | 2 +- .../integrations/vectorstores/supabase.ipynb | 4 +-- .../docs/integrations/vectorstores/tair.ipynb | 2 +- .../integrations/vectorstores/tigris.ipynb | 2 +- .../integrations/vectorstores/typesense.ipynb | 2 +- .../integrations/vectorstores/vectara.ipynb | 2 +- docs/docs/integrations/vectorstores/zep.ipynb | 2 +- .../how_to/sharedmemory_for_tools.ipynb | 12 ++++---- .../agents/tools/multi_input_tool.ipynb | 2 +- .../retrievers/MultiQueryRetriever.ipynb | 4 +-- .../self_query/myscale_self_query.ipynb | 2 +- .../retrievers/web_research.ipynb | 30 +++++++++---------- .../connecting_to_a_feature_store.ipynb | 4 +-- .../prompt_serialization.ipynb | 4 +-- .../prompt_templates/prompts_pipelining.ipynb | 2 +- docs/docs/modules/paul_graham_essay.txt | 6 ++-- docs/docs/modules/state_of_the_union.txt | 2 +- docs/docs/use_cases/apis.ipynb | 2 +- docs/docs/use_cases/chatbots.ipynb | 2 +- docs/docs/use_cases/data_generation.ipynb | 2 +- docs/docs/use_cases/extraction.ipynb | 2 +- .../graph/diffbot_graphtransformer.ipynb | 2 +- .../use_cases/graph/graph_arangodb_qa.ipynb | 2 +- docs/docs/use_cases/qa_structured/sql.ipynb | 8 ++--- .../code_understanding.ipynb | 4 +-- .../conversational_retrieval_agents.ipynb | 2 +- .../document-context-aware-QA.ipynb | 6 ++-- .../use_cases/question_answering/index.ipynb | 2 +- .../local_retrieval_qa.ipynb | 4 +-- .../multiple_retrieval.ipynb | 2 +- docs/docs/use_cases/summarization.ipynb | 2 +- docs/docs/use_cases/tagging.ipynb | 2 +- docs/docs/use_cases/web_scraping.ipynb | 8 ++--- .../agents/agent_types/structured_chat.mdx | 2 +- .../text_splitters/code_splitter.mdx | 8 ++--- .../data_connection/vectorstores/async.mdx | 2 +- 136 files changed, 238 insertions(+), 231 deletions(-) diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb index 625abb6cdbe68..33c6455d79a1c 100644 --- a/cookbook/elasticsearch_db_qa.ipynb +++ b/cookbook/elasticsearch_db_qa.ipynb @@ -6,7 +6,7 @@ "source": [ "# Elasticsearch\n", "\n", - "[![Open In 
Collab](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/qa_structured/integrations/elasticsearch.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/qa_structured/integrations/elasticsearch.ipynb)\n", "\n", "We can use LLMs to interact with Elasticsearch analytics databases in natural language.\n", "\n", diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb index 709ad35ebcb84..409e91903191c 100644 --- a/cookbook/sales_agent_with_context.ipynb +++ b/cookbook/sales_agent_with_context.ipynb @@ -66,7 +66,7 @@ "metadata": {}, "outputs": [], "source": [ - "# install aditional dependencies\n", + "# install additional dependencies\n", "# ! pip install chromadb openai tiktoken" ] }, diff --git a/cookbook/smart_llm.ipynb b/cookbook/smart_llm.ipynb index 617a36307eed4..1ee0076896589 100644 --- a/cookbook/smart_llm.ipynb +++ b/cookbook/smart_llm.ipynb @@ -17,7 +17,7 @@ "\n", "Note that SmartLLMChains\n", "- use more LLM passes (ie n+2 instead of just 1)\n", - "- only work then the underlying LLM has the capability for reflection, whicher smaller models often don't\n", + "- only work when the underlying LLM has the capability for reflection, which smaller models often don't\n", "- only work with underlying models that return exactly 1 output, not multiple\n", "\n", "This notebook demonstrates how to use a SmartLLMChain." @@ -241,7 +241,7 @@ " ideation_llm=ChatOpenAI(temperature=0.9, model_name=\"gpt-4\"),\n", " llm=ChatOpenAI(\n", " temperature=0, model_name=\"gpt-4\"\n", - " ), # will be used for critqiue and resolution as no specific llms are given\n", + " ), # will be used for critique and resolution as no specific llms are given\n", " prompt=prompt,\n", " n_ideas=3,\n", " verbose=True,\n", diff --git a/docs/README.md b/docs/README.md index 6ffad61fe7c7c..3e8dfe2e5d653 100644 --- a/docs/README.md +++ b/docs/README.md @@ -42,7 +42,7 @@ If you are using GitHub pages for hosting, this command is a convenient way to b ### Continuous Integration -Some common defaults for linting/formatting have been set for you. If you integrate your project with an open source Continuous Integration system (e.g. Travis CI, CircleCI), you may check for issues using the following command. +Some common defaults for linting/formatting have been set for you. If you integrate your project with an open-source Continuous Integration system (e.g. Travis CI, CircleCI), you may check for issues using the following command. 
``` $ yarn ci diff --git a/docs/docs/additional_resources/youtube.mdx b/docs/docs/additional_resources/youtube.mdx index 6b577bb52d750..94aafe476630f 100644 --- a/docs/docs/additional_resources/youtube.mdx +++ b/docs/docs/additional_resources/youtube.mdx @@ -91,7 +91,7 @@ - [Chat with a `CSV` | `LangChain Agents` Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao) - [Create Your Own ChatGPT with `PDF` Data in 5 Minutes (LangChain Tutorial)](https://youtu.be/au2WVVGUvc8) by [Liam Ottley](https://www.youtube.com/@LiamOttley) - [Build a Custom Chatbot with OpenAI: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU) by [Fabrikod](https://www.youtube.com/@fabrikod) -- [`Flowise` is an open source no-code UI visual tool to build 🦜🔗LangChain applications](https://youtu.be/CovAPtQPU0k) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA) +- [`Flowise` is an open-source no-code UI visual tool to build 🦜🔗LangChain applications](https://youtu.be/CovAPtQPU0k) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA) - [LangChain & GPT 4 For Data Analysis: The `Pandas` Dataframe Agent](https://youtu.be/rFQ5Kmkd4jc) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics) - [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw) by [Toolfinder AI](https://www.youtube.com/@toolfinderai) - [How to build with Langchain 10x easier | ⛓️ LangFlow & `Flowise`](https://youtu.be/Ya1oGL7ZTvU) by [AI Jason](https://www.youtube.com/@AIJasonZ) diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb index 428a1fa6acf3e..4848d8ba1af77 100644 --- a/docs/docs/expression_language/how_to/map.ipynb +++ b/docs/docs/expression_language/how_to/map.ipynb @@ -101,7 +101,7 @@ "source": [ "Here the input to prompt is expected to be a map with keys \"context\" and \"question\". The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the \"question\" key.\n", "\n", - "Note that when composing a RunnableMap when another Runnable we don't even need to wrap our dictuionary in the RunnableMap class — the type conversion is handled for us." + "Note that when composing a RunnableMap with another Runnable we don't even need to wrap our dictionary in the RunnableMap class — the type conversion is handled for us."
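As a minimal sketch of the dict-to-RunnableMap coercion that corrected sentence describes (assuming the LCEL-era langchain imports below; `fetch_context` is a hypothetical stand-in for a real retriever, and actually running it requires an OpenAI API key):

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough


def fetch_context(question: str) -> str:
    # Hypothetical stand-in for a real retriever.
    return "Harrison worked at Kensho."


prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)

# The plain dict below is coerced into a RunnableMap automatically when it
# is composed with another Runnable, so no explicit wrapper is needed.
chain = (
    {"context": fetch_context, "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

print(chain.invoke("Where did Harrison work?"))
```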
] }, { diff --git a/docs/docs/guides/evaluation/comparison/custom.ipynb b/docs/docs/guides/evaluation/comparison/custom.ipynb index c0c67ddb12064..f046cc88a2d61 100644 --- a/docs/docs/guides/evaluation/comparison/custom.ipynb +++ b/docs/docs/guides/evaluation/comparison/custom.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# Custom Pairwise Evaluator\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/comparison/custom.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/comparison/custom.ipynb)\n", "\n", "You can make your own pairwise string evaluators by inheriting from `PairwiseStringEvaluator` class and overwriting the `_evaluate_string_pairs` method (and the `_aevaluate_string_pairs` method if you want to use the evaluator asynchronously).\n", "\n", @@ -28,7 +28,7 @@ "from langchain.evaluation import PairwiseStringEvaluator\n", "\n", "\n", - "class LengthComparisonPairwiseEvalutor(PairwiseStringEvaluator):\n", + "class LengthComparisonPairwiseEvaluator(PairwiseStringEvaluator):\n", " \"\"\"\n", " Custom evaluator to compare two strings.\n", " \"\"\"\n", @@ -66,7 +66,7 @@ } ], "source": [ - "evaluator = LengthComparisonPairwiseEvalutor()\n", + "evaluator = LengthComparisonPairwiseEvaluator()\n", "\n", "evaluator.evaluate_string_pairs(\n", " prediction=\"The quick brown fox jumped over the lazy dog.\",\n", diff --git a/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb index ace4f8f9747c4..5202bf0072e50 100644 --- a/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb +++ b/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb @@ -8,7 +8,7 @@ }, "source": [ "# Pairwise Embedding Distance \n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb)\n", "\n", "One way to measure the similarity (or dissimilarity) between two predictions on a shared or similar input is to embed the predictions and compute a vector distance between the two embeddings.[[1]](#cite_note-1)\n", "\n", @@ -86,7 +86,7 @@ "source": [ "## Select the Distance Metric\n", "\n", - "By default, the evalutor uses cosine distance. You can choose a different distance metric if you'd like. " + "By default, the evaluator uses cosine distance. You can choose a different distance metric if you'd like. 
" ] }, { @@ -230,4 +230,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb index 748f3ce9f7f58..074ef3b17b228 100644 --- a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb +++ b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb @@ -6,13 +6,13 @@ "metadata": {}, "source": [ "# Pairwise String Comparison\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb)\n", "\n", "Often you will want to compare predictions of an LLM, Chain, or Agent for a given input. The `StringComparison` evaluators facilitate this so you can answer questions like:\n", "\n", "- Which LLM or prompt produces a preferred output for a given question?\n", "- Which examples should I include for few-shot example selection?\n", - "- Which output is better to include for fintetuning?\n", + "- Which output is better to include for fine-tuning?\n", "\n", "The simplest and often most reliable automated way to choose a preferred prediction for a given input is to use the `pairwise_string` evaluator.\n", "\n", @@ -379,4 +379,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/docs/guides/evaluation/examples/comparisons.ipynb b/docs/docs/guides/evaluation/examples/comparisons.ipynb index 7bf56ef26ed30..27ac1eeb986e5 100644 --- a/docs/docs/guides/evaluation/examples/comparisons.ipynb +++ b/docs/docs/guides/evaluation/examples/comparisons.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "# Comparing Chain Outputs\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/examples/comparisons.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/examples/comparisons.ipynb)\n", "\n", "Suppose you have two different prompts (or LLMs). How do you know which will generate \"better\" results?\n", "\n", @@ -16,7 +16,7 @@ "2. A dataset of inputs\n", "3. 2 (or more) LLMs, Chains, or Agents to compare\n", "\n", - "Then we will aggregate the restults to determine the preferred model.\n", + "Then we will aggregate the results to determine the preferred model.\n", "\n", "### Step 1. 
Create the Evaluator\n", "\n", @@ -445,4 +445,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb index 112b9b643714f..bfbbd33452c7e 100644 --- a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# Criteria Evaluation\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb)\n", "\n", "In scenarios where you wish to assess a model's output using a specific rubric or criteria set, the `criteria` evaluator proves to be a handy tool. It allows you to verify if an LLM or Chain's output complies with a defined set of criteria.\n", "\n", @@ -73,7 +73,7 @@ "- prediction (str) – The predicted response.\n", "\n", "The criteria evaluators return a dictionary with the following values:\n", - "- score: Binary integeer 0 to 1, where 1 would mean that the output is compliant with the criteria, and 0 otherwise\n", + "- score: Binary integer 0 to 1, where 1 would mean that the output is compliant with the criteria, and 0 otherwise\n", "- value: A \"Y\" or \"N\" corresponding to the score\n", "- reasoning: String \"chain of thought reasoning\" from the LLM generated prior to creating the score" ] diff --git a/docs/docs/guides/evaluation/string/custom.ipynb b/docs/docs/guides/evaluation/string/custom.ipynb index 50e1b938dddf1..72b103bf96288 100644 --- a/docs/docs/guides/evaluation/string/custom.ipynb +++ b/docs/docs/guides/evaluation/string/custom.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# Custom String Evaluator\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/custom.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/custom.ipynb)\n", "\n", "You can make your own custom string evaluators by inheriting from the `StringEvaluator` class and implementing the `_evaluate_strings` (and `_aevaluate_strings` for async support) methods.\n", "\n", diff --git a/docs/docs/guides/evaluation/string/embedding_distance.ipynb b/docs/docs/guides/evaluation/string/embedding_distance.ipynb index 490487437f9fa..9ab5d1ebcede9 100644 --- a/docs/docs/guides/evaluation/string/embedding_distance.ipynb +++ b/docs/docs/guides/evaluation/string/embedding_distance.ipynb @@ -7,7 +7,7 @@ }, "source": [ "# Embedding Distance\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/embedding_distance.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/embedding_distance.ipynb)\n", "\n", "To 
measure semantic similarity (or dissimilarity) between a prediction and a reference label string, you could use a vector distance metric between the two embedded representations using the `embedding_distance` evaluator.[[1]](#cite_note-1)\n", "\n", @@ -80,7 +80,7 @@ "source": [ "## Select the Distance Metric\n", "\n", - "By default, the evalutor uses cosine distance. You can choose a different distance metric if you'd like. " + "By default, the evaluator uses cosine distance. You can choose a different distance metric if you'd like. " ] }, { @@ -221,4 +221,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/docs/guides/evaluation/string/exact_match.ipynb b/docs/docs/guides/evaluation/string/exact_match.ipynb index 8a48d381d9075..13707e0bf4283 100644 --- a/docs/docs/guides/evaluation/string/exact_match.ipynb +++ b/docs/docs/guides/evaluation/string/exact_match.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# Exact Match\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/exact_match.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/exact_match.ipynb)\n", "\n", "Probably the simplest way to evaluate an LLM or runnable's string output against a reference label is by simple string equivalence.\n", "\n", diff --git a/docs/docs/guides/evaluation/string/regex_match.ipynb b/docs/docs/guides/evaluation/string/regex_match.ipynb index c47b0fd8661d4..eba05ad8e7edb 100644 --- a/docs/docs/guides/evaluation/string/regex_match.ipynb +++ b/docs/docs/guides/evaluation/string/regex_match.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# Regex Match\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/regex_match.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/regex_match.ipynb)\n", "\n", "To evaluate chain or runnable string predictions against a custom regex, you can use the `regex_match` evaluator." ] diff --git a/docs/docs/guides/evaluation/string/string_distance.ipynb b/docs/docs/guides/evaluation/string/string_distance.ipynb index aaf3f0b9b6224..a60dadee112a2 100644 --- a/docs/docs/guides/evaluation/string/string_distance.ipynb +++ b/docs/docs/guides/evaluation/string/string_distance.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# String Distance\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/string_distance.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/string_distance.ipynb)\n", "\n", "One of the simplest ways to compare an LLM or chain's string output against a reference label is by using string distance measurements such as Levenshtein or postfix distance. 
This can be used alongside approximate/fuzzy matching criteria for very basic unit testing.\n", "\n", diff --git a/docs/docs/guides/evaluation/trajectory/custom.ipynb b/docs/docs/guides/evaluation/trajectory/custom.ipynb index fc03d6bc11857..0e18a4769d7dd 100644 --- a/docs/docs/guides/evaluation/trajectory/custom.ipynb +++ b/docs/docs/guides/evaluation/trajectory/custom.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "source": [ "# Custom Trajectory Evaluator\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/trajectory/custom.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/trajectory/custom.ipynb)\n", "\n", "You can make your own custom trajectory evaluators by inheriting from the [AgentTrajectoryEvaluator](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.schema.AgentTrajectoryEvaluator.html#langchain.evaluation.schema.AgentTrajectoryEvaluator) class and overwriting the `_evaluate_agent_trajectory` (and `_aevaluate_agent_action`) method.\n", "\n", diff --git a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb index 51db91a09cf9a..cba5cebb905a5 100644 --- a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -8,7 +8,7 @@ }, "source": [ "# Agent Trajectory\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb)\n", "\n", "Agents can be difficult to holistically evaluate due to the breadth of actions and generation they can make. We recommend using multiple evaluation techniques appropriate to your use case. One way to evaluate an agent is to look at the whole trajectory of actions taken along with their responses.\n", "\n", diff --git a/docs/docs/guides/langsmith/walkthrough.ipynb b/docs/docs/guides/langsmith/walkthrough.ipynb index 37862c7bd487c..a03babd767049 100644 --- a/docs/docs/guides/langsmith/walkthrough.ipynb +++ b/docs/docs/guides/langsmith/walkthrough.ipynb @@ -8,7 +8,7 @@ }, "source": [ "# LangSmith Walkthrough\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/langsmith/walkthrough.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/langsmith/walkthrough.ipynb)\n", "\n", "LangChain makes it easy to prototype LLM applications and Agents. However, delivering LLM applications to production can be deceptively difficult. 
You will likely have to heavily customize and iterate on your prompts, chains, and other components to create a high-quality product.\n", "\n", @@ -402,7 +402,7 @@ " # You can select a default one such as \"helpfulness\" or provide your own.\n", " RunEvalConfig.LabeledCriteria(\"helpfulness\"),\n", " # The LabeledScoreString evaluator outputs a score on a scale from 1-10.\n", - " # You can use defalut criteria or write our own rubric\n", + " # You can use default criteria or write your own rubric\n", " RunEvalConfig.LabeledScoreString(\n", " {\n", " \"accuracy\": \"\"\"\n", @@ -433,7 +433,7 @@ "Use the [run_on_dataset](https://api.python.langchain.com/en/latest/smith/langchain.smith.evaluation.runner_utils.run_on_dataset.html#langchain.smith.evaluation.runner_utils.run_on_dataset) (or asynchronous [arun_on_dataset](https://api.python.langchain.com/en/latest/smith/langchain.smith.evaluation.runner_utils.arun_on_dataset.html#langchain.smith.evaluation.runner_utils.arun_on_dataset)) function to evaluate your model. This will:\n", "1. Fetch example rows from the specified dataset.\n", "2. Run your agent (or any custom function) on each example.\n", "3. Apply evaluators to the resulting run traces and corresponding reference examples to generate automated feedback.\n", "\n", "The results will be visible in the LangSmith app." ] @@ -756,7 +756,7 @@ "source": [ "## Conclusion\n", "\n", - "Congratulations! You have succesfully traced and evaluated an agent using LangSmith!\n", + "Congratulations! You have successfully traced and evaluated an agent using LangSmith!\n", "\n", "This was a quick guide to get started, but there are many more ways to use LangSmith to speed up your developer flow and produce better results.\n", "\n", diff --git a/docs/docs/guides/local_llms.ipynb b/docs/docs/guides/local_llms.ipynb index 9e35e5ae869e2..baba1b0e5025b 100644 --- a/docs/docs/guides/local_llms.ipynb +++ b/docs/docs/guides/local_llms.ipynb @@ -20,14 +20,14 @@ "\n", "Running an LLM locally requires a few things:\n", "\n", - "1. `Open source LLM`: An open source LLM that can be freely modified and shared \n", + "1. `Open-source LLM`: An open-source LLM that can be freely modified and shared \n", "2. `Inference`: Ability to run this LLM on your device w/ acceptable latency\n", "\n", - "### Open Source LLMs\n", + "### Open-source LLMs\n", "\n", - "Users can now gain access to a rapidly growing set of [open source LLMs](https://cameronrwolfe.substack.com/p/the-history-of-open-source-llms-better). \n", + "Users can now gain access to a rapidly growing set of [open-source LLMs](https://cameronrwolfe.substack.com/p/the-history-of-open-source-llms-better). \n", - "These LLMs can be assessed across at least two dimentions (see figure):\n", + "These LLMs can be assessed across at least two dimensions (see figure):\n", " \n", "1. `Base model`: What is the base-model and how was it trained?\n", "2. `Fine-tuning approach`: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n", @@ -42,7 +42,7 @@ "\n", "### Inference\n", "\n", - "A few frameworks for this have emerged to support inference of open source LLMs on various devices:\n", + "A few frameworks for this have emerged to support inference of open-source LLMs on various devices:\n", "\n", "1. 
[`llama.cpp`](https://github.com/ggerganov/llama.cpp): C++ implementation of llama inference code with [weight optimization / quantization](https://finbarr.ca/how-is-llama-cpp-possible/)\n", "2. [`gpt4all`](https://docs.gpt4all.io/index.html): Optimized C backend for inference\n", @@ -164,7 +164,7 @@ "\n", "See the [`llama.cpp`](docs/integrations/llms/llamacpp) setup [here](https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md) to enable this.\n", "\n", - "In particular, ensure that conda is using the correct virtual enviorment that you created (`miniforge3`).\n", + "In particular, ensure that conda is using the correct virtual environment that you created (`miniforge3`).\n", "\n", "E.g., for me:\n", "\n", @@ -574,7 +574,7 @@ "* `Privacy`: private data (e.g., journals, etc) that a user does not want to share \n", "* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n", "\n", - "In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview on fine-tuning, which can utilize open source LLMs." + "In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview on fine-tuning, which can utilize open-source LLMs." ] } ], diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb index b2f5750ab1856..2310892a11a1f 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb @@ -6,7 +6,7 @@ "source": [ "# Data anonymization with Microsoft Presidio\n", "\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb)\n", "\n", "## Use case\n", "\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb index c7e8d6c87058e..b8f0a30954f26 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb @@ -14,9 +14,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Mutli-language data anonymization with Microsoft Presidio\n", + "# Multi-language data anonymization with Microsoft Presidio\n", "\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb)\n", "\n", "\n", "## Use case\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb index a94b0eabaf7bf..41ba4036088f3 100644 --- 
a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb @@ -16,7 +16,7 @@ "source": [ "# Reversible data anonymization with Microsoft Presidio\n", "\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb)\n", "\n", "\n", "## Use case\n", diff --git a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb index 25eb4f2505df4..9af0bb44be80f 100644 --- a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb +++ b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb @@ -95,7 +95,8 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.chains import LLMChain\n", "from langchain.llms.fake import FakeListLLM\n", "from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ModerationPiiError\n", "\n", @@ -399,7 +400,8 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.chains import LLMChain\n", "from langchain.llms.fake import FakeListLLM\n", "\n", "template = \"\"\"Question: {question}\n", @@ -565,7 +567,8 @@ "outputs": [], "source": [ "from langchain.llms import HuggingFaceHub\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.chains import LLMChain\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -659,7 +662,7 @@ "---\n", "## With Amazon SageMaker Jumpstart\n", "\n", - "The exmaple below shows how to use Amazon Comprehend Moderation chain with an Amazon SageMaker Jumpstart hosted LLM. You should have an Amazon SageMaker Jumpstart hosted LLM endpoint within your AWS Account. " + "The example below shows how to use Amazon Comprehend Moderation chain with an Amazon SageMaker Jumpstart hosted LLM. You should have an Amazon SageMaker Jumpstart hosted LLM endpoint within your AWS Account. " ] }, { diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb index 2f3b5b9f81b37..6d6d6a9aad8b5 100644 --- a/docs/docs/integrations/callbacks/promptlayer.ipynb +++ b/docs/docs/integrations/callbacks/promptlayer.ipynb @@ -130,7 +130,7 @@ "\n", "In this example we unlock more of the power of PromptLayer.\n", "\n", - "PromptLayer allows you to visually create, version, and track prompt templates. Using the [Prompt Registry](https://docs.promptlayer.com/features/prompt-registry), we can programatically fetch the prompt template called `example`.\n", + "PromptLayer allows you to visually create, version, and track prompt templates. 
Using the [Prompt Registry](https://docs.promptlayer.com/features/prompt-registry), we can programmatically fetch the prompt template called `example`.\n", "\n", "We also define a `pl_id_callback` function which takes in the `promptlayer_request_id` and logs a score, metadata and links the prompt template used. Read more about tracking on [our docs](https://docs.promptlayer.com/features/prompt-history/request-id)." ] diff --git a/docs/docs/integrations/chat/azure_chat_openai.ipynb b/docs/docs/integrations/chat/azure_chat_openai.ipynb index d176996999a30..a8ebf79595881 100644 --- a/docs/docs/integrations/chat/azure_chat_openai.ipynb +++ b/docs/docs/integrations/chat/azure_chat_openai.ipynb @@ -81,7 +81,7 @@ "metadata": {}, "source": [ "## Model Version\n", - "Azure OpenAI responses contain `model` property, which is name of the model used to generate the response. However unlike native OpenAI responses, it does not contain the version of the model, which is set on the deplyoment in Azure. This makes it tricky to know which version of the model was used to generate the response, which as result can lead to e.g. wrong total cost calculation with `OpenAICallbackHandler`.\n", + "Azure OpenAI responses contain the `model` property, which is the name of the model used to generate the response. However, unlike native OpenAI responses, it does not contain the version of the model, which is set on the deployment in Azure. This makes it tricky to know which version of the model was used to generate the response, which as a result can lead to e.g. a wrong total cost calculation with `OpenAICallbackHandler`.\n", "\n", "To solve this problem, you can pass the `model_version` parameter to the `AzureChatOpenAI` class, which will be added to the model name in the llm output. This way you can easily distinguish between different versions of the model." ] diff --git a/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb index 63d1a486ab469..d25a05c7aab9c 100644 --- a/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb @@ -7,7 +7,7 @@ "source": [ "# Baidu Qianfan\n", "\n", - "Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. Qianfan not only provides including the model of Wenxin Yiyan (ERNIE-Bot) and the third-party open source models, but also provides various AI development tools and the whole set of development environment, which facilitates customers to use and develop large model applications easily.\n", + "Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. Qianfan provides not only the Wenxin Yiyan (ERNIE-Bot) model and third-party open-source models, but also various AI development tools and a whole set of development environments, which makes it easy for customers to use and develop large model applications.\n", "\n", "Basically, those models are split into the following types:\n", "\n", @@ -144,10 +144,10 @@ "source": [ "## Use different models in Qianfan\n", "\n", - "In the case you want to deploy your own model based on Ernie Bot or third-party open sources model, you could follow these steps:\n", + "In case you want to deploy your own model based on Ernie Bot or third-party open-source models, you could follow these steps:\n", "\n", "- 1. 
(Optional, if the model are included in the default models, skip it)Deploy your model in Qianfan Console, get your own customized deploy endpoint.\n", - "- 2. Set up the field called `endpoint` in the initlization:" + "- 2. Set up the field called `endpoint` in the initialization:" ] }, { diff --git a/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb b/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb index d75c3a0a3e46f..df311407efc20 100644 --- a/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb +++ b/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb @@ -131,7 +131,7 @@ "metadata": {}, "source": [ "## Using PromptLayer Track\n", - "If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantializing the PromptLayer LLM to get the request id. " + "If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantiating the PromptLayer LLM to get the request id. " ] }, { diff --git a/docs/docs/integrations/chat_loaders/discord.ipynb b/docs/docs/integrations/chat_loaders/discord.ipynb index f8748706c6695..59ed0d6bb211b 100644 --- a/docs/docs/integrations/chat_loaders/discord.ipynb +++ b/docs/docs/integrations/chat_loaders/discord.ipynb @@ -15,7 +15,7 @@ "3. Initialize the `DiscordChatLoader` with the file path pointed to the text file.\n", "4. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n", "\n", - "## 1. Creat message dump\n", + "## 1. Create message dump\n", "\n", "Currently (2023/08/23) this loader only supports .txt files in the format generated by copying messages in the app to your clipboard and pasting in a file. Below is an example." ] @@ -266,7 +266,7 @@ "source": [ "### Next Steps\n", "\n", - "You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message " + "You can then use these messages how you see fit, such as fine-tuning a model, few-shot example selection, or directly make predictions for the next message " ] }, { diff --git a/docs/docs/integrations/chat_loaders/facebook.ipynb b/docs/docs/integrations/chat_loaders/facebook.ipynb index ab0a41ebe073f..053b31fc07a6d 100644 --- a/docs/docs/integrations/chat_loaders/facebook.ipynb +++ b/docs/docs/integrations/chat_loaders/facebook.ipynb @@ -7,7 +7,7 @@ "source": [ "# Facebook Messenger\n", "\n", - "This notebook shows how to load data from Facebook in a format you can finetune on. The overall steps are:\n", + "This notebook shows how to load data from Facebook in a format you can fine-tune on. The overall steps are:\n", "\n", "1. Download your messenger data to disk.\n", "2. Create the Chat Loader and call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n", diff --git a/docs/docs/integrations/chat_loaders/gmail.ipynb b/docs/docs/integrations/chat_loaders/gmail.ipynb index 30e86d67b9c6e..bf7651c587b4f 100644 --- a/docs/docs/integrations/chat_loaders/gmail.ipynb +++ b/docs/docs/integrations/chat_loaders/gmail.ipynb @@ -7,7 +7,7 @@ "source": [ "# GMail\n", "\n", - "This loader goes over how to load data from GMail. There are many ways you could want to load data from GMail. This loader is currently fairly opionated in how to do so. The way it does it is it first looks for all messages that you have sent. 
It then looks for messages where you are responding to a previous email. It then fetches that previous email, and creates a training example of that email, followed by your email.\n", + "This loader goes over how to load data from GMail. There are many ways you could want to load data from GMail. This loader is currently fairly opinionated in how to do so. The way it works is that it first looks for all messages that you have sent. It then looks for messages where you are responding to a previous email. It then fetches that previous email and creates a training example of that email, followed by your email.\n", "\n", "Note that there are clear limitations here. For example, all examples created are only looking at the previous email for context.\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/imessage.ipynb b/docs/docs/integrations/chat_loaders/imessage.ipynb index 58f502e53b070..ead7e28278871 100644 --- a/docs/docs/integrations/chat_loaders/imessage.ipynb +++ b/docs/docs/integrations/chat_loaders/imessage.ipynb @@ -17,7 +17,7 @@ "\n", "## 1. Access Chat DB\n", "\n", - "It's likely that your terminal is denied access to `~/Library/Messages`. To use this class, you can copy the DB to an accessible directory (e.g., Documents) and load from there. Alternatively (and not recommended), you can grant full disk access for your terminal emulator in System Settings > Securityand Privacy > Full Disk Access.\n", + "It's likely that your terminal is denied access to `~/Library/Messages`. To use this class, you can copy the DB to an accessible directory (e.g., Documents) and load from there. Alternatively (and not recommended), you can grant full disk access for your terminal emulator in System Settings > Security and Privacy > Full Disk Access.\n", "\n", "We have created an example database you can use at [this linked drive file](https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing)." ] diff --git a/docs/docs/integrations/chat_loaders/slack.ipynb b/docs/docs/integrations/chat_loaders/slack.ipynb index f63cd7cdfa418..f15a015c871f7 100644 --- a/docs/docs/integrations/chat_loaders/slack.ipynb +++ b/docs/docs/integrations/chat_loaders/slack.ipynb @@ -14,9 +14,9 @@ "2. Create the `SlackChatLoader` with the file path pointed to the json file or directory of JSON files\n", "3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion. Optionally use `merge_chat_runs` to combine messages from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class.\n", "\n", - "## 1. Creat message dump\n", + "## 1. Create message dump\n", "\n", - "Currently (2023/08/23) this loader best supports a zip directory of files in the format generated by exporting your a direct message converstion from Slack. Follow up-to-date instructions from slack on how to do so.\n", + "Currently (2023/08/23) this loader best supports a zip directory of files in the format generated by exporting a direct message conversation from Slack. Follow up-to-date instructions from Slack on how to do so.\n", "\n", "We have an example in the LangChain repo." ] @@ -106,7 +106,7 @@ "source": [ "### Next Steps\n", "\n", - "You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message. " + "You can then use these messages how you see fit, such as fine-tuning a model, few-shot example selection, or directly make predictions for the next message. 
" ] }, { diff --git a/docs/docs/integrations/chat_loaders/telegram.ipynb b/docs/docs/integrations/chat_loaders/telegram.ipynb index 156472691c8f4..a26194d97a81b 100644 --- a/docs/docs/integrations/chat_loaders/telegram.ipynb +++ b/docs/docs/integrations/chat_loaders/telegram.ipynb @@ -5,7 +5,7 @@ "id": "735455a6-f82e-4252-b545-27385ef883f4", "metadata": {}, "source": [ - "# Telegram\n", + "# Telegram\n", "\n", "This notebook shows how to use the Telegram chat loader. This class helps map exported Telegram conversations to LangChain chat messages.\n", "\n", @@ -14,7 +14,7 @@ "2. Create the `TelegramChatLoader` with the file path pointed to the json file or directory of JSON files\n", "3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion. Optionally use `merge_chat_runs` to combine messages from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class.\n", "\n", - "## 1. Creat message dump\n", + "## 1. Create message dump\n", "\n", "Currently (2023/08/23) this loader best supports json files in the format generated by exporting your chat history from the [Telegram Desktop App](https://desktop.telegram.org/).\n", "\n", @@ -155,7 +155,7 @@ "source": [ "### Next Steps\n", "\n", - "You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message " + "You can then use these messages how you see fit, such as fine-tuning a model, few-shot example selection, or directly make predictions for the next message " ] }, { diff --git a/docs/docs/integrations/chat_loaders/twitter.ipynb b/docs/docs/integrations/chat_loaders/twitter.ipynb index 61dc650b853c5..110917512027f 100644 --- a/docs/docs/integrations/chat_loaders/twitter.ipynb +++ b/docs/docs/integrations/chat_loaders/twitter.ipynb @@ -7,7 +7,7 @@ "source": [ "# Twitter (via Apify)\n", "\n", - "This notebook shows how to load chat messages from Twitter to finetune on. We do this by utilizing Apify. \n", + "This notebook shows how to load chat messages from Twitter to fine-tune on. We do this by utilizing Apify. \n", "\n", "First, use Apify to export tweets. An example" ] diff --git a/docs/docs/integrations/chat_loaders/wechat.ipynb b/docs/docs/integrations/chat_loaders/wechat.ipynb index 7eb86e8169013..8f6e7f2d596ce 100644 --- a/docs/docs/integrations/chat_loaders/wechat.ipynb +++ b/docs/docs/integrations/chat_loaders/wechat.ipynb @@ -7,7 +7,7 @@ "source": [ "# WeChat\n", "\n", - "There is not yet a straightforward way to export personal WeChat messages. However if you just need no more than few hundrudes of messages for model fine-tuning or few-shot examples, this notebook shows how to create your own chat loader that works on copy-pasted WeChat messages to a list of LangChain messages.\n", + "There is not yet a straightforward way to export personal WeChat messages. However, if you just need no more than a few hundred messages for model fine-tuning or few-shot examples, this notebook shows how to create your own chat loader that turns copy-pasted WeChat messages into a list of LangChain messages.\n", "\n", "> Highly inspired by https://python.langchain.com/docs/integrations/chat_loaders/discord\n", "\n", @@ -19,7 +19,7 @@ "4. Initialize the `WeChatChatLoader` with the file path pointed to the text file.\n", "5. 
Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n", "\n", - "## 1. Creat message dump\n", + "## 1. Create message dump\n", "\n", "This loader only supports .txt files in the format generated by copying messages in the app to your clipboard and pasting in a file. Below is an example." ] @@ -249,7 +249,7 @@ "source": [ "### Next Steps\n", "\n", - "You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message " + "You can then use these messages how you see fit, such as fine-tuning a model, few-shot example selection, or directly make predictions for the next message " ] }, { diff --git a/docs/docs/integrations/chat_loaders/whatsapp.ipynb b/docs/docs/integrations/chat_loaders/whatsapp.ipynb index a0815584359d7..434a86e5186d7 100644 --- a/docs/docs/integrations/chat_loaders/whatsapp.ipynb +++ b/docs/docs/integrations/chat_loaders/whatsapp.ipynb @@ -14,7 +14,7 @@ "2. Create the `WhatsAppChatLoader` with the file path pointed to the json file or directory of JSON files\n", "3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n", "\n", - "## 1. Creat message dump\n", + "## 1. Create message dump\n", "\n", "To make the export of your WhatsApp conversation(s), complete the following steps:\n", "\n", @@ -22,7 +22,7 @@ "2. Click the three dots in the top right corner and select \"More\".\n", "3. Then select \"Export chat\" and choose \"Without media\".\n", "\n", - "An example of the data format for each converation is below: " + "An example of the data format for each conversation is below: " ] }, { @@ -64,7 +64,7 @@ "\n", "The WhatsAppChatLoader accepts the resulting zip file, unzipped directory, or the path to any of the chat `.txt` files therein.\n", "\n", - "Provide that as well as the user name you want to take on the role of \"AI\" when finetuning." + "Provide that as well as the user name you want to take on the role of \"AI\" when fine-tuning." ] }, { @@ -145,7 +145,7 @@ "source": [ "### Next Steps\n", "\n", - "You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message." + "You can then use these messages how you see fit, such as fine-tuning a model, few-shot example selection, or directly make predictions for the next message." ] }, { diff --git a/docs/docs/integrations/document_loaders/apify_dataset.ipynb b/docs/docs/integrations/document_loaders/apify_dataset.ipynb index 33709a417b5df..ebf65f9d16ca0 100644 --- a/docs/docs/integrations/document_loaders/apify_dataset.ipynb +++ b/docs/docs/integrations/document_loaders/apify_dataset.ipynb @@ -6,7 +6,7 @@ "source": [ "# Apify Dataset\n", "\n", - ">[Apify Dataset](https://docs.apify.com/platform/storage/dataset) is a scaleable append-only storage with sequential access built for storing structured web scraping results, such as a list of products or Google SERPs, and then export them to various formats like JSON, CSV, or Excel. Datasets are mainly used to save results of [Apify Actors](https://apify.com/store)—serverless cloud programs for varius web scraping, crawling, and data extraction use cases.\n", + ">[Apify Dataset](https://docs.apify.com/platform/storage/dataset) is a scalable append-only storage with sequential access built for storing structured web scraping results, such as a list of products or Google SERPs, and then export them to various formats like JSON, CSV, or Excel. 
Datasets are mainly used to save results of [Apify Actors](https://apify.com/store)—serverless cloud programs for various web scraping, crawling, and data extraction use cases.\n", "\n", "This notebook shows how to load Apify datasets to LangChain.\n", "\n", diff --git a/docs/docs/integrations/document_loaders/dropbox.ipynb b/docs/docs/integrations/document_loaders/dropbox.ipynb index 43ec915b197c7..7533caaa1e8fd 100644 --- a/docs/docs/integrations/document_loaders/dropbox.ipynb +++ b/docs/docs/integrations/document_loaders/dropbox.ipynb @@ -6,7 +6,7 @@ "source": [ "# Dropbox\n", "\n", - "[Drobpox](https://en.wikipedia.org/wiki/Dropbox) is a file hosting service that brings everything-traditional files, cloud content, and web shortcuts together in one place.\n", + "[Dropbox](https://en.wikipedia.org/wiki/Dropbox) is a file hosting service that brings everything (traditional files, cloud content, and web shortcuts) together in one place.\n", "\n", "This notebook covers how to load documents from *Dropbox*. In addition to common files such as text and PDF files, it also supports *Dropbox Paper* files.\n", "\n", @@ -17,7 +17,7 @@ "3. Generate access token: https://www.dropbox.com/developers/apps/create.\n", "4. `pip install dropbox` (requires `pip install unstructured` for PDF filetype).\n", "\n", - "## Intructions\n", + "## Instructions\n", "\n", "`DropboxLoader` requires you to create a Dropbox App and generate an access token. This can be done from https://www.dropbox.com/developers/apps/create. You also need to have the Dropbox Python SDK installed (pip install dropbox).\n", "\n", diff --git a/docs/docs/integrations/document_loaders/etherscan.ipynb b/docs/docs/integrations/document_loaders/etherscan.ipynb index 5ccffce0aabdc..9c165ccb9c7f0 100644 --- a/docs/docs/integrations/document_loaders/etherscan.ipynb +++ b/docs/docs/integrations/document_loaders/etherscan.ipynb @@ -13,11 +13,11 @@ "\n", "## Overview\n", "\n", - "The `Etherscan` loader use `Etherscan API` to load transacactions histories under specific account on `Ethereum Mainnet`.\n", + "The `Etherscan` loader uses the `Etherscan API` to load transaction histories under a specific account on `Ethereum Mainnet`.\n", "\n", "You will need an `Etherscan API key` to proceed. The free API key has a quota of 5 calls per second.\n", "\n", - "The loader supports the following six functinalities:\n", + "The loader supports the following six functionalities:\n", "* Retrieve normal transactions under a specific account on Ethereum Mainnet\n", "* Retrieve internal transactions under a specific account on Ethereum Mainnet\n", "* Retrieve erc20 transactions under a specific account on Ethereum Mainnet\n", @@ -28,7 +28,7 @@ "\n", "If the account does not have corresponding transactions, the loader will return a list with one document. The content of that document is ''.\n", "\n", - "You can pass differnt filters to loader to access different functionalities we mentioned above:\n", + "You can pass different filters to the loader to access the different functionalities we mentioned above:\n", "* \"normal_transaction\"\n", "* \"internal_transaction\"\n", "* \"erc20_transaction\"\n", @@ -41,7 +41,7 @@ "\n", "All functions related to transaction histories are restricted to a maximum of 1000 histories because of the Etherscan limit. You can use the following parameters to find the transaction histories you need:\n", "* offset: default to 20. Shows 20 transactions at a time\n", "* page: default to 1. 
This controls pagination.\n", "* start_block: Default to 0. The transaction histories starts from 0 block.\n", "* end_block: Default to 99999999. The transaction histories starts from 99999999 block\n", "* sort: \"desc\" or \"asc\". Set default to \"desc\" to get latest transactions." diff --git a/docs/docs/integrations/document_loaders/figma.ipynb b/docs/docs/integrations/document_loaders/figma.ipynb index 51ff9cb095c4b..c8615e7121595 100644 --- a/docs/docs/integrations/document_loaders/figma.ipynb +++ b/docs/docs/integrations/document_loaders/figma.ipynb @@ -89,7 +89,7 @@ "def generate_code(human_input):\n", " # I have no idea if the Jon Carmack thing makes for better code. YMMV.\n", " # See https://python.langchain.com/en/latest/modules/models/chat/getting_started.html for chat info\n", - " system_prompt_template = \"\"\"You are expert coder Jon Carmack. Use the provided design context to create idomatic HTML/CSS code as possible based on the user request.\n", + " system_prompt_template = \"\"\"You are expert coder Jon Carmack. Use the provided design context to create HTML/CSS code as idiomatic as possible based on the user request.\n", " Everything must be inline in one file and your response must be directly renderable by the browser.\n", " Figma file nodes and metadata: {context}\"\"\"\n", "\n", diff --git a/docs/docs/integrations/document_loaders/geopandas.ipynb b/docs/docs/integrations/document_loaders/geopandas.ipynb index c330aef6a3e02..997f5de9982c6 100644 --- a/docs/docs/integrations/document_loaders/geopandas.ipynb +++ b/docs/docs/integrations/document_loaders/geopandas.ipynb @@ -7,7 +7,7 @@ "source": [ "# Geopandas\n", "\n", - "[Geopandas](https://geopandas.org/en/stable/index.html) is an open source project to make working with geospatial data in python easier. \n", + "[Geopandas](https://geopandas.org/en/stable/index.html) is an open-source project to make working with geospatial data in python easier. \n", "\n", "GeoPandas extends the datatypes used by pandas to allow spatial operations on geometric types. \n", "\n", @@ -95,7 +95,7 @@ "id": "030a535c", "metadata": {}, "source": [ - "Visiualization of the sample of SF crimne data. " + "Visualization of the sample of SF crime data. " ] }, { diff --git a/docs/docs/integrations/document_loaders/github.ipynb b/docs/docs/integrations/document_loaders/github.ipynb index 5a7a35e842f9f..3582385aeb1bd 100644 --- a/docs/docs/integrations/document_loaders/github.ipynb +++ b/docs/docs/integrations/document_loaders/github.ipynb @@ -20,7 +20,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To access the GitHub API, you need a personal access token - you can set up yours here: https://github.com/settings/tokens?type=beta. You can either set this token as the environment variable ``GITHUB_PERSONAL_ACCESS_TOKEN`` and it will be automatically pulled in, or you can pass it in directly at initializaiton as the ``access_token`` named parameter." + "To access the GitHub API, you need a personal access token - you can set up yours here: https://github.com/settings/tokens?type=beta. You can either set this token as the environment variable ``GITHUB_PERSONAL_ACCESS_TOKEN`` and it will be automatically pulled in, or you can pass it in directly at initialization as the ``access_token`` named parameter." 
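To make the Etherscan filter and pagination parameters listed above concrete, here is a minimal, untested sketch; the account address is a placeholder, and the keyword arguments follow the defaults the notebook describes.

```python
import os

from langchain.document_loaders import EtherscanLoader

os.environ["ETHERSCAN_API_KEY"] = "your-api-key"  # placeholder

# Pull the 20 most recent ERC-20 transfers for an account, newest first.
loader = EtherscanLoader(
    "0x0000000000000000000000000000000000000000",  # placeholder account address
    filter="erc20_transaction",
    page=1,       # pagination, defaults to 1
    offset=20,    # transactions per page, defaults to 20
    sort="desc",  # latest transactions first
)
docs = loader.load()
```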
] }, { diff --git a/docs/docs/integrations/document_loaders/joplin.ipynb b/docs/docs/integrations/document_loaders/joplin.ipynb index 78dc59183b005..cd2d7725f5217 100644 --- a/docs/docs/integrations/document_loaders/joplin.ipynb +++ b/docs/docs/integrations/document_loaders/joplin.ipynb @@ -8,7 +8,7 @@ "source": [ "# Joplin\n", "\n", - ">[Joplin](https://joplinapp.org/) is an open source note-taking app. Capture your thoughts and securely access them from any device.\n", + ">[Joplin](https://joplinapp.org/) is an open-source note-taking app. Capture your thoughts and securely access them from any device.\n", "\n", "This notebook covers how to load documents from a `Joplin` database.\n", "\n", diff --git a/docs/docs/integrations/document_loaders/mastodon.ipynb b/docs/docs/integrations/document_loaders/mastodon.ipynb index 120da7c90f6e4..bcf5da4d28f95 100644 --- a/docs/docs/integrations/document_loaders/mastodon.ipynb +++ b/docs/docs/integrations/document_loaders/mastodon.ipynb @@ -54,7 +54,7 @@ "\n", "# Or set up access information to use a Mastodon app.\n", "# Note that the access token can either be passed into\n", - "# constructor or you can set the envirovnment \"MASTODON_ACCESS_TOKEN\".\n", + "# constructor or you can set the environment variable \"MASTODON_ACCESS_TOKEN\".\n", "# loader = MastodonTootsLoader(\n", "# access_token=\"<ACCESS TOKEN OF MASTODON APP>\",\n", "# api_base_url=\"<API BASE URL OF MASTODON APP INSTANCE>\",\n", diff --git a/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb b/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb index a7d8fb467462a..3a18c047a2949 100644 --- a/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb @@ -34,7 +34,7 @@ "os.environ['O365_CLIENT_SECRET'] = \"YOUR CLIENT SECRET\"\n", "```\n", "\n", - "This loader uses an authentication called [*on behalf of a user*](https://learn.microsoft.com/en-us/graph/auth-v2-user?context=graph%2Fapi%2F1.0&view=graph-rest-1.0). It is a 2 step authentication with user consent. When you instantiate the loader, it will call will print a url that the user must visit to give consent to the app on the required permissions. The user must then visit this url and give consent to the application. Then the user must copy the resulting page url and paste it back on the console. The method will then return True if the login attempt was succesful.\n", + "This loader uses an authentication called [*on behalf of a user*](https://learn.microsoft.com/en-us/graph/auth-v2-user?context=graph%2Fapi%2F1.0&view=graph-rest-1.0). It is a 2-step authentication with user consent. When you instantiate the loader, it will print a url that the user must visit to give consent to the app on the required permissions. The user must then visit this url and give consent to the application. Then the user must copy the resulting page url and paste it back on the console. The method will then return True if the login attempt was successful.\n", "\n", "\n", "```python\n", diff --git a/docs/docs/integrations/document_loaders/source_code.ipynb b/docs/docs/integrations/document_loaders/source_code.ipynb index 78e375617de79..19281a59b5ca7 100644 --- a/docs/docs/integrations/document_loaders/source_code.ipynb +++ b/docs/docs/integrations/document_loaders/source_code.ipynb @@ -7,7 +7,7 @@ "source": [ "# Source Code\n", "\n", - "This notebook covers how to load source code files using a special approach with language parsing: each top-level function and class in the code is loaded into separate documents. 
Any remaining code top-level code outside the already loaded functions and classes will be loaded into a seperate document.\n", + "This notebook covers how to load source code files using a special approach with language parsing: each top-level function and class in the code is loaded into separate documents. Any remaining top-level code outside the already loaded functions and classes will be loaded into a separate document.\n", "\n", "This approach can potentially improve the accuracy of QA models over source code. Currently, the supported languages for code parsing are Python and JavaScript. The language used for parsing can be configured, along with the minimum number of lines required to activate the splitting based on syntax." ] diff --git a/docs/docs/integrations/document_loaders/weather.ipynb b/docs/docs/integrations/document_loaders/weather.ipynb index 44f90612a0608..9ed6f0d2b87fb 100644 --- a/docs/docs/integrations/document_loaders/weather.ipynb +++ b/docs/docs/integrations/document_loaders/weather.ipynb @@ -7,7 +7,7 @@ "source": [ "# Weather\n", "\n", - ">[OpenWeatherMap](https://openweathermap.org/) is an open source weather service provider\n", + ">[OpenWeatherMap](https://openweathermap.org/) is an open-source weather service provider\n", "\n", "This loader fetches the weather data from the OpenWeatherMap's OneCall API, using the pyowm Python package. You must initialize the loader with your OpenWeatherMap API token and the names of the cities you want the weather data for." ] diff --git a/docs/docs/integrations/llms/azure_ml.ipynb b/docs/docs/integrations/llms/azure_ml.ipynb index 38c1cd2aea561..4443a53d7970f 100644 --- a/docs/docs/integrations/llms/azure_ml.ipynb +++ b/docs/docs/integrations/llms/azure_ml.ipynb @@ -46,7 +46,7 @@ "* `HFContentFormatter`: Formats request and response data for text-generation Hugging Face models\n", "* `LLamaContentFormatter`: Formats request and response data for LLaMa2\n", "\n", - "*Note: `OSSContentFormatter` is being deprecated and replaced with `GPT2ContentFormatter`. The logic is the same but `GPT2ContentFormatter` is a more suitable name. You can still continue to use `OSSContentFormatter` as the changes are backwards compatibile.*\n", + "*Note: `OSSContentFormatter` is being deprecated and replaced with `GPT2ContentFormatter`. The logic is the same but `GPT2ContentFormatter` is a more suitable name. You can still continue to use `OSSContentFormatter` as the changes are backwards compatible.*\n", "\n", "Below is an example using a summarization model from Hugging Face." ] diff --git a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb index 46a8deeebb72a..cf5e7eed01834 100644 --- a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb @@ -7,13 +7,13 @@ "source": [ "# Baidu Qianfan\n", "\n", - "Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. Qianfan not only provides including the model of Wenxin Yiyan (ERNIE-Bot) and the third-party open source models, but also provides various AI development tools and the whole set of development environment, which facilitates customers to use and develop large model applications easily.\n", + "Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. 
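A minimal, untested sketch of the language-parsing source-code loader described in the hunk above; the directory path and the threshold value are placeholders.

```python
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import LanguageParser
from langchain.text_splitter import Language

loader = GenericLoader.from_filesystem(
    "./my_project",  # placeholder directory
    glob="**/*",
    suffixes=[".py"],
    # Split top-level functions/classes into their own documents; files with
    # fewer lines than the threshold are kept whole.
    parser=LanguageParser(language=Language.PYTHON, parser_threshold=50),
)
docs = loader.load()
```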
Qianfan not only provides the Wenxin Yiyan (ERNIE-Bot) model and third-party open-source models, but also offers various AI development tools and a complete development environment, making it easy for customers to use and develop large model applications.\n", "\n", "Basically, those model are split into the following type:\n", "\n", "- Embedding\n", "- Chat\n", - "- Coompletion\n", + "- Completion\n", "\n", "In this notebook, we will introduce how to use langchain with [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html) mainly in `Completion` corresponding\n", " to the package `langchain/llms` in langchain:\n", @@ -24,7 +24,7 @@ "\n", "To use the LLM services based on Baidu Qianfan, you have to initialize these parameters:\n", "\n", - "You could either choose to init the AK,SK in enviroment variables or init params:\n", + "You could either choose to init the AK and SK in environment variables or init params:\n", "\n", "```base\n", "export QIANFAN_AK=XXX\n", "export QIANFAN_SK=XXX\n", "```\n", @@ -158,7 +158,7 @@ "In the case you want to deploy your own model based on EB or serval open sources model, you could follow these steps:\n", "\n", "- 1. (Optional, if the model are included in the default models, skip it)Deploy your model in Qianfan Console, get your own customized deploy endpoint.\n", - "- 2. Set up the field called `endpoint` in the initlization:" + "- 2. Set up the field called `endpoint` in the initialization:" ] }, { diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb index dd0bf0978e00e..3612c59980d15 100644 --- a/docs/docs/integrations/llms/ctranslate2.ipynb +++ b/docs/docs/integrations/llms/ctranslate2.ipynb @@ -50,7 +50,7 @@ } ], "source": [ - "# converstion can take several minutes\n", + "# conversion can take several minutes\n", "!ct2-transformers-converter --model meta-llama/Llama-2-7b-hf --quantization bfloat16 --output_dir ./llama-2-7b-ct2 --force" ] }, diff --git a/docs/docs/integrations/llms/deepinfra.ipynb b/docs/docs/integrations/llms/deepinfra.ipynb index ecb0fd6391887..5e9f0dea418bb 100644 --- a/docs/docs/integrations/llms/deepinfra.ipynb +++ b/docs/docs/integrations/llms/deepinfra.ipynb @@ -28,7 +28,8 @@ "source": [ "import os\n", "from langchain.llms import DeepInfra\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate\n", + "from langchain.chains import LLMChain" ] }, { @@ -50,7 +51,7 @@ }, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ " ········\n" ] } ], @@ -81,7 +82,7 @@ "metadata": {}, "source": [ "## Create the DeepInfra instance\n", - "You can also use our open source [deepctl tool](https://github.com/deepinfra/deepctl#deepctl) to manage your model deployments. You can view a list of available parameters [here](https://deepinfra.com/databricks/dolly-v2-12b#API)." + "You can also use our open-source [deepctl tool](https://github.com/deepinfra/deepctl#deepctl) to manage your model deployments. You can view a list of available parameters [here](https://deepinfra.com/databricks/dolly-v2-12b#API)." 
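As a minimal, untested sketch of the Qianfan `Completion` setup described above: the keys are placeholders, and the endpoint name `eb-instant` is illustrative (only needed for a custom deployment).

```python
import os

from langchain.llms import QianfanLLMEndpoint

os.environ["QIANFAN_AK"] = "your_ak"  # placeholder
os.environ["QIANFAN_SK"] = "your_sk"  # placeholder

# "eb-instant" is an illustrative endpoint name; omit it for the default model.
llm = QianfanLLMEndpoint(endpoint="eb-instant")
print(llm("hi"))
```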
] }, { diff --git a/docs/docs/integrations/llms/forefrontai.ipynb b/docs/docs/integrations/llms/forefrontai.ipynb index 5f988c25c9a3c..7dac40fd7c311 100644 --- a/docs/docs/integrations/llms/forefrontai.ipynb +++ b/docs/docs/integrations/llms/forefrontai.ipynb @@ -7,7 +7,7 @@ "# ForefrontAI\n", "\n", "\n", - "The `Forefront` platform gives you the ability to fine-tune and use [open source large language models](https://docs.forefront.ai/forefront/master/models).\n", + "The `Forefront` platform gives you the ability to fine-tune and use [open-source large language models](https://docs.forefront.ai/forefront/master/models).\n", "\n", "This notebook goes over how to use Langchain with [ForefrontAI](https://www.forefront.ai/).\n" ] @@ -27,7 +27,8 @@ "source": [ "import os\n", "from langchain.llms import ForefrontAI\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate\n", + "from langchain.chains import LLMChain" ] }, { diff --git a/docs/docs/integrations/llms/gradient.ipynb b/docs/docs/integrations/llms/gradient.ipynb index 6951db89487e7..6152e7b03f8c7 100644 --- a/docs/docs/integrations/llms/gradient.ipynb +++ b/docs/docs/integrations/llms/gradient.ipynb @@ -61,7 +61,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Optional: Validate your Enviroment variables ```GRADIENT_ACCESS_TOKEN``` and ```GRADIENT_WORKSPACE_ID``` to get currently deployed models." + "Optional: Validate your environment variables ```GRADIENT_ACCESS_TOKEN``` and ```GRADIENT_WORKSPACE_ID``` to get currently deployed models." ] }, { diff --git a/docs/docs/integrations/llms/manifest.ipynb b/docs/docs/integrations/llms/manifest.ipynb index 1238ae84f459c..514c3f84726a6 100644 --- a/docs/docs/integrations/llms/manifest.ipynb +++ b/docs/docs/integrations/llms/manifest.ipynb @@ -15,7 +15,7 @@ "id": "59fcaebc", "metadata": {}, "source": [ - "For more detailed information on `manifest`, and how to use it with local hugginface models like in this example, see https://github.com/HazyResearch/manifest\n", + "For more detailed information on `manifest`, and how to use it with local huggingface models like in this example, see https://github.com/HazyResearch/manifest\n", "\n", "Another example of [using Manifest with Langchain](https://github.com/HazyResearch/manifest/blob/main/examples/langchain_chatgpt.html)." ] diff --git a/docs/docs/integrations/llms/mosaicml.ipynb b/docs/docs/integrations/llms/mosaicml.ipynb index 08d378cdccfa9..141bfe7121759 100644 --- a/docs/docs/integrations/llms/mosaicml.ipynb +++ b/docs/docs/integrations/llms/mosaicml.ipynb @@ -7,7 +7,7 @@ "source": [ "# MosaicML\n", "\n", - "[MosaicML](https://docs.mosaicml.com/en/latest/inference.html) offers a managed inference service. You can either use a variety of open source models, or deploy your own.\n", + "[MosaicML](https://docs.mosaicml.com/en/latest/inference.html) offers a managed inference service. You can either use a variety of open-source models, or deploy your own.\n", "\n", "This example goes over how to use LangChain to interact with MosaicML Inference for text completion." ] diff --git a/docs/docs/integrations/llms/ollama.ipynb b/docs/docs/integrations/llms/ollama.ipynb index 358f0dde79d0f..e395603138df0 100644 --- a/docs/docs/integrations/llms/ollama.ipynb +++ b/docs/docs/integrations/llms/ollama.ipynb @@ -334,7 +334,7 @@ "source": [ "## Using the Hub for prompt management\n", " \n", - "Open source models often benefit from specific prompts. 
\n", + "Open-source models often benefit from specific prompts. \n", "\n", "For example, [Mistral 7b](https://mistral.ai/news/announcing-mistral-7b/) was fine-tuned for chat using the prompt format shown [here](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1).\n", "\n", diff --git a/docs/docs/integrations/llms/predibase.ipynb b/docs/docs/integrations/llms/predibase.ipynb index bd208a4345fca..ec1924b12a1fb 100644 --- a/docs/docs/integrations/llms/predibase.ipynb +++ b/docs/docs/integrations/llms/predibase.ipynb @@ -6,7 +6,7 @@ "source": [ "# Predibase\n", "\n", - "[Predibase](https://predibase.com/) allows you to train, finetune, and deploy any ML model—from linear regression to large language model. \n", + "[Predibase](https://predibase.com/) allows you to train, fine-tune, and deploy any ML model—from linear regression to large language models. \n", "\n", "This example demonstrates using Langchain with models deployed on Predibase" ] diff --git a/docs/docs/integrations/llms/promptlayer_openai.ipynb b/docs/docs/integrations/llms/promptlayer_openai.ipynb index 685deca3d8ac5..6c2404bb5fc28 100644 --- a/docs/docs/integrations/llms/promptlayer_openai.ipynb +++ b/docs/docs/integrations/llms/promptlayer_openai.ipynb @@ -180,7 +180,7 @@ "metadata": {}, "source": [ "## Using PromptLayer Track\n", - "If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantializing the PromptLayer LLM to get the request id. " + "If you would like to use any of the [PromptLayer tracking features](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9), you need to pass the argument `return_pl_id` when instantiating the PromptLayer LLM to get the request id. " ] }, { diff --git a/docs/docs/integrations/llms/titan_takeoff.ipynb b/docs/docs/integrations/llms/titan_takeoff.ipynb index 9d79166000f99..fe4cdc8a640dd 100644 --- a/docs/docs/integrations/llms/titan_takeoff.ipynb +++ b/docs/docs/integrations/llms/titan_takeoff.ipynb @@ -44,10 +44,10 @@ "## Choose a Model\n", "Takeoff supports many of the most powerful generative text models, such as Falcon, MPT, and Llama. See the [supported models](https://docs.titanml.co/docs/titan-takeoff/supported-models) for more information. For information about using your own models, see the [custom models](https://docs.titanml.co/docs/titan-takeoff/Advanced/custom-models).\n", "\n", - "Going forward in this demo we will be using the falcon 7B instruct model. This is a good open source model that is trained to follow instructions, and is small enough to easily inference even on CPUs.\n", + "Going forward in this demo we will be using the falcon 7B instruct model. This is a good open-source model that is trained to follow instructions, and is small enough to easily run inference even on CPUs.\n", "\n", "## Taking off\n", - "Models are referred to by their model id on HuggingFace. Takeoff uses port 8000 by default, but can be configured to use another port. There is also support to use a Nvidia GPU by specifing cuda for the device flag.\n", + "Models are referred to by their model id on HuggingFace. Takeoff uses port 8000 by default, but can be configured to use another port. 
There is also support to use an Nvidia GPU by specifying cuda for the device flag.\n", "\n", "To start the takeoff server, run:" ] diff --git a/docs/docs/integrations/providers/chaindesk.mdx b/docs/docs/integrations/providers/chaindesk.mdx index 202d9ad602f45..7cfd5e96b88f3 100644 --- a/docs/docs/integrations/providers/chaindesk.mdx +++ b/docs/docs/integrations/providers/chaindesk.mdx @@ -1,6 +1,6 @@ # Chaindesk ->[Chaindesk](https://chaindesk.ai) is an [open source](https://github.com/gmpetrov/databerry) document retrieval platform that helps to connect your personal data with Large Language Models. +>[Chaindesk](https://chaindesk.ai) is an [open-source](https://github.com/gmpetrov/databerry) document retrieval platform that helps to connect your personal data with Large Language Models. ## Installation and Setup diff --git a/docs/docs/integrations/providers/clearml_tracking.ipynb b/docs/docs/integrations/providers/clearml_tracking.ipynb index 00add9bf867a6..e6bd0e953dbaa 100644 --- a/docs/docs/integrations/providers/clearml_tracking.ipynb +++ b/docs/docs/integrations/providers/clearml_tracking.ipynb @@ -570,7 +570,7 @@ "\n", "- If you close the ClearML Callback using `clearml_callback.flush_tracker(..., finish=True)` the Callback cannot be used anymore. Make a new one if you want to keep logging.\n", "\n", - "- Check out the rest of the open source ClearML ecosystem, there is a data version manager, a remote execution agent, automated pipelines and much more!\n" + "- Check out the rest of the open-source ClearML ecosystem, which includes a data version manager, a remote execution agent, automated pipelines and much more!\n" ] }, { diff --git a/docs/docs/integrations/providers/cnosdb.mdx b/docs/docs/integrations/providers/cnosdb.mdx index 60cadd28204ca..d93f2e0f4c3ce 100644 --- a/docs/docs/integrations/providers/cnosdb.mdx +++ b/docs/docs/integrations/providers/cnosdb.mdx @@ -1,5 +1,5 @@ # CnosDB -> [CnosDB](https://github.com/cnosdb/cnosdb) is an open source distributed time series database with high performance, high compression rate and high ease of use. +> [CnosDB](https://github.com/cnosdb/cnosdb) is an open-source distributed time series database with high performance, high compression rate and high ease of use. ## Installation and Setup diff --git a/docs/docs/integrations/providers/databricks.md b/docs/docs/integrations/providers/databricks.md index 41d86b1f5ee90..6dbabbce60c67 100644 --- a/docs/docs/integrations/providers/databricks.md +++ b/docs/docs/integrations/providers/databricks.md @@ -19,7 +19,7 @@ See the notebook [Connect to Databricks](/docs/use_cases/qa_structured/integrati Databricks MLflow integrates with LangChain ------------------------------------------- -MLflow is an open source platform to manage the ML lifecycle, including experimentation, reproducibility, deployment, and a central model registry. See the notebook [MLflow Callback Handler](/docs/integrations/providers/mlflow_tracking) for details about MLflow's integration with LangChain. +MLflow is an open-source platform to manage the ML lifecycle, including experimentation, reproducibility, deployment, and a central model registry. See the notebook [MLflow Callback Handler](/docs/integrations/providers/mlflow_tracking) for details about MLflow's integration with LangChain. Databricks provides a fully managed and hosted version of MLflow integrated with enterprise security features, high availability, and other Databricks workspace features such as experiment and run management and notebook revision capture. 
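Once a Takeoff server is running, a minimal, untested sketch of calling it from LangChain might look like this; the `base_url` assumes the default port mentioned above, and the prompt is illustrative.

```python
from langchain.llms import TitanTakeoff

# Assumes a Takeoff server is already running locally on the default port 8000.
llm = TitanTakeoff(base_url="http://localhost:8000")
output = llm("What is the largest rainforest in the world?")
print(output)
```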
MLflow on Databricks offers an integrated experience for tracking and securing machine learning model training runs and running machine learning projects. See [MLflow guide](https://docs.databricks.com/mlflow/index.html) for more details. diff --git a/docs/docs/integrations/providers/doctran.mdx b/docs/docs/integrations/providers/doctran.mdx index 44e899bf5ee63..98848b8a0a0d6 100644 --- a/docs/docs/integrations/providers/doctran.mdx +++ b/docs/docs/integrations/providers/doctran.mdx @@ -1,6 +1,6 @@ # Doctran ->[Doctran](https://github.com/psychic-api/doctran) is a python package. It uses LLMs and open source +>[Doctran](https://github.com/psychic-api/doctran) is a python package. It uses LLMs and open-source > NLP libraries to transform raw text into clean, structured, information-dense documents > that are optimized for vector space retrieval. You can think of `Doctran` as a black box where > messy strings go in and nice, clean, labelled strings come out. diff --git a/docs/docs/integrations/providers/helicone.mdx b/docs/docs/integrations/providers/helicone.mdx index df9b3bde70621..0e76b9604be57 100644 --- a/docs/docs/integrations/providers/helicone.mdx +++ b/docs/docs/integrations/providers/helicone.mdx @@ -4,7 +4,7 @@ This page covers how to use the [Helicone](https://helicone.ai) ecosystem within ## What is Helicone? -Helicone is an [open source](https://github.com/Helicone/helicone) observability platform that proxies your OpenAI traffic and provides you key insights into your spend, latency and usage. +Helicone is an [open-source](https://github.com/Helicone/helicone) observability platform that proxies your OpenAI traffic and provides you with key insights into your spend, latency, and usage. ![Helicone](/img/HeliconeDashboard.png) diff --git a/docs/docs/integrations/providers/hologres.mdx b/docs/docs/integrations/providers/hologres.mdx index 02b13540dae11..44ecf3008b192 100644 --- a/docs/docs/integrations/providers/hologres.mdx +++ b/docs/docs/integrations/providers/hologres.mdx @@ -4,7 +4,7 @@ >`Hologres` supports standard `SQL` syntax, is compatible with `PostgreSQL`, and supports most PostgreSQL functions. Hologres supports online analytical processing (OLAP) and ad hoc analysis for up to petabytes of data, and provides high-concurrency and low-latency online data services. >`Hologres` provides **vector database** functionality by adopting [Proxima](https://www.alibabacloud.com/help/en/hologres/latest/vector-processing). ->`Proxima` is a high-performance software library developed by `Alibaba DAMO Academy`. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service. +>`Proxima` is a high-performance software library developed by `Alibaba DAMO Academy`. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open-source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service. 
## Installation and Setup diff --git a/docs/docs/integrations/providers/log10.mdx b/docs/docs/integrations/providers/log10.mdx index a4d634566d2ea..d458435e55286 100644 --- a/docs/docs/integrations/providers/log10.mdx +++ b/docs/docs/integrations/providers/log10.mdx @@ -4,7 +4,7 @@ This page covers how to use the [Log10](https://log10.io) within LangChain. ## What is Log10? -Log10 is an [open source](https://github.com/log10-io/log10) proxiless LLM data management and application development platform that lets you log, debug and tag your Langchain calls. +Log10 is an [open-source](https://github.com/log10-io/log10) proxiless LLM data management and application development platform that lets you log, debug and tag your Langchain calls. ## Quick start diff --git a/docs/docs/integrations/providers/promptlayer.mdx b/docs/docs/integrations/providers/promptlayer.mdx index 923b2a3dc47fe..1a7b1cb926290 100644 --- a/docs/docs/integrations/providers/promptlayer.mdx +++ b/docs/docs/integrations/providers/promptlayer.mdx @@ -43,7 +43,7 @@ You can use the PromptLayer request ID to add a prompt, score, or other metadata This LLM is identical to the [OpenAI](/docs/ecosystem/integrations/openai.html) LLM, except that - all your requests will be logged to your PromptLayer account - you can add `pl_tags` when instantiating to tag your requests on PromptLayer -- you can add `return_pl_id` when instantializing to return a PromptLayer request id to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9). +- you can add `return_pl_id` when instantiating to return a PromptLayer request id to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9). PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](/docs/integrations/chat/promptlayer_chatopenai.html) and `PromptLayerOpenAIChat` diff --git a/docs/docs/integrations/providers/redis.mdx b/docs/docs/integrations/providers/redis.mdx index 92c9f963e82b3..445bf3b19ca22 100644 --- a/docs/docs/integrations/providers/redis.mdx +++ b/docs/docs/integrations/providers/redis.mdx @@ -54,7 +54,7 @@ The only way to use a Redis Cluster is with LangChain classes accepting a precon The Cache wrapper allows for [Redis](https://redis.io) to be used as a remote, low-latency, in-memory cache for LLM prompts and responses. #### Standard Cache -The standard cache is the Redis bread & butter of use case in production for both [open source](https://redis.io) and [enterprise](https://redis.com) users globally. +The standard cache is the bread & butter of Redis use cases in production for both [open-source](https://redis.io) and [enterprise](https://redis.com) users globally. To import this cache: ```python diff --git a/docs/docs/integrations/providers/sklearn.mdx b/docs/docs/integrations/providers/sklearn.mdx index 09bd746a5b730..341bb671d0a86 100644 --- a/docs/docs/integrations/providers/sklearn.mdx +++ b/docs/docs/integrations/providers/sklearn.mdx @@ -1,6 +1,6 @@ # scikit-learn ->[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms, +>[scikit-learn](https://scikit-learn.org/stable/) is an open-source collection of machine learning algorithms, > including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). 
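A minimal, untested sketch of the `pl_tags`/`return_pl_id` options from the PromptLayer section above; it assumes `PROMPTLAYER_API_KEY` and `OPENAI_API_KEY` are already set in the environment.

```python
from langchain.llms import PromptLayerOpenAI

llm = PromptLayerOpenAI(pl_tags=["langchain"], return_pl_id=True)
result = llm.generate(["Tell me a joke"])

for generations in result.generations:
    # The request id can be fed to PromptLayer's tracking APIs for scoring/metadata.
    pl_request_id = generations[0].generation_info["pl_request_id"]
    print(pl_request_id)
```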
`SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format. ## Installation and Setup diff --git a/docs/docs/integrations/providers/supabase.mdx b/docs/docs/integrations/providers/supabase.mdx index a57b7079b6c35..5c7d89eabbb59 100644 --- a/docs/docs/integrations/providers/supabase.mdx +++ b/docs/docs/integrations/providers/supabase.mdx @@ -1,6 +1,6 @@ # Supabase (Postgres) ->[Supabase](https://supabase.com/docs) is an open source `Firebase` alternative. +>[Supabase](https://supabase.com/docs) is an open-source `Firebase` alternative. > `Supabase` is built on top of `PostgreSQL`, which offers strong `SQL` > querying capabilities and enables a simple interface with already-existing tools and frameworks. diff --git a/docs/docs/integrations/providers/tigris.mdx b/docs/docs/integrations/providers/tigris.mdx index 08c79f0951682..4485e8379ab1a 100644 --- a/docs/docs/integrations/providers/tigris.mdx +++ b/docs/docs/integrations/providers/tigris.mdx @@ -1,6 +1,6 @@ # Tigris -> [Tigris](https://tigrisdata.com) is an open source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications. +> [Tigris](https://tigrisdata.com) is an open-source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications. > `Tigris` eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead. ## Installation and Setup diff --git a/docs/docs/integrations/providers/trulens.mdx b/docs/docs/integrations/providers/trulens.mdx index 8748d19b44bd2..97dc0c14a246e 100644 --- a/docs/docs/integrations/providers/trulens.mdx +++ b/docs/docs/integrations/providers/trulens.mdx @@ -4,7 +4,7 @@ This page covers how to use [TruLens](https://trulens.org) to evaluate and track ## What is TruLens? -TruLens is an [opensource](https://github.com/truera/trulens) package that provides instrumentation and evaluation tools for large language model (LLM) based applications. +TruLens is an [open-source](https://github.com/truera/trulens) package that provides instrumentation and evaluation tools for large language model (LLM) based applications. ## Quick start diff --git a/docs/docs/integrations/providers/typesense.mdx b/docs/docs/integrations/providers/typesense.mdx index 55ceb08eafab0..62d5581139e2c 100644 --- a/docs/docs/integrations/providers/typesense.mdx +++ b/docs/docs/integrations/providers/typesense.mdx @@ -1,6 +1,6 @@ # Typesense -> [Typesense](https://typesense.org) is an open source, in-memory search engine, that you can either +> [Typesense](https://typesense.org) is an open-source, in-memory search engine that you can either > [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run > on [Typesense Cloud](https://cloud.typesense.org/). 
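A minimal, untested sketch of the `SKLearnVectorStore` persistence options mentioned above; the embedding class and file path are placeholders (any LangChain embeddings model works).

```python
from langchain.embeddings import OpenAIEmbeddings  # placeholder embedding model
from langchain.vectorstores import SKLearnVectorStore

store = SKLearnVectorStore.from_texts(
    texts=["a small example corpus"],
    embedding=OpenAIEmbeddings(),            # assumes OPENAI_API_KEY is set
    persist_path="./sklearn_store.parquet",  # placeholder path
    serializer="parquet",                    # or "json" / "bson"
)
store.persist()  # write the vector store to persist_path
```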
> `Typesense` focuses on performance by storing the entire index in RAM (with a backup on disk) and also diff --git a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb index a706103062ad5..206c4d423234a 100644 --- a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb @@ -155,7 +155,7 @@ "metadata": {}, "outputs": [], "source": [ - "query = \"Did he mention who she suceeded\"\n", + "query = \"Did he mention who she succeeded\"\n", "result = qa({\"question\": query})" ] }, @@ -267,7 +267,7 @@ "outputs": [], "source": [ "chat_history = [(query, result[\"answer\"])]\n", - "query = \"Did he mention who she suceeded\"\n", + "query = \"Did he mention who she succeeded\"\n", "result = qa({\"question\": query, \"chat_history\": chat_history})" ] }, @@ -656,7 +656,7 @@ ], "source": [ "chat_history = [(query, result[\"answer\"])]\n", - "query = \"Did he mention who she suceeded\"\n", + "query = \"Did he mention who she succeeded\"\n", "result = qa({\"question\": query, \"chat_history\": chat_history})" ] }, diff --git a/docs/docs/integrations/providers/weather.mdx b/docs/docs/integrations/providers/weather.mdx index 20623489c40e7..5d557190b9825 100644 --- a/docs/docs/integrations/providers/weather.mdx +++ b/docs/docs/integrations/providers/weather.mdx @@ -1,6 +1,6 @@ # Weather ->[OpenWeatherMap](https://openweathermap.org/) is an open source weather service provider. +>[OpenWeatherMap](https://openweathermap.org/) is an open-source weather service provider. diff --git a/docs/docs/integrations/providers/weaviate.mdx b/docs/docs/integrations/providers/weaviate.mdx index e68105bf6f0b5..1c358ec6c64db 100644 --- a/docs/docs/integrations/providers/weaviate.mdx +++ b/docs/docs/integrations/providers/weaviate.mdx @@ -9,7 +9,7 @@ What is `Weaviate`? - Weaviate allows you to store JSON documents in a class property-like fashion while attaching machine learning vectors to these documents to represent them in vector space. - Weaviate can be used stand-alone (aka bring your vectors) or with a variety of modules that can do the vectorization for you and extend the core capabilities. - Weaviate has a GraphQL-API to access your data easily. -- We aim to bring your vector search set up to production to query in mere milliseconds (check our [open source benchmarks](https://weaviate.io/developers/weaviate/current/benchmarks/) to see if Weaviate fits your use case). +- We aim to bring your vector search set up to production to query in mere milliseconds (check our [open-source benchmarks](https://weaviate.io/developers/weaviate/current/benchmarks/) to see if Weaviate fits your use case). - Get to know Weaviate in the [basics getting started guide](https://weaviate.io/developers/weaviate/current/core-knowledge/basics.html) in under five minutes. 
**Weaviate in detail:** diff --git a/docs/docs/integrations/retrievers/azure_cognitive_search.ipynb b/docs/docs/integrations/retrievers/azure_cognitive_search.ipynb index 9b09e63464b45..5a3c2ca283b3a 100644 --- a/docs/docs/integrations/retrievers/azure_cognitive_search.ipynb +++ b/docs/docs/integrations/retrievers/azure_cognitive_search.ipynb @@ -26,7 +26,7 @@ "source": [ "## Set up Azure Cognitive Search\n", "\n", - "To set up ACS, please follow the instrcutions [here](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal).\n", + "To set up ACS, please follow the instructions [here](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal).\n", "\n", "Please note\n", "1. the name of your ACS service, \n", diff --git a/docs/docs/integrations/retrievers/cohere-reranker.ipynb b/docs/docs/integrations/retrievers/cohere-reranker.ipynb index 6c2c25c9cbcac..7f18cd00f0961 100644 --- a/docs/docs/integrations/retrievers/cohere-reranker.ipynb +++ b/docs/docs/integrations/retrievers/cohere-reranker.ipynb @@ -137,7 +137,7 @@ "\n", "I’ve worked on these issues a long time. \n", "\n", - "I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", + "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", "\n", "So let’s not abandon our streets. Or choose between safety and equal justice.\n", "----------------------------------------------------------------------------------------------------\n", @@ -373,7 +373,7 @@ "\n", "I’ve worked on these issues a long time. \n", "\n", - "I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", + "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", "\n", "So let’s not abandon our streets. 
Or choose between safety and equal justice.\n", "----------------------------------------------------------------------------------------------------\n", diff --git a/docs/docs/integrations/retrievers/merger_retriever.ipynb b/docs/docs/integrations/retrievers/merger_retriever.ipynb index 0189c2d46d2d8..f0e7bdc654171 100644 --- a/docs/docs/integrations/retrievers/merger_retriever.ipynb +++ b/docs/docs/integrations/retrievers/merger_retriever.ipynb @@ -157,7 +157,7 @@ "metadata": {}, "outputs": [], "source": [ - "# You can use an additional document transformer to reorder documents after removing redudance.\n", + "# You can use an additional document transformer to reorder documents after removing redundancy.\n", "from langchain.document_transformers import LongContextReorder\n", "\n", "filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)\n", diff --git a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb index 0eacf0554c907..4353f47364d6d 100644 --- a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb +++ b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb @@ -11,7 +11,7 @@ "\n", "This notebook goes over how to use a retriever that under the hood uses Pinecone and Hybrid Search.\n", "\n", - "The logic of this retriever is taken from [this documentaion](https://docs.pinecone.io/docs/hybrid-search)\n", + "The logic of this retriever is taken from [this documentation](https://docs.pinecone.io/docs/hybrid-search)\n", "\n", "To use Pinecone, you must have an API key and an Environment. \n", "Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart)." @@ -140,7 +140,7 @@ " dimension=1536, # dimensionality of dense model\n", " metric=\"dotproduct\", # sparse values supported only for dotproduct\n", " pod_type=\"s1\",\n", - " metadata_config={\"indexed\": []}, # see explaination above\n", + " metadata_config={\"indexed\": []}, # see explanation above\n", ")" ] }, diff --git a/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb b/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb index f256d49d068f2..bdbf7a921562d 100644 --- a/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb +++ b/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb @@ -7,7 +7,7 @@ "source": [ "# Weaviate Hybrid Search\n", "\n", - ">[Weaviate](https://weaviate.io/developers/weaviate) is an open source vector database.\n", + ">[Weaviate](https://weaviate.io/developers/weaviate) is an open-source vector database.\n", "\n", ">[Hybrid search](https://weaviate.io/blog/hybrid-search-explained) is a technique that combines multiple search algorithms to improve the accuracy and relevance of search results. It uses the best features of both keyword-based search algorithms with vector search techniques.\n", "\n", diff --git a/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb index 5272f235309ea..e971b0389146c 100644 --- a/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb @@ -7,7 +7,7 @@ "source": [ "# Baidu Qianfan\n", "\n", - "Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. 
Qianfan not only provides including the model of Wenxin Yiyan (ERNIE-Bot) and the third-party open source models, but also provides various AI development tools and the whole set of development environment, which facilitates customers to use and develop large model applications easily.\n", + "Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. Qianfan not only provides the Wenxin Yiyan (ERNIE-Bot) model and third-party open-source models, but also offers various AI development tools and a complete development environment, making it easy for customers to use and develop large model applications.\n", "\n", "Basically, those model are split into the following type:\n", "\n", @@ -24,7 +24,7 @@ "\n", "To use the LLM services based on Baidu Qianfan, you have to initialize these parameters:\n", "\n", - "You could either choose to init the AK,SK in enviroment variables or init params:\n", + "You could either choose to init the AK and SK in environment variables or init params:\n", "\n", "```base\n", "export QIANFAN_AK=XXX\n", @@ -97,7 +97,7 @@ "In the case you want to deploy your own model based on Ernie Bot or third-party open sources model, you could follow these steps:\n", "\n", "- 1. (Optional, if the model are included in the default models, skip it)Deploy your model in Qianfan Console, get your own customized deploy endpoint.\n", - "- 2. Set up the field called `endpoint` in the initlization:" + "- 2. Set up the field called `endpoint` in the initialization:" ] }, { diff --git a/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb b/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb index 4c0c515e80693..108839679089e 100644 --- a/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb @@ -8,7 +8,7 @@ "\n", ">[Vertex AI PaLM API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) is a service on Google Cloud exposing the embedding models. \n", "\n", - "Note: This integration is seperate from the Google PaLM integration.\n", + "Note: This integration is separate from the Google PaLM integration.\n", "\n", "By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) Customer Data to train its foundation models as part of Google Cloud`s AI/ML Privacy Commitment. More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n", "\n", diff --git a/docs/docs/integrations/text_embedding/gradient.ipynb b/docs/docs/integrations/text_embedding/gradient.ipynb index f95a0686b908f..fa8e192958cb2 100644 --- a/docs/docs/integrations/text_embedding/gradient.ipynb +++ b/docs/docs/integrations/text_embedding/gradient.ipynb @@ -57,7 +57,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Optional: Validate your Environment variables ```GRADIENT_ACCESS_TOKEN``` and ```GRADIENT_WORKSPACE_ID``` to get currently deployed models. Using the `gradientai` Python package." + "Optional: Validate your environment variables ```GRADIENT_ACCESS_TOKEN``` and ```GRADIENT_WORKSPACE_ID``` to get currently deployed models. Using the `gradientai` Python package." 
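A minimal, untested sketch of the Qianfan embeddings setup described above; the keys are placeholders, and the `endpoint` argument is only needed for a custom deployment.

```python
import os

from langchain.embeddings import QianfanEmbeddingsEndpoint

os.environ["QIANFAN_AK"] = "your_ak"  # placeholder
os.environ["QIANFAN_SK"] = "your_sk"  # placeholder

embeddings = QianfanEmbeddingsEndpoint()  # add endpoint="..." for a custom deployment
vector = embeddings.embed_query("hello world")
print(len(vector))
```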
] }, { diff --git a/docs/docs/integrations/text_embedding/mosaicml.ipynb b/docs/docs/integrations/text_embedding/mosaicml.ipynb index 24d7aecb7241b..8119d9f5f5cda 100644 --- a/docs/docs/integrations/text_embedding/mosaicml.ipynb +++ b/docs/docs/integrations/text_embedding/mosaicml.ipynb @@ -6,7 +6,7 @@ "source": [ "# MosaicML\n", "\n", - ">[MosaicML](https://docs.mosaicml.com/en/latest/inference.html) offers a managed inference service. You can either use a variety of open source models, or deploy your own.\n", + ">[MosaicML](https://docs.mosaicml.com/en/latest/inference.html) offers a managed inference service. You can either use a variety of open-source models, or deploy your own.\n", "\n", "This example goes over how to use LangChain to interact with `MosaicML` Inference for text embedding." ] diff --git a/docs/docs/integrations/toolkits/clickup.ipynb b/docs/docs/integrations/toolkits/clickup.ipynb index 46e83a46a9208..3b85660674379 100644 --- a/docs/docs/integrations/toolkits/clickup.ipynb +++ b/docs/docs/integrations/toolkits/clickup.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# ClickUp Langchiain Toolkit" + "# ClickUp Langchain Toolkit" ] }, { diff --git a/docs/docs/integrations/toolkits/github.ipynb b/docs/docs/integrations/toolkits/github.ipynb index 36d13cb7f71f4..f72c0c8a60a15 100644 --- a/docs/docs/integrations/toolkits/github.ipynb +++ b/docs/docs/integrations/toolkits/github.ipynb @@ -15,11 +15,11 @@ "3. Set your environmental variables\n", "4. Pass the tools to your agent with `toolkit.get_tools()`\n", "\n", - "Each of these steps will be explained in greate detail below.\n", + "Each of these steps will be explained in great detail below.\n", "\n", "1. **Get Issues**- fetches issues from the repository.\n", "\n", - "2. **Get Issue**- feteches details about a specific issue.\n", + "2. **Get Issue**- fetches details about a specific issue.\n", "\n", "3. **Comment on Issue**- posts a comment on a specific issue.\n", "\n", diff --git a/docs/docs/integrations/toolkits/gitlab.ipynb b/docs/docs/integrations/toolkits/gitlab.ipynb index a8f28f09fa32d..cabf3dae7e3b6 100644 --- a/docs/docs/integrations/toolkits/gitlab.ipynb +++ b/docs/docs/integrations/toolkits/gitlab.ipynb @@ -15,11 +15,11 @@ "3. Set your environmental variables\n", "4. Pass the tools to your agent with `toolkit.get_tools()`\n", "\n", - "Each of these steps will be explained in greate detail below.\n", + "Each of these steps will be explained in great detail below.\n", "\n", "1. **Get Issues**- fetches issues from the repository.\n", "\n", - "2. **Get Issue**- feteches details about a specific issue.\n", + "2. **Get Issue**- fetches details about a specific issue.\n", "\n", "3. **Comment on Issue**- posts a comment on a specific issue.\n", "\n", diff --git a/docs/docs/integrations/toolkits/sql_database.ipynb b/docs/docs/integrations/toolkits/sql_database.ipynb index eae793da1acab..76d50084ba1f5 100644 --- a/docs/docs/integrations/toolkits/sql_database.ipynb +++ b/docs/docs/integrations/toolkits/sql_database.ipynb @@ -111,7 +111,7 @@ "id": "54c01168", "metadata": {}, "source": [ - "## Disclamer ⚠️\n", + "## Disclaimer ⚠️\n", "\n", "The query chain may generate insert/update/delete queries. 
When this is not expected, use a custom prompt or create a SQL users without write permissions.\n", "\n", diff --git a/docs/docs/integrations/vectorstores/analyticdb.ipynb b/docs/docs/integrations/vectorstores/analyticdb.ipynb index 43fa2b14068ff..86894ce8dd674 100644 --- a/docs/docs/integrations/vectorstores/analyticdb.ipynb +++ b/docs/docs/integrations/vectorstores/analyticdb.ipynb @@ -8,7 +8,7 @@ "\n", ">[AnalyticDB for PostgreSQL](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) is a massively parallel processing (MPP) data warehousing service that is designed to analyze large volumes of data online.\n", "\n", - ">`AnalyticDB for PostgreSQL` is developed based on the open source `Greenplum Database` project and is enhanced with in-depth extensions by `Alibaba Cloud`. AnalyticDB for PostgreSQL is compatible with the ANSI SQL 2003 syntax and the PostgreSQL and Oracle database ecosystems. AnalyticDB for PostgreSQL also supports row store and column store. AnalyticDB for PostgreSQL processes petabytes of data offline at a high performance level and supports highly concurrent online queries.\n", + ">`AnalyticDB for PostgreSQL` is developed based on the open-source `Greenplum Database` project and is enhanced with in-depth extensions by `Alibaba Cloud`. AnalyticDB for PostgreSQL is compatible with the ANSI SQL 2003 syntax and the PostgreSQL and Oracle database ecosystems. AnalyticDB for PostgreSQL also supports row store and column store. AnalyticDB for PostgreSQL processes petabytes of data offline at a high performance level and supports highly concurrent online queries.\n", "\n", "This notebook shows how to use functionality related to the `AnalyticDB` vector database.\n", "To run, you should have an [AnalyticDB](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) instance up and running:\n", diff --git a/docs/docs/integrations/vectorstores/annoy.ipynb b/docs/docs/integrations/vectorstores/annoy.ipynb index bf71d5bf2de6d..7f71d0c9e5d21 100644 --- a/docs/docs/integrations/vectorstores/annoy.ipynb +++ b/docs/docs/integrations/vectorstores/annoy.ipynb @@ -18,7 +18,7 @@ "metadata": {}, "source": [ "```{note}\n", - "NOTE: Annoy is read-only - once the index is built you cannot add any more emebddings!\n", + "NOTE: Annoy is read-only - once the index is built you cannot add any more embeddings!\n", "If you want to progressively add new entries to your VectorStore then better choose an alternative!\n", "```" ] diff --git a/docs/docs/integrations/vectorstores/clarifai.ipynb b/docs/docs/integrations/vectorstores/clarifai.ipynb index 02cafd01238df..9c48f1917b6e7 100644 --- a/docs/docs/integrations/vectorstores/clarifai.ipynb +++ b/docs/docs/integrations/vectorstores/clarifai.ipynb @@ -276,7 +276,7 @@ "data": { "text/plain": [ "[Document(page_content='And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. \\n\\nAnd I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? \\n\\nBan assault weapons and high-capacity magazines. \\n\\nRepeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. \\n\\nThese laws don’t infringe on the Second Amendment. They save lives. 
\\n\\nThe most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. \\n\\nIn state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \\n\\nWe cannot let this happen.', metadata={'source': '../../../state_of_the_union.txt'}),\n", - " Document(page_content='We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \\n\\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \\n\\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \\n\\nOfficer Mora was 27 years old. \\n\\nOfficer Rivera was 22. \\n\\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \\n\\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \\n\\nI’ve worked on these issues a long time. \\n\\nI know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.', metadata={'source': '../../../state_of_the_union.txt'}),\n", + " Document(page_content='We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \\n\\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \\n\\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \\n\\nOfficer Mora was 27 years old. \\n\\nOfficer Rivera was 22. \\n\\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \\n\\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \\n\\nI’ve worked on these issues a long time. \\n\\nI know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.', metadata={'source': '../../../state_of_the_union.txt'}),\n", " Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': '../../../state_of_the_union.txt'}),\n", " Document(page_content='So let’s not abandon our streets. 
Or choose between safety and equal justice. \\n\\nLet’s come together to protect our communities, restore trust, and hold law enforcement accountable. \\n\\nThat’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \\n\\nThat’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. \\n\\nWe should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. \\n\\nI ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe.', metadata={'source': '../../../state_of_the_union.txt'})]" ] diff --git a/docs/docs/integrations/vectorstores/hologres.ipynb b/docs/docs/integrations/vectorstores/hologres.ipynb index 77ff7bf032e35..de28c2713639d 100644 --- a/docs/docs/integrations/vectorstores/hologres.ipynb +++ b/docs/docs/integrations/vectorstores/hologres.ipynb @@ -10,7 +10,7 @@ ">Hologres supports standard SQL syntax, is compatible with PostgreSQL, and supports most PostgreSQL functions. Hologres supports online analytical processing (OLAP) and ad hoc analysis for up to petabytes of data, and provides high-concurrency and low-latency online data services. \n", "\n", ">Hologres provides **vector database** functionality by adopting [Proxima](https://www.alibabacloud.com/help/en/hologres/latest/vector-processing).\n", - ">Proxima is a high-performance software library developed by Alibaba DAMO Academy. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service.\n", + ">Proxima is a high-performance software library developed by Alibaba DAMO Academy. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open-source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service.\n", "\n", "This notebook shows how to use functionality related to the `Hologres Proxima` vector database.\n", "Click [here](https://www.alibabacloud.com/zh/product/hologres) to fast deploy a Hologres cloud instance." 
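Before moving on, here is a minimal, untested sketch of the Hologres vector store flow from the notebook above; the connection parameters are placeholders, and `FakeEmbeddings` merely stands in for a real embedding model.

```python
from langchain.docstore.document import Document
from langchain.embeddings.fake import FakeEmbeddings  # stand-in embedding model
from langchain.vectorstores import Hologres

connection_string = Hologres.connection_string_from_db_params(
    host="your-instance.hologres.aliyuncs.com",  # placeholder endpoint
    port=80,
    database="langchain",
    user="your-user",          # placeholder
    password="your-password",  # placeholder
)

docs = [Document(page_content="Proxima powers vector search in Hologres.")]
vectorstore = Hologres.from_documents(
    docs,
    FakeEmbeddings(size=1536),
    connection_string=connection_string,
    table_name="langchain_example",
)
print(vectorstore.similarity_search("vector search", k=1))
```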
diff --git a/docs/docs/integrations/vectorstores/llm_rails.ipynb b/docs/docs/integrations/vectorstores/llm_rails.ipynb index 0d6a1bc49b9e8..a703c2136de89 100644 --- a/docs/docs/integrations/vectorstores/llm_rails.ipynb +++ b/docs/docs/integrations/vectorstores/llm_rails.ipynb @@ -11,7 +11,7 @@ "See the [LLMRails API documentation ](https://docs.llmrails.com/) for more information on how to use the API.\n", "\n", "This notebook shows how to use functionality related to the `LLMRails`'s integration with langchain.\n", - "Note that unlike many other integrations in this category, LLMRails provides an end-to-end managed service for retrieval agumented generation, which includes:\n", + "Note that unlike many other integrations in this category, LLMRails provides an end-to-end managed service for retrieval augmented generation, which includes:\n", "1. A way to extract text from document files and chunk them into sentences.\n", "2. Its own embeddings model and vector store - each text segment is encoded into a vector embedding and stored in the LLMRails internal vector store\n", "3. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.llmrails.com/datastores/search))\n", diff --git a/docs/docs/integrations/vectorstores/marqo.ipynb b/docs/docs/integrations/vectorstores/marqo.ipynb index d208f7a6f740f..01c88627ffa05 100644 --- a/docs/docs/integrations/vectorstores/marqo.ipynb +++ b/docs/docs/integrations/vectorstores/marqo.ipynb @@ -10,7 +10,7 @@ "\n", "This notebook shows how to use functionality related to the Marqo vectorstore.\n", "\n", - ">[Marqo](https://www.marqo.ai/) is an open-source vector search engine. Marqo allows you to store and query multimodal data such as text and images. Marqo creates the vectors for you using a huge selection of opensource models, you can also provide your own finetuned models and Marqo will handle the loading and inference for you.\n", + ">[Marqo](https://www.marqo.ai/) is an open-source vector search engine. Marqo allows you to store and query multi-modal data such as text and images. 
Marqo creates the vectors for you using a huge selection of open-source models; you can also provide your own fine-tuned models and Marqo will handle the loading and inference for you.\n",
    "\n",
    "To run this notebook with our docker image please run the following commands first to get Marqo:\n",
    "\n",
diff --git a/docs/docs/integrations/vectorstores/myscale.ipynb b/docs/docs/integrations/vectorstores/myscale.ipynb
index 98fd3d147886e..0eb98f88d913e 100644
--- a/docs/docs/integrations/vectorstores/myscale.ipynb
+++ b/docs/docs/integrations/vectorstores/myscale.ipynb
@@ -19,7 +19,7 @@
    "id": "43ead5d5-2c1f-4dce-a69a-cb00e4f9d6f0",
    "metadata": {},
    "source": [
-    "## Setting up envrionments"
+    "## Setting up environments"
   ]
  },
  {
@@ -174,7 +174,7 @@
    "\n",
    "**NOTE**: Please be aware of SQL injection, this interface must not be directly called by end-user.\n",
    "\n",
-    "If you custimized your `column_map` under your setting, you search with filter like this:"
+    "If you customized your `column_map` under your setting, you can search with a filter like this:"
   ]
  },
  {
diff --git a/docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/docs/integrations/vectorstores/pgvector.ipynb
index 397758f216b17..4e34060d3e089 100644
--- a/docs/docs/integrations/vectorstores/pgvector.ipynb
+++ b/docs/docs/integrations/vectorstores/pgvector.ipynb
@@ -344,7 +344,7 @@
    "\n",
    "I’ve worked on these issues a long time. \n",
    "\n",
-    "I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n",
+    "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n",
    "--------------------------------------------------------------------------------\n",
    "--------------------------------------------------------------------------------\n",
    "Score: 0.2448441215698569\n",
diff --git a/docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/docs/integrations/vectorstores/qdrant.ipynb
index c362639c84fcc..e82388ac391e0 100644
--- a/docs/docs/integrations/vectorstores/qdrant.ipynb
+++ b/docs/docs/integrations/vectorstores/qdrant.ipynb
@@ -497,7 +497,7 @@
    "\n",
    "I\u2019ve worked on these issues a long time. \n",
    "\n",
-    "I know what works: Investing in crime preventionand community police officers who\u2019ll walk the beat, who\u2019ll know the neighborhood, and who can restore trust and safety. \n",
+    "I know what works: Investing in crime prevention and community police officers who\u2019ll walk the beat, who\u2019ll know the neighborhood, and who can restore trust and safety. \n",
    "\n"
   ]
  }
diff --git a/docs/docs/integrations/vectorstores/sklearn.ipynb b/docs/docs/integrations/vectorstores/sklearn.ipynb
index b93c734a74f10..ce397fa647d27 100644
--- a/docs/docs/integrations/vectorstores/sklearn.ipynb
+++ b/docs/docs/integrations/vectorstores/sklearn.ipynb
@@ -6,7 +6,7 @@
    "source": [
    "# scikit-learn\n",
    "\n",
-    ">[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). 
`SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n", + ">[scikit-learn](https://scikit-learn.org/stable/) is an open-source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n", "\n", "This notebook shows how to use the `SKLearnVectorStore` vector database." ] diff --git a/docs/docs/integrations/vectorstores/supabase.ipynb b/docs/docs/integrations/vectorstores/supabase.ipynb index 961ac208dae74..ad142aa9ffd4e 100644 --- a/docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/docs/integrations/vectorstores/supabase.ipynb @@ -13,7 +13,7 @@ "id": "cc80fa84-1f2f-48b4-bd39-3e6412f012f1", "metadata": {}, "source": [ - ">[Supabase](https://supabase.com/docs) is an open source Firebase alternative. `Supabase` is built on top of `PostgreSQL`, which offers strong SQL querying capabilities and enables a simple interface with already-existing tools and frameworks.\n", + ">[Supabase](https://supabase.com/docs) is an open-source Firebase alternative. `Supabase` is built on top of `PostgreSQL`, which offers strong SQL querying capabilities and enables a simple interface with already-existing tools and frameworks.\n", "\n", ">[PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL) also known as `Postgres`, is a free and open-source relational database management system (RDBMS) emphasizing extensibility and SQL compliance.\n", "\n", @@ -437,7 +437,7 @@ "\n", "I’ve worked on these issues a long time. \n", "\n", - "I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n" + "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n" ] } ], diff --git a/docs/docs/integrations/vectorstores/tair.ipynb b/docs/docs/integrations/vectorstores/tair.ipynb index 0b55b9f56110b..8f0f6a22aa021 100644 --- a/docs/docs/integrations/vectorstores/tair.ipynb +++ b/docs/docs/integrations/vectorstores/tair.ipynb @@ -7,7 +7,7 @@ "# Tair\n", "\n", ">[Tair](https://www.alibabacloud.com/help/en/tair/latest/what-is-tair) is a cloud native in-memory database service developed by `Alibaba Cloud`. \n", - "It provides rich data models and enterprise-grade capabilities to support your real-time online scenarios while maintaining full compatibility with open source `Redis`. `Tair` also introduces persistent memory-optimized instances that are based on the new non-volatile memory (NVM) storage medium.\n", + "It provides rich data models and enterprise-grade capabilities to support your real-time online scenarios while maintaining full compatibility with open-source `Redis`. 
`Tair` also introduces persistent memory-optimized instances that are based on the new non-volatile memory (NVM) storage medium.\n",
    "\n",
    "This notebook shows how to use functionality related to the `Tair` vector database.\n",
    "\n",
diff --git a/docs/docs/integrations/vectorstores/tigris.ipynb b/docs/docs/integrations/vectorstores/tigris.ipynb
index ba529c1033b60..c2119ac47045f 100644
--- a/docs/docs/integrations/vectorstores/tigris.ipynb
+++ b/docs/docs/integrations/vectorstores/tigris.ipynb
@@ -6,7 +6,7 @@
    "source": [
    "# Tigris\n",
    "\n",
-    "> [Tigris](htttps://tigrisdata.com) is an open source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications.\n",
+    "> [Tigris](https://tigrisdata.com) is an open-source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications.\n",
    "> `Tigris` eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead."
   ]
  },
diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb
index a547f5c640f0b..2daf5cf160cac 100644
--- a/docs/docs/integrations/vectorstores/typesense.ipynb
+++ b/docs/docs/integrations/vectorstores/typesense.ipynb
@@ -6,7 +6,7 @@
    "source": [
    "# Typesense\n",
    "\n",
-    "> [Typesense](https://typesense.org) is an open source, in-memory search engine, that you can either [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run on [Typesense Cloud](https://cloud.typesense.org/).\n",
+    "> [Typesense](https://typesense.org) is an open-source, in-memory search engine that you can either [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run on [Typesense Cloud](https://cloud.typesense.org/).\n",
    ">\n",
    "> Typesense focuses on performance by storing the entire index in RAM (with a backup on disk) and also focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.\n",
    ">\n",
diff --git a/docs/docs/integrations/vectorstores/vectara.ipynb b/docs/docs/integrations/vectorstores/vectara.ipynb
index e95504860bdee..907588a6dd431 100644
--- a/docs/docs/integrations/vectorstores/vectara.ipynb
+++ b/docs/docs/integrations/vectorstores/vectara.ipynb
@@ -11,7 +11,7 @@
    "See the [Vectara API documentation ](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
    "\n",
    "This notebook shows how to use functionality related to the `Vectara`'s integration with langchain.\n",
-    "Note that unlike many other integrations in this category, Vectara provides an end-to-end managed service for [Grounded Generation](https://vectara.com/grounded-generation/) (aka retrieval agumented generation), which includes:\n",
+    "Note that unlike many other integrations in this category, Vectara provides an end-to-end managed service for [Grounded Generation](https://vectara.com/grounded-generation/) (aka retrieval augmented generation), which includes:\n",
    "1. A way to extract text from document files and chunk them into sentences.\n",
    "2. Its own embeddings model and vector store - each text segment is encoded into a vector embedding and stored in the Vectara internal vector store\n",
    "3. 
A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching))\n", diff --git a/docs/docs/integrations/vectorstores/zep.ipynb b/docs/docs/integrations/vectorstores/zep.ipynb index 4456aece0be44..b8a3e5fdfb871 100644 --- a/docs/docs/integrations/vectorstores/zep.ipynb +++ b/docs/docs/integrations/vectorstores/zep.ipynb @@ -5,7 +5,7 @@ "source": [ "# Zep\n", "\n", - "Zep is an open source long-term memory store for LLM applications. Zep makes it easy to add relevant documents,\n", + "Zep is an open-source long-term memory store for LLM applications. Zep makes it easy to add relevant documents,\n", "chat history memory & rich user data to your LLM app's prompts.\n", "\n", "**Note:** The `ZepVectorStore` works with `Documents` and is intended to be used as a `Retriever`.\n", diff --git a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb b/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb index ece210c841552..2d0d4ed2d2661 100644 --- a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb +++ b/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb @@ -24,7 +24,9 @@ "source": [ "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n", "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", - "from langchain.llms import OpenAI\nfrom langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n", + "from langchain.llms import OpenAI\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "from langchain.utilities import GoogleSearchAPIWrapper" ] }, @@ -45,7 +47,7 @@ "prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", "readonlymemory = ReadOnlySharedMemory(memory=memory)\n", - "summry_chain = LLMChain(\n", + "summary_chain = LLMChain(\n", " llm=OpenAI(),\n", " prompt=prompt,\n", " verbose=True,\n", @@ -69,7 +71,7 @@ " ),\n", " Tool(\n", " name=\"Summary\",\n", - " func=summry_chain.run,\n", + " func=summary_chain.run,\n", " description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\",\n", " ),\n", "]" @@ -317,7 +319,7 @@ "\n", "prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", - "summry_chain = LLMChain(\n", + "summary_chain = LLMChain(\n", " llm=OpenAI(),\n", " prompt=prompt,\n", " verbose=True,\n", @@ -333,7 +335,7 @@ " ),\n", " Tool(\n", " name=\"Summary\",\n", - " func=summry_chain.run,\n", + " func=summary_chain.run,\n", " description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\",\n", " ),\n", "]\n", diff --git a/docs/docs/modules/agents/tools/multi_input_tool.ipynb b/docs/docs/modules/agents/tools/multi_input_tool.ipynb index 8ea3bb639a070..15376d0d98544 100644 --- a/docs/docs/modules/agents/tools/multi_input_tool.ipynb +++ b/docs/docs/modules/agents/tools/multi_input_tool.ipynb @@ -139,7 +139,7 @@ "source": [ "## Multi-Input Tools with a string format\n", "\n", - "An alternative to the structured tool would be to use the regular `Tool` class and accept a single string. 
The tool would then have to handle the parsing logic to extract the relavent values from the text, which tightly couples the tool representation to the agent prompt. This is still useful if the underlying language model can't reliably generate structured schema. \n",
+    "An alternative to the structured tool would be to use the regular `Tool` class and accept a single string. The tool would then have to handle the parsing logic to extract the relevant values from the text, which tightly couples the tool representation to the agent prompt. This is still useful if the underlying language model can't reliably generate structured schema. \n",
    "\n",
    "Let's take the multiplication function as an example. In order to use this, we will tell the agent to generate the \"Action Input\" as a comma-separated list of length two. We will then write a thin wrapper that takes a string, splits it into two around a comma, and passes both parsed sides as integers to the multiplication function."
   ]
diff --git a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb
index 86aadd97c7e07..11b21c6af22e5 100644
--- a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb
+++ b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb
@@ -45,7 +45,7 @@
    "source": [
    "#### Simple usage\n",
    "\n",
-    "Specify the LLM to use for query generation, and the retriver will do the rest."
+    "Specify the LLM to use for query generation, and the retriever will do the rest."
   ]
  },
  {
@@ -155,7 +155,7 @@
    "    different versions of the given user question to retrieve relevant documents from a vector \n",
    "    database. By generating multiple perspectives on the user question, your goal is to help\n",
    "    the user overcome some of the limitations of the distance-based similarity search. \n",
-    "    Provide these alternative questions seperated by newlines.\n",
+    "    Provide these alternative questions separated by newlines.\n",
    "    Original question: {question}\"\"\",\n",
    ")\n",
    "llm = ChatOpenAI(temperature=0)\n",
diff --git a/docs/docs/modules/data_connection/retrievers/self_query/myscale_self_query.ipynb b/docs/docs/modules/data_connection/retrievers/self_query/myscale_self_query.ipynb
index d437d95f53d6d..0c02dbfcc68ad 100644
--- a/docs/docs/modules/data_connection/retrievers/self_query/myscale_self_query.ipynb
+++ b/docs/docs/modules/data_connection/retrievers/self_query/myscale_self_query.ipynb
@@ -47,7 +47,7 @@
    "id": "83811610-7df3-4ede-b268-68a6a83ba9e2",
    "metadata": {},
    "source": [
-    "In this tutorial we follow other example's setting and use `OpenAIEmbeddings`. Remember to get an OpenAI API Key for valid accesss to LLMs."
+    "In this tutorial we follow the setup of the other examples and use `OpenAIEmbeddings`. Remember to get an OpenAI API Key for valid access to LLMs."
   ]
  },
  {
diff --git a/docs/docs/modules/data_connection/retrievers/web_research.ipynb b/docs/docs/modules/data_connection/retrievers/web_research.ipynb
index f880266a0f244..d016e8e4d40bb 100644
--- a/docs/docs/modules/data_connection/retrievers/web_research.ipynb
+++ b/docs/docs/modules/data_connection/retrievers/web_research.ipynb
@@ -144,14 +144,14 @@
    "INFO:langchain.retrievers.web_research:Generating questions for Google Search ...\n",
    "INFO:langchain.retrievers.web_research:Questions for Google Search (raw): {'question': 'What is Task Decomposition in LLM Powered Autonomous Agents?', 'text': LineList(lines=['1. 
How do LLM powered autonomous agents utilize task decomposition?\\n', '2. Can you explain the concept of task decomposition in LLM powered autonomous agents?\\n', '3. What role does task decomposition play in the functioning of LLM powered autonomous agents?\\n', '4. Why is task decomposition important for LLM powered autonomous agents?\\n'])}\n", "INFO:langchain.retrievers.web_research:Questions for Google Search: ['1. How do LLM powered autonomous agents utilize task decomposition?\\n', '2. Can you explain the concept of task decomposition in LLM powered autonomous agents?\\n', '3. What role does task decomposition play in the functioning of LLM powered autonomous agents?\\n', '4. Why is task decomposition important for LLM powered autonomous agents?\\n']\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?\" , (2)\\xa0...'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... In a LLM-powered autonomous agent system, LLM functions as the ... Task decomposition can be done (1) by LLM with simple prompting like\\xa0...'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Agent System Overview In a LLM-powered autonomous agent system, ... Task decomposition can be done (1) by LLM with simple prompting like\\xa0...'}]\n", "INFO:langchain.retrievers.web_research:New URLs to load: []\n" ] @@ -271,14 +271,14 @@ "INFO:langchain.retrievers.web_research:Generating questions for Google Search ...\n", "INFO:langchain.retrievers.web_research:Questions for Google Search (raw): {'question': 'What is Task Decomposition in LLM Powered Autonomous Agents?', 'text': LineList(lines=['1. How do LLM powered autonomous agents use task decomposition?\\n', '2. Why is task decomposition important for LLM powered autonomous agents?\\n', '3. 
Can you explain the concept of task decomposition in LLM powered autonomous agents?\\n', '4. What are the benefits of task decomposition in LLM powered autonomous agents?\\n'])}\n", "INFO:langchain.retrievers.web_research:Questions for Google Search: ['1. How do LLM powered autonomous agents use task decomposition?\\n', '2. Why is task decomposition important for LLM powered autonomous agents?\\n', '3. Can you explain the concept of task decomposition in LLM powered autonomous agents?\\n', '4. What are the benefits of task decomposition in LLM powered autonomous agents?\\n']\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?\" , (2)\\xa0...'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?'}]\n", "INFO:langchain.retrievers.web_research:New URLs to load: ['https://lilianweng.github.io/posts/2023-06-23-agent/']\n", "INFO:langchain.retrievers.web_research:Grabbing most relevant splits from urls ...\n", @@ -490,14 +490,14 @@ "llama_print_timings: total time = 16236.13 ms\n", "INFO:langchain.retrievers.web_research:Questions for Google Search (raw): {'question': 'What is Task Decomposition in LLM Powered Autonomous Agents?', 'text': LineList(lines=['1. How does Task Decomposition work in LLM Powered Autonomous Agents? \\n', '2. What are the benefits of using Task Decomposition in LLM Powered Autonomous Agents? \\n', '3. Can you provide examples of Task Decomposition in LLM Powered Autonomous Agents? \\n', '4. 
How does Task Decomposition improve the performance of LLM Powered Autonomous Agents? \\n'])}\n", "INFO:langchain.retrievers.web_research:Questions for Google Search: ['1. How does Task Decomposition work in LLM Powered Autonomous Agents? \\n', '2. What are the benefits of using Task Decomposition in LLM Powered Autonomous Agents? \\n', '3. Can you provide examples of Task Decomposition in LLM Powered Autonomous Agents? \\n', '4. How does Task Decomposition improve the performance of LLM Powered Autonomous Agents? \\n']\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?\" , (2)\\xa0...'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... A complicated task usually involves many steps. An agent needs to know what they are and plan ahead. Task Decomposition#. Chain of thought (CoT;\\xa0...'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... Agent System Overview In a LLM-powered autonomous agent system, ... 
Task decomposition can be done (1) by LLM with simple prompting like\xa0...'}]\n",
    "INFO:langchain.retrievers.web_research:New URLs to load: ['https://lilianweng.github.io/posts/2023-06-23-agent/']\n",
    "INFO:langchain.retrievers.web_research:Grabbing most relevant splits from urls ...\n",
diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb
index ec5e681f6c520..24610e16de892 100644
--- a/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb
+++ b/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb
@@ -23,7 +23,7 @@
    "source": [
    "## Feast\n",
    "\n",
-    "To start, we will use the popular open source feature store framework [Feast](https://github.com/feast-dev/feast).\n",
+    "To start, we will use the popular open-source feature store framework [Feast](https://github.com/feast-dev/feast).\n",
    "\n",
-    "This assumes you have already run the steps in the README around getting started. We will build off of that example in getting started, and create and LLMChain to write a note to a specific driver regarding their up-to-date statistics."
+    "This assumes you have already run the steps in the README around getting started. We will build off of that example in getting started, and create an LLMChain to write a note to a specific driver regarding their up-to-date statistics."
   ]
@@ -220,7 +220,7 @@
    "source": [
    "## Tecton\n",
    "\n",
-    "Above, we showed how you could use Feast, a popular open source and self-managed feature store, with LangChain. Our examples below will show a similar integration using Tecton. Tecton is a fully managed feature platform built to orchestrate the complete ML feature lifecycle, from transformation to online serving, with enterprise-grade SLAs."
+    "Above, we showed how you could use Feast, a popular open-source and self-managed feature store, with LangChain. Our examples below will show a similar integration using Tecton. Tecton is a fully managed feature platform built to orchestrate the complete ML feature lifecycle, from transformation to online serving, with enterprise-grade SLAs."
   ]
  },
  {
diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/prompt_serialization.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/prompt_serialization.ipynb
index 28bc342fe4124..0d0f12f90a483 100644
--- a/docs/docs/modules/model_io/prompts/prompt_templates/prompt_serialization.ipynb
+++ b/docs/docs/modules/model_io/prompts/prompt_templates/prompt_serialization.ipynb
@@ -7,13 +7,13 @@
    "source": [
    "# Serialization\n",
    "\n",
-    "It is often preferrable to store prompts not as python code but as files. This can make it easy to share, store, and version prompts. This notebook covers how to do that in LangChain, walking through all the different types of prompts and the different serialization options.\n",
+    "It is often preferable to store prompts not as python code but as files. This can make it easy to share, store, and version prompts. This notebook covers how to do that in LangChain, walking through all the different types of prompts and the different serialization options.\n",
    "\n",
    "At a high level, the following design principles are applied to serialization:\n",
    "\n",
    "1. Both JSON and YAML are supported. We want to support serialization methods that are human readable on disk, and YAML and JSON are two of the most popular methods for that. Note that this rule applies to prompts. For other assets, like examples, different serialization methods may be supported.\n",
    "\n",
-    "2. 
We support specifying everything in one file, or storing different components (templates, examples, etc) in different files and referencing them. For some cases, storing everything in file makes the most sense, but for others it is preferrable to split up some of the assets (long templates, large examples, reusable components). LangChain supports both.\n",
+    "2. We support specifying everything in one file, or storing different components (templates, examples, etc) in different files and referencing them. For some cases, storing everything in one file makes the most sense, but for others it is preferable to split up some of the assets (long templates, large examples, reusable components). LangChain supports both.\n",
    "\n",
    "There is also a single entry point to load prompts from disk, making it easy to load any type of prompt."
   ]
diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb
index 74316f704f254..c2233c462977e 100644
--- a/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb
+++ b/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb
@@ -17,7 +17,7 @@
    "source": [
    "## String prompt pipelining\n",
    "\n",
-    "When working with string prompts, each template is joined togther. You can work with either prompts directly or strings (the first element in the list needs to be a prompt)."
+    "When working with string prompts, each template is joined together. You can work with either prompts directly or strings (the first element in the list needs to be a prompt)."
   ]
  },
  {
diff --git a/docs/docs/modules/paul_graham_essay.txt b/docs/docs/modules/paul_graham_essay.txt
index b572cb6726b85..0bce3830b9968 100644
--- a/docs/docs/modules/paul_graham_essay.txt
+++ b/docs/docs/modules/paul_graham_essay.txt
@@ -172,15 +172,15 @@ Around this time, in the spring of 2000, I had an idea. It was clear from our ex
 
 I got so excited about this idea that I couldn't think about anything else. It seemed obvious that this was the future. I didn't particularly want to start another company, but it was clear that this idea would have to be embodied as one, so I decided to move to Cambridge and start it. I hoped to lure Robert into working on it with me, but there I ran into a hitch. Robert was now a postdoc at MIT, and though he'd made a lot of money the last time I'd lured him into working on one of my schemes, it had also been a huge time sink. So while he agreed that it sounded like a plausible idea, he firmly refused to work on it.
 
-Hmph. Well, I'd do it myself then. I recruited Dan Giffin, who had worked for Viaweb, and two undergrads who wanted summer jobs, and we got to work trying to build what it's now clear is about twenty companies and several open source projects worth of software. The language for defining applications would of course be a dialect of Lisp. But I wasn't so naive as to assume I could spring an overt Lisp on a general audience; we'd hide the parentheses, like Dylan did.
+Hmph. Well, I'd do it myself then. I recruited Dan Giffin, who had worked for Viaweb, and two undergrads who wanted summer jobs, and we got to work trying to build what it's now clear is about twenty companies and several open-source projects worth of software. The language for defining applications would of course be a dialect of Lisp. But I wasn't so naive as to assume I could spring an overt Lisp on a general audience; we'd hide the parentheses, like Dylan did. 
By then there was a name for the kind of company Viaweb was, an "application service provider," or ASP. This name didn't last long before it was replaced by "software as a service," but it was current for long enough that I named this new company after it: it was going to be called Aspra. -I started working on the application builder, Dan worked on network infrastructure, and the two undergrads worked on the first two services (images and phone calls). But about halfway through the summer I realized I really didn't want to run a company — especially not a big one, which it was looking like this would have to be. I'd only started Viaweb because I needed the money. Now that I didn't need money anymore, why was I doing this? If this vision had to be realized as a company, then screw the vision. I'd build a subset that could be done as an open source project. +I started working on the application builder, Dan worked on network infrastructure, and the two undergrads worked on the first two services (images and phone calls). But about halfway through the summer I realized I really didn't want to run a company — especially not a big one, which it was looking like this would have to be. I'd only started Viaweb because I needed the money. Now that I didn't need money anymore, why was I doing this? If this vision had to be realized as a company, then screw the vision. I'd build a subset that could be done as an open-source project. Much to my surprise, the time I spent working on this stuff was not wasted after all. After we started Y Combinator, I would often encounter startups working on parts of this new architecture, and it was very useful to have spent so much time thinking about it and even trying to write some of it. -The subset I would build as an open source project was the new Lisp, whose parentheses I now wouldn't even have to hide. A lot of Lisp hackers dream of building a new Lisp, partly because one of the distinctive features of the language is that it has dialects, and partly, I think, because we have in our minds a Platonic form of Lisp that all existing dialects fall short of. I certainly did. So at the end of the summer Dan and I switched to working on this new dialect of Lisp, which I called Arc, in a house I bought in Cambridge. +The subset I would build as an open-source project was the new Lisp, whose parentheses I now wouldn't even have to hide. A lot of Lisp hackers dream of building a new Lisp, partly because one of the distinctive features of the language is that it has dialects, and partly, I think, because we have in our minds a Platonic form of Lisp that all existing dialects fall short of. I certainly did. So at the end of the summer Dan and I switched to working on this new dialect of Lisp, which I called Arc, in a house I bought in Cambridge. The following spring, lightning struck. I was invited to give a talk at a Lisp conference, so I gave one about how we'd used Lisp at Viaweb. Afterward I put a postscript file of this talk online, on paulgraham.com, which I'd created years before using Viaweb but had never used for anything. In one day it got 30,000 page views. What on earth had happened? The referring urls showed that someone had posted it on Slashdot. 
[10] diff --git a/docs/docs/modules/state_of_the_union.txt b/docs/docs/modules/state_of_the_union.txt index d50175de40e70..b453aacdae3b6 100644 --- a/docs/docs/modules/state_of_the_union.txt +++ b/docs/docs/modules/state_of_the_union.txt @@ -495,7 +495,7 @@ I spoke with their families and told them that we are forever in debt for their I’ve worked on these issues a long time. -I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. +I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. So let’s not abandon our streets. Or choose between safety and equal justice. diff --git a/docs/docs/use_cases/apis.ipynb b/docs/docs/use_cases/apis.ipynb index 789e01e4e3674..0072909e5805e 100644 --- a/docs/docs/use_cases/apis.ipynb +++ b/docs/docs/use_cases/apis.ipynb @@ -16,7 +16,7 @@ "id": "a15e6a18", "metadata": {}, "source": [ - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/apis.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/apis.ipynb)\n", "\n", "## Use case \n", "\n", diff --git a/docs/docs/use_cases/chatbots.ipynb b/docs/docs/use_cases/chatbots.ipynb index cfad32fc815e0..0af38db7e6d31 100644 --- a/docs/docs/use_cases/chatbots.ipynb +++ b/docs/docs/use_cases/chatbots.ipynb @@ -16,7 +16,7 @@ "id": "ee7f95e4", "metadata": {}, "source": [ - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/chatbots.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/chatbots.ipynb)\n", "\n", "## Use case\n", "\n", diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb index 5b862108912e7..9b543d1a039ff 100644 --- a/docs/docs/use_cases/data_generation.ipynb +++ b/docs/docs/use_cases/data_generation.ipynb @@ -17,7 +17,7 @@ "id": "aa3571cc", "metadata": {}, "source": [ - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/data_generation.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/data_generation.ipynb)\n", "\n", "## Use case\n", "\n", diff --git a/docs/docs/use_cases/extraction.ipynb b/docs/docs/use_cases/extraction.ipynb index 6fe7d0fc92ee5..9403afd6c2dfc 100644 --- a/docs/docs/use_cases/extraction.ipynb +++ b/docs/docs/use_cases/extraction.ipynb @@ -576,7 +576,7 @@ "\n", "### Going deeper\n", "\n", - "* The [output parser](/docs/modules/model_io/output_parsers/) documentation includes various parser examples for specific types (e.g., lists, datetimne, enum, etc). \n", + "* The [output parser](/docs/modules/model_io/output_parsers/) documentation includes various parser examples for specific types (e.g., lists, datetime, enum, etc). 
\n", "* [JSONFormer](/docs/integrations/llms/jsonformer_experimental) offers another way for structured decoding of a subset of the JSON Schema.\n", "* [Kor](https://eyurtsev.github.io/kor/) is another library for extraction where schema and examples can be provided to the LLM." ] diff --git a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb index 7a63f78888de5..30144d0330f26 100644 --- a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb +++ b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb @@ -7,7 +7,7 @@ "source": [ "# Diffbot Graph Transformer\n", "\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb)\n", "\n", "## Use case\n", "\n", diff --git a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb index e82da530ffc58..411a2ef056801 100644 --- a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb @@ -9,7 +9,7 @@ "source": [ "# ArangoDB QA chain\n", "\n", - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/arangodb/interactive_tutorials/blob/master/notebooks/Langchain.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/arangodb/interactive_tutorials/blob/master/notebooks/Langchain.ipynb)\n", "\n", "This notebook shows how to use LLMs to provide a natural language interface to an [ArangoDB](https://github.com/arangodb/arangodb#readme) database." ] diff --git a/docs/docs/use_cases/qa_structured/sql.ipynb b/docs/docs/use_cases/qa_structured/sql.ipynb index a3f8e3750037e..032b4eefb4a38 100644 --- a/docs/docs/use_cases/qa_structured/sql.ipynb +++ b/docs/docs/use_cases/qa_structured/sql.ipynb @@ -14,13 +14,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/qa_structured/sql.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/qa_structured/sql.ipynb)\n", "\n", "## Use case\n", "\n", "Enterprise data is often stored in SQL databases.\n", "\n", - "LLMs make it possible to interact with SQL databases using natural langugae.\n", + "LLMs make it possible to interact with SQL databases using natural language.\n", "\n", "LangChain offers SQL Chains and Agents to build and run SQL queries based on natural language prompts. 
\n", "\n", @@ -275,7 +275,7 @@ "\n", "{table_info}.\n", "\n", - "Some examples of SQL queries that corrsespond to questions are:\n", + "Some examples of SQL queries that correspond to questions are:\n", "\n", "{few_shot_examples}\n", "\n", @@ -950,7 +950,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we can proceed with creating the custom **retreiver tool** and the final agent:" + "Now we can proceed with creating the custom **retriever tool** and the final agent:" ] }, { diff --git a/docs/docs/use_cases/question_answering/code_understanding.ipynb b/docs/docs/use_cases/question_answering/code_understanding.ipynb index 9bdf9c49730e8..500d4177d2bf1 100644 --- a/docs/docs/use_cases/question_answering/code_understanding.ipynb +++ b/docs/docs/use_cases/question_answering/code_understanding.ipynb @@ -57,7 +57,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We'lll follow the structure of [this notebook](https://github.com/cristobalcl/LearningLangChain/blob/master/notebooks/04%20-%20QA%20with%20code.ipynb) and employ [context aware code splitting](https://python.langchain.com/docs/integrations/document_loaders/source_code)." + "We'll follow the structure of [this notebook](https://github.com/cristobalcl/LearningLangChain/blob/master/notebooks/04%20-%20QA%20with%20code.ipynb) and employ [context aware code splitting](https://python.langchain.com/docs/integrations/document_loaders/source_code)." ] }, { @@ -331,7 +331,7 @@ "source": [ "The can look at the [LangSmith trace](https://smith.langchain.com/public/2b23045f-4e49-4d2d-8980-dec85259af36/r) to see what is happening under the hood:\n", "\n", - "* In particular, the code well structured and kept together in the retrival output\n", + "* In particular, the code well structured and kept together in the retrieval output\n", "* The retrieved code and chat history are passed to the LLM for answer distillation\n", "\n", "![Image description](/img/code_retrieval.png)" diff --git a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb index ad8f202e77d0d..6f1e6615a6bb9 100644 --- a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb +++ b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb @@ -269,7 +269,7 @@ "Invoking: `search_state_of_union` with `{'query': 'Kentaji Brown Jackson'}`\n", "\n", "\n", - "\u001b[0m\u001b[36;1m\u001b[1;3m[Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'}), Document(page_content='One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \\n\\nWhen they came home, many of the world’s fittest and best trained warriors were never the same. \\n\\nHeadaches. Numbness. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. \\n\\nI know. \\n\\nOne of those soldiers was my son Major Beau Biden. \\n\\nWe don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \\n\\nBut I’m committed to finding out everything we can. \\n\\nCommitted to military families like Danielle Robinson from Ohio. \\n\\nThe widow of Sergeant First Class Heath Robinson. \\n\\nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \\n\\nStationed near Baghdad, just yards from burn pits the size of football fields. \\n\\nHeath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'}), Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'}), Document(page_content='We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \\n\\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \\n\\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \\n\\nOfficer Mora was 27 years old. \\n\\nOfficer Rivera was 22. \\n\\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \\n\\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \\n\\nI’ve worked on these issues a long time. \\n\\nI know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'})]\u001b[0m\u001b[32;1m\u001b[1;3mIn the most recent state of the union, the President mentioned Kentaji Brown Jackson. 
The President nominated Circuit Court of Appeals Judge Ketanji Brown Jackson to serve on the United States Supreme Court. The President described Judge Ketanji Brown Jackson as one of our nation's top legal minds who will continue Justice Breyer's legacy of excellence.\u001b[0m\n", + "\u001b[0m\u001b[36;1m\u001b[1;3m[Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'}), Document(page_content='One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \\n\\nWhen they came home, many of the world’s fittest and best trained warriors were never the same. \\n\\nHeadaches. Numbness. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. \\n\\nI know. \\n\\nOne of those soldiers was my son Major Beau Biden. \\n\\nWe don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \\n\\nBut I’m committed to finding out everything we can. \\n\\nCommitted to military families like Danielle Robinson from Ohio. \\n\\nThe widow of Sergeant First Class Heath Robinson. \\n\\nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \\n\\nStationed near Baghdad, just yards from burn pits the size of football fields. \\n\\nHeath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'}), Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'}), Document(page_content='We can’t change how divided we’ve been. 
But we can change how we move forward—on COVID-19 and other issues we must face together. \\n\\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \\n\\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \\n\\nOfficer Mora was 27 years old. \\n\\nOfficer Rivera was 22. \\n\\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \\n\\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \\n\\nI’ve worked on these issues a long time. \\n\\nI know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.', metadata={'source': '../../../../../docs/docs/modules/state_of_the_union.txt'})]\u001b[0m\u001b[32;1m\u001b[1;3mIn the most recent state of the union, the President mentioned Kentaji Brown Jackson. The President nominated Circuit Court of Appeals Judge Ketanji Brown Jackson to serve on the United States Supreme Court. The President described Judge Ketanji Brown Jackson as one of our nation's top legal minds who will continue Justice Breyer's legacy of excellence.\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] diff --git a/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb b/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb index 54fe471375c90..f2237c94581f1 100644 --- a/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb +++ b/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb @@ -94,7 +94,7 @@ "source": [ "This sets us up well do perform metadata filtering based on the document structure.\n", "\n", - "Let's bring this all togther by building a vectorstore first." + "Let's bring this all together by building a vectorstore first." ] }, { @@ -151,7 +151,7 @@ "]\n", "document_content_description = \"Major sections of the document\"\n", "\n", - "# Define self query retriver\n", + "# Define self query retriever\n", "llm = OpenAI(temperature=0)\n", "retriever = SelfQueryRetriever.from_llm(\n", " llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n", @@ -272,7 +272,7 @@ "id": "1af7720f", "metadata": {}, "source": [ - "Now, we can create chat or Q+A apps that are aware of the explict document structure. \n", + "Now, we can create chat or Q+A apps that are aware of the explicit document structure. \n", "\n", "The ability to retain document structure for metadata filtering can be helpful for complicated or longer documents." 
   ]
diff --git a/docs/docs/use_cases/question_answering/index.ipynb b/docs/docs/use_cases/question_answering/index.ipynb
index f27abd5102c34..4669f690617b5 100644
--- a/docs/docs/use_cases/question_answering/index.ipynb
+++ b/docs/docs/use_cases/question_answering/index.ipynb
@@ -7,7 +7,7 @@
   "source": [
    "# Retrieval-augmented generation (RAG)\n",
    "\n",
-    "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/question_answering/qa.ipynb)\n",
+    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/question_answering/qa.ipynb)\n",
    "\n",
    "## Use case\n",
    "Suppose you have some text documents (PDF, blog, Notion pages, etc.) and want to ask questions related to the contents of those documents. \n",
diff --git a/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb b/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb
index 9f4b8a816acfc..f2139ff696a50 100644
--- a/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb
+++ b/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb
@@ -9,7 +9,7 @@
    "\n",
    "The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), and [GPT4All](https://github.com/nomic-ai/gpt4all) underscores the importance of running LLMs locally.\n",
    "\n",
-    "LangChain has [integrations](https://integrations.langchain.com/) with many open source LLMs that can be run locally.\n",
+    "LangChain has [integrations](https://integrations.langchain.com/) with many open-source LLMs that can be run locally.\n",
    "\n",
    "See [here](docs/guides/local_llms) for setup instructions for these LLMs. \n",
    "\n",
@@ -35,7 +35,7 @@
   "id": "5e7543fa",
   "metadata": {},
   "source": [
-    "Load and split an example docucment.\n",
+    "Load and split an example document.\n",
    "\n",
    "We'll use a blog post on agents as an example."
   ]
diff --git a/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb b/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb
index 7d197acb673a5..8847a2b79e89e 100644
--- a/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb
+++ b/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb
@@ -9,7 +9,7 @@
    "\n",
    "Oftentimes you may want to do retrieval over multiple sources. These can be different vectorstores (where one contains information about topic X and the other contains info about topic Y). They could also be completely different databases altogether!\n",
    "\n",
-    "A key part is is doing as much of the retrieval in parrelel as possible. This will keep the latency as low as possible. Luckily, [LangChain Expression Language](../../) supports parrellism out of the box.\n",
+    "A key part is doing as much of the retrieval in parallel as possible. This will keep the latency as low as possible. Luckily, [LangChain Expression Language](../../) supports parallelism out of the box.\n",
    "\n",
    "Let's take a look at where we do retrieval over a SQL database and a vectorstore."
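As a preview of the parallel wiring this notebook builds toward, a minimal sketch. The names `sql_retriever` and `vector_retriever` are assumed stand-ins for the two sources, not objects defined in the notebook, and the retriever outputs are interpolated into the prompt as-is:

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough

prompt = ChatPromptTemplate.from_template(
    "SQL context: {sql}\n\nVector context: {vector}\n\nQuestion: {question}"
)

# A dict of runnables is coerced to a RunnableParallel, so both
# retrievals execute concurrently before the prompt is filled in.
chain = (
    {
        "sql": sql_retriever,          # assumed retriever over the SQL database
        "vector": vector_retriever,    # assumed retriever over the vectorstore
        "question": RunnablePassthrough(),
    }
    | prompt
    | ChatOpenAI(temperature=0)
    | StrOutputParser()
)

chain.invoke("Which topics appear in both sources?")
```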
] diff --git a/docs/docs/use_cases/summarization.ipynb b/docs/docs/use_cases/summarization.ipynb index 01bf1b2331423..fd22cdc9b04f6 100644 --- a/docs/docs/use_cases/summarization.ipynb +++ b/docs/docs/use_cases/summarization.ipynb @@ -16,7 +16,7 @@ "id": "cf13f702", "metadata": {}, "source": [ - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/summarization.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/summarization.ipynb)\n", "\n", "## Use case\n", "\n", diff --git a/docs/docs/use_cases/tagging.ipynb b/docs/docs/use_cases/tagging.ipynb index f24ec43e38cd9..7b6b5280d9ff0 100644 --- a/docs/docs/use_cases/tagging.ipynb +++ b/docs/docs/use_cases/tagging.ipynb @@ -16,7 +16,7 @@ "id": "a0507a4b", "metadata": {}, "source": [ - "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/tagging.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/tagging.ipynb)\n", "\n", "## Use case\n", "\n", diff --git a/docs/docs/use_cases/web_scraping.ipynb b/docs/docs/use_cases/web_scraping.ipynb index fc01c3ecfa810..5802eec8fc6a8 100644 --- a/docs/docs/use_cases/web_scraping.ipynb +++ b/docs/docs/use_cases/web_scraping.ipynb @@ -537,10 +537,10 @@ "INFO:langchain.retrievers.web_research:Generating questions for Google Search ...\n", "INFO:langchain.retrievers.web_research:Questions for Google Search (raw): {'question': 'How do LLM Powered Autonomous Agents work?', 'text': LineList(lines=['1. What is the functioning principle of LLM Powered Autonomous Agents?\\n', '2. How do LLM Powered Autonomous Agents operate?\\n'])}\n", "INFO:langchain.retrievers.web_research:Questions for Google Search: ['1. What is the functioning principle of LLM Powered Autonomous Agents?\\n', '2. How do LLM Powered Autonomous Agents operate?\\n']\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': 'LLM Powered Autonomous Agents | Hacker News', 'link': 'https://news.ycombinator.com/item?id=36488871', 'snippet': 'Jun 26, 2023 ... Exactly. A temperature of 0 means you always pick the highest probability token (i.e. the \"max\" function), while a temperature of 1 means you\\xa0...'}]\n", - "INFO:langchain.retrievers.web_research:Searching for relevat urls ...\n", + "INFO:langchain.retrievers.web_research:Searching for relevant urls ...\n", "INFO:langchain.retrievers.web_research:Search results: [{'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'link': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'snippet': 'Jun 23, 2023 ... 
Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\" , \"What are the subgoals for achieving XYZ?\" , (2) by\\xa0...'}]\n",
    "INFO:langchain.retrievers.web_research:New URLs to load: []\n",
    "INFO:langchain.retrievers.web_research:Grabbing most relevant splits from urls...\n"
@@ -577,7 +577,7 @@
   "source": [
    "### Going deeper \n",
    "\n",
-    "* Here's a [app](https://github.com/langchain-ai/web-explorer/tree/main) that wraps this retriver with a lighweight UI."
+    "* Here's an [app](https://github.com/langchain-ai/web-explorer/tree/main) that wraps this retriever with a lightweight UI."
   ],
   "id": "7a940df1"
  },
diff --git a/docs/snippets/modules/agents/agent_types/structured_chat.mdx b/docs/snippets/modules/agents/agent_types/structured_chat.mdx
index e7b7d6e5dea97..6abbac5c4dbcb 100644
--- a/docs/snippets/modules/agents/agent_types/structured_chat.mdx
+++ b/docs/snippets/modules/agents/agent_types/structured_chat.mdx
@@ -126,7 +126,7 @@ print(response)
-TL;DR - We recently open-sourced an auto-evaluator tool for grading LLM question-answer chains. We are now releasing an open source, free to use hosted app and API to expand usability. Below we discuss a few opportunities to further improve May 1, 2023 5 min read Callbacks Improvements TL;DR: We're announcing improvements to our callbacks system, which powers logging, tracing, streaming output, and some awesome third-party integrations. This will better support concurrent runs with independent callbacks, tracing of deeply nested trees of LangChain components, and callback handlers scoped to a single request (which is super useful for May 1, 2023 3 min read Unleashing the power of AI Collaboration with Parallelized LLM Agent Actor Trees Editor's note: the following is a guest blog post from Cyrus at Shaman AI. We use guest blog posts to highlight interesting and novel applications, and this is certainly that. There's been a lot of talk about agents recently, but most have been discussions around a single agent. If multiple Apr 28, 2023 4 min read Gradio & LLM Agents Editor's note: this is a guest blog post from Freddy Boulton, a software engineer at Gradio. We're excited to share this post because it brings a large number of exciting new tools into the ecosystem. Agents are largely defined by the tools they have, so to be able to equip Apr 23, 2023 4 min read RecAlign - The smart content filter for social media feed [Editor's Note] This is a guest post by Tian Jin. We are highlighting this application as we think it is a novel use case. Specifically, we think recommendation systems are incredibly impactful in our everyday lives and there has not been a ton of discourse on how LLMs will impact Apr 22, 2023 3 min read Improving Document Retrieval with Contextual Compression Note: This post assumes some familiarity with LangChain and is moderately technical.
+ We recently open-sourced an auto-evaluator tool for grading LLM question-answer chains. We are now releasing an open-source, free-to-use hosted app and API to expand usability. Below we discuss a few opportunities to further improve May 1, 2023 5 min read Callbacks Improvements TL;DR: We're announcing improvements to our callbacks system, which powers logging, tracing, streaming output, and some awesome third-party integrations.
This will better support concurrent runs with independent callbacks, tracing of deeply nested trees of LangChain components, and callback handlers scoped to a single request (which is super useful for May 1, 2023 3 min read Unleashing the power of AI Collaboration with Parallelized LLM Agent Actor Trees Editor's note: the following is a guest blog post from Cyrus at Shaman AI. We use guest blog posts to highlight interesting and novel applications, and this is certainly that. There's been a lot of talk about agents recently, but most have been discussions around a single agent. If multiple Apr 28, 2023 4 min read Gradio & LLM Agents Editor's note: this is a guest blog post from Freddy Boulton, a software engineer at Gradio. We're excited to share this post because it brings a large number of exciting new tools into the ecosystem. Agents are largely defined by the tools they have, so to be able to equip Apr 23, 2023 4 min read RecAlign - The smart content filter for social media feed [Editor's Note] This is a guest post by Tian Jin. We are highlighting this application as we think it is a novel use case. Specifically, we think recommendation systems are incredibly impactful in our everyday lives and there has not been a ton of discourse on how LLMs will impact Apr 22, 2023 3 min read Improving Document Retrieval with Contextual Compression Note: This post assumes some familiarity with LangChain and is moderately technical. 💡 TL;DR: We’ve introduced a new abstraction and a new document Retriever to facilitate the post-processing of retrieved documents. Specifically, the new abstraction makes it easy to take a set of retrieved documents and extract from them Apr 20, 2023 3 min read Autonomous Agents & Agent Simulations Over the past two weeks, there has been a massive increase in using LLMs in an agentic manner. Specifically, projects like AutoGPT, BabyAGI, CAMEL, and Generative Agents have popped up. The LangChain community has now implemented some parts of all of those projects in the LangChain framework. While researching and Apr 18, 2023 7 min read AI-Powered Medical Knowledge: Revolutionizing Care for Rare Conditions [Editor's Note]: This is a guest post by Jack Simon, who recently participated in a hackathon at Williams College. He built a LangChain-powered chatbot focused on appendiceal cancer, aiming to make specialized knowledge more accessible to those in need. If you are interested in building a chatbot for another rare Apr 17, 2023 3 min read Auto-Eval of Question-Answering Tasks By Lance Martin diff --git a/docs/snippets/modules/data_connection/document_transformers/text_splitters/code_splitter.mdx b/docs/snippets/modules/data_connection/document_transformers/text_splitters/code_splitter.mdx index 2087fc9e4cafd..008b161c0aa2a 100644 --- a/docs/snippets/modules/data_connection/document_transformers/text_splitters/code_splitter.mdx +++ b/docs/snippets/modules/data_connection/document_transformers/text_splitters/code_splitter.mdx @@ -158,7 +158,7 @@ markdown_text = """ pip install langchain ``` -As an open source project in a rapidly developing field, we are extremely open to contributions. +As an open-source project in a rapidly developing field, we are extremely open to contributions. 
""" ```` @@ -180,7 +180,7 @@ md_docs Document(page_content="```bash\n# Hopefully this code block isn't split", metadata={}), Document(page_content='pip install langchain', metadata={}), Document(page_content='```', metadata={}), - Document(page_content='As an open source project in a rapidly developing field, we', metadata={}), + Document(page_content='As an open-source project in a rapidly developing field, we', metadata={}), Document(page_content='are extremely open to contributions.', metadata={})] ``` @@ -276,7 +276,7 @@ html_text = """

            <p>⚡ Building applications with LLMs through composability ⚡</p>
        </div>
        <div>
-            As an open source project in a rapidly developing field, we are extremely open to contributions.
+            As an open-source project in a rapidly developing field, we are extremely open to contributions.
        </div>
    </body>
</html>
"""

@@ -305,7 +305,7 @@ html_docs
    [Document(page_content='<!DOCTYPE html>\n<html>\n    <body>\n        <div>\n            <h1>🦜️🔗 LangChain</h1>', metadata={}),
     Document(page_content='<p>⚡ Building applications with LLMs through composability ⚡', metadata={}),
     Document(page_content='</p>\n        </div>', metadata={}),
-    Document(page_content='<div>\n            As an open source project in a rapidly dev', metadata={}),
+    Document(page_content='<div>\n            As an open-source project in a rapidly dev', metadata={}),
     Document(page_content='eloping field, we are extremely open to contributions.', metadata={}),
     Document(page_content='</div>\n    </body>\n</html>', metadata={})]
```
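A minimal sketch of the language-aware splitting that produces `html_docs` output like the above; the `chunk_size` and `chunk_overlap` values here are illustrative assumptions, not values pinned by this snippet:

```python
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

# Assumed parameters: a small chunk_size keeps the example splits visible.
html_splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.HTML, chunk_size=60, chunk_overlap=0
)

# `html_text` is the HTML string from the hunk above.
html_docs = html_splitter.create_documents([html_text])
for doc in html_docs:
    print(repr(doc.page_content))
```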
diff --git a/docs/snippets/modules/data_connection/vectorstores/async.mdx b/docs/snippets/modules/data_connection/vectorstores/async.mdx
index 007f25b14fab7..105a50333467d 100644
--- a/docs/snippets/modules/data_connection/vectorstores/async.mdx
+++ b/docs/snippets/modules/data_connection/vectorstores/async.mdx
@@ -83,7 +83,7 @@ I spoke with their families and told them that we are forever in debt for their

 I’ve worked on these issues a long time.

-I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.
+I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.
```
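The hunk above only repairs recorded output text. For context, a minimal sketch of the async vector store calls that produce output like this; the Qdrant backend, in-memory location, file name, and query are illustrative assumptions rather than the snippet's actual setup:

```python
import asyncio

from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Qdrant

async def main():
    # Assumed input: the state-of-the-union text used throughout these docs.
    raw_docs = TextLoader("state_of_the_union.txt").load()
    docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(raw_docs)

    # Build the store and query it without blocking the event loop.
    db = await Qdrant.afrom_documents(
        docs, OpenAIEmbeddings(), location=":memory:", collection_name="sotu"
    )
    found = await db.asimilarity_search("What did the president say about crime prevention?")
    print(found[0].page_content)

asyncio.run(main())
```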