diff --git a/.github/workflows/_integration_test.yml b/.github/workflows/_integration_test.yml index e6c8296c59c19..e3507c4880c33 100644 --- a/.github/workflows/_integration_test.yml +++ b/.github/workflows/_integration_test.yml @@ -37,12 +37,6 @@ jobs: shell: bash run: poetry install --with test,test_integration - - name: 'Authenticate to Google Cloud' - id: 'auth' - uses: google-github-actions/auth@v2 - with: - credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' - - name: Run integration tests shell: bash env: @@ -50,7 +44,6 @@ jobs: ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }} TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} run: | make integration_tests diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 46a847276ae51..a5837927996b7 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -1,5 +1,5 @@ name: release -run-name: Release ${{ inputs.working-directory }} by @${{ github.actor }} + on: workflow_call: inputs: @@ -149,12 +149,6 @@ jobs: run: make tests working-directory: ${{ inputs.working-directory }} - - name: 'Authenticate to Google Cloud' - id: 'auth' - uses: google-github-actions/auth@v2 - with: - credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' - - name: Run integration tests if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }} env: @@ -162,7 +156,6 @@ jobs: ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }} TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} run: make integration_tests working-directory: ${{ inputs.working-directory }} diff --git a/cookbook/LLaMA2_sql_chat.ipynb b/cookbook/LLaMA2_sql_chat.ipynb index 3b697f314de82..f31a7fd58f4d9 100644 --- a/cookbook/LLaMA2_sql_chat.ipynb +++ b/cookbook/LLaMA2_sql_chat.ipynb @@ -149,7 +149,7 @@ ], "source": [ "# Prompt\n", - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "# Update the template based on the type of SQL Database like MySQL, Microsoft SQL Server and so on\n", "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n", @@ -278,7 +278,7 @@ "source": [ "# Prompt\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "\n", "template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. 
Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb index 79f311328a143..c2c12ef87e17a 100644 --- a/cookbook/Multi_modal_RAG.ipynb +++ b/cookbook/Multi_modal_RAG.ipynb @@ -198,9 +198,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", "\n", "\n", "# Generate summaries of text elements\n", @@ -355,9 +355,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def create_multi_vector_retriever(\n", diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb index 2429413558ee2..0a9117337df3f 100644 --- a/cookbook/Semi_Structured_RAG.ipynb +++ b/cookbook/Semi_Structured_RAG.ipynb @@ -235,9 +235,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI" + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.output_parsers import StrOutputParser" ] }, { @@ -320,9 +320,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb index 82ce6faf7f3df..ffcf351d092a7 100644 --- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb +++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb @@ -211,9 +211,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI" + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.output_parsers import StrOutputParser" ] }, { @@ -375,9 +375,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git 
a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb index 19b9218ae76d9..2a57c329bf6aa 100644 --- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb +++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb @@ -209,9 +209,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", "from langchain_community.chat_models import ChatOllama\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate" + "from langchain_core.output_parsers import StrOutputParser" ] }, { diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb index 45d424b452d4c..1f8d84c41be7b 100644 --- a/cookbook/advanced_rag_eval.ipynb +++ b/cookbook/advanced_rag_eval.ipynb @@ -132,8 +132,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "baseline = Chroma.from_texts(\n", " texts=all_splits_pypdf_texts,\n", @@ -160,9 +160,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", "\n", "# Prompt\n", "prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n", diff --git a/cookbook/agent_vectorstore.ipynb b/cookbook/agent_vectorstore.ipynb index 388e4702a3a6a..6b9a88e4484ba 100644 --- a/cookbook/agent_vectorstore.ipynb +++ b/cookbook/agent_vectorstore.ipynb @@ -29,8 +29,9 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import Chroma\n", - "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -160,7 +161,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/analyze_document.ipynb b/cookbook/analyze_document.ipynb index 4b872d823a74e..9b61507c1eccf 100644 --- a/cookbook/analyze_document.ipynb +++ b/cookbook/analyze_document.ipynb @@ -29,7 +29,7 @@ "outputs": [], "source": [ "from langchain.chains import AnalyzeDocumentChain\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/cookbook/autogpt/autogpt.ipynb b/cookbook/autogpt/autogpt.ipynb index 0d4930c4837c7..911c8c3c86c45 100644 --- a/cookbook/autogpt/autogpt.ipynb +++ b/cookbook/autogpt/autogpt.ipynb @@ -62,8 +62,8 @@ "outputs": [], "source": [ "from langchain.docstore import InMemoryDocstore\n", - "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { @@ -100,8 +100,8 @@ "metadata": {}, "outputs": 
[], "source": [ - "from langchain_experimental.autonomous_agents import AutoGPT\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_experimental.autonomous_agents import AutoGPT" ] }, { diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb index 44f2445e640b5..d998630b7088a 100644 --- a/cookbook/autogpt/marathon_times.ipynb +++ b/cookbook/autogpt/marathon_times.ipynb @@ -41,8 +41,8 @@ "import pandas as pd\n", "from langchain.docstore.document import Document\n", "from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.autonomous_agents import AutoGPT\n", - "from langchain_openai import ChatOpenAI\n", "\n", "# Needed synce jupyter runs an async eventloop\n", "nest_asyncio.apply()" @@ -311,8 +311,8 @@ "# Memory\n", "import faiss\n", "from langchain.docstore import InMemoryDocstore\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()\n", "embedding_size = 1536\n", diff --git a/cookbook/baby_agi.ipynb b/cookbook/baby_agi.ipynb index 9545632a42fcd..9583eadba6d62 100644 --- a/cookbook/baby_agi.ipynb +++ b/cookbook/baby_agi.ipynb @@ -31,8 +31,9 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain_experimental.autonomous_agents import BabyAGI\n", - "from langchain_openai import OpenAI, OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", + "from langchain_experimental.autonomous_agents import BabyAGI" ] }, { diff --git a/cookbook/baby_agi_with_agent.ipynb b/cookbook/baby_agi_with_agent.ipynb index 13476e53196c2..393b026b87b4d 100644 --- a/cookbook/baby_agi_with_agent.ipynb +++ b/cookbook/baby_agi_with_agent.ipynb @@ -29,8 +29,9 @@ "\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_experimental.autonomous_agents import BabyAGI\n", - "from langchain_openai import OpenAI, OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", + "from langchain_experimental.autonomous_agents import BabyAGI" ] }, { @@ -107,8 +108,8 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", + "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import SerpAPIWrapper\n", - "from langchain_openai import OpenAI\n", "\n", "todo_prompt = PromptTemplate.from_template(\n", " \"You are a planner who is an expert at coming up with a todo list for a given objective. 
Come up with a todo list for this objective: {objective}\"\n", diff --git a/cookbook/camel_role_playing.ipynb b/cookbook/camel_role_playing.ipynb index ab8f44adf99b0..2feffde2007b3 100644 --- a/cookbook/camel_role_playing.ipynb +++ b/cookbook/camel_role_playing.ipynb @@ -46,7 +46,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/causal_program_aided_language_model.ipynb b/cookbook/causal_program_aided_language_model.ipynb index 0f1e5fb8c32b4..5e5b3c0b57413 100644 --- a/cookbook/causal_program_aided_language_model.ipynb +++ b/cookbook/causal_program_aided_language_model.ipynb @@ -47,9 +47,9 @@ "outputs": [], "source": [ "from IPython.display import SVG\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.cpal.base import CPALChain\n", "from langchain_experimental.pal_chain import PALChain\n", - "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0, max_tokens=512)\n", "cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)\n", diff --git a/cookbook/code-analysis-deeplake.ipynb b/cookbook/code-analysis-deeplake.ipynb index 67c1ecbe39f3f..4b5ea3ae4c50b 100644 --- a/cookbook/code-analysis-deeplake.ipynb +++ b/cookbook/code-analysis-deeplake.ipynb @@ -657,7 +657,7 @@ } ], "source": [ - "from langchain_openai import OpenAIEmbeddings\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "embeddings" @@ -834,7 +834,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model_name=\"gpt-3.5-turbo-0613\"\n", diff --git a/cookbook/custom_agent_with_plugin_retrieval.ipynb b/cookbook/custom_agent_with_plugin_retrieval.ipynb index 9131599da0fab..7046a9fba62bd 100644 --- a/cookbook/custom_agent_with_plugin_retrieval.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval.ipynb @@ -44,8 +44,8 @@ "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain_community.agent_toolkits import NLAToolkit\n", - "from langchain_community.tools.plugin import AIPlugin\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_community.tools.plugin import AIPlugin" ] }, { @@ -115,8 +115,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb index 30fc61712da6b..e8b3611761edf 100644 --- a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb @@ -69,8 +69,8 @@ "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain_community.agent_toolkits import NLAToolkit\n", - "from langchain_community.tools.plugin import AIPlugin\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import 
OpenAI\n", + "from langchain_community.tools.plugin import AIPlugin" ] }, { @@ -139,8 +139,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/cookbook/custom_agent_with_tool_retrieval.ipynb b/cookbook/custom_agent_with_tool_retrieval.ipynb index 7981a13716ba0..28932f59b22dc 100644 --- a/cookbook/custom_agent_with_tool_retrieval.ipynb +++ b/cookbook/custom_agent_with_tool_retrieval.ipynb @@ -41,8 +41,8 @@ "from langchain.chains import LLMChain\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain_community.utilities import SerpAPIWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_community.utilities import SerpAPIWrapper" ] }, { @@ -104,8 +104,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/cookbook/databricks_sql_db.ipynb b/cookbook/databricks_sql_db.ipynb index 08faf009653d4..6cb5da4fe9e2e 100644 --- a/cookbook/databricks_sql_db.ipynb +++ b/cookbook/databricks_sql_db.ipynb @@ -93,7 +93,7 @@ "outputs": [], "source": [ "# Creating a OpenAI Chat LLM wrapper\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")" ] diff --git a/cookbook/deeplake_semantic_search_over_chat.ipynb b/cookbook/deeplake_semantic_search_over_chat.ipynb index 3dd2c920049a7..042cdf7399a40 100644 --- a/cookbook/deeplake_semantic_search_over_chat.ipynb +++ b/cookbook/deeplake_semantic_search_over_chat.ipynb @@ -56,8 +56,9 @@ " CharacterTextSplitter,\n", " RecursiveCharacterTextSplitter,\n", ")\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import DeepLake\n", - "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", diff --git a/cookbook/docugami_xml_kg_rag.ipynb b/cookbook/docugami_xml_kg_rag.ipynb index a9c8607935e64..2a9837ddecf56 100644 --- a/cookbook/docugami_xml_kg_rag.ipynb +++ b/cookbook/docugami_xml_kg_rag.ipynb @@ -475,8 +475,8 @@ " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.output_parsers import StrOutputParser" ] }, { @@ -547,9 +547,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores.chroma import Chroma\n", "from langchain_core.documents import Document\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def 
build_retriever(text_elements, tables, table_summaries):\n", diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb index 3a38446a30d75..af4bc6d71e6c2 100644 --- a/cookbook/elasticsearch_db_qa.ipynb +++ b/cookbook/elasticsearch_db_qa.ipynb @@ -39,7 +39,7 @@ "source": [ "from elasticsearch import Elasticsearch\n", "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/extraction_openai_tools.ipynb b/cookbook/extraction_openai_tools.ipynb index dae98315f7fbf..fd251b35d0783 100644 --- a/cookbook/extraction_openai_tools.ipynb +++ b/cookbook/extraction_openai_tools.ipynb @@ -22,8 +22,8 @@ "from typing import List, Optional\n", "\n", "from langchain.chains.openai_tools import create_extraction_chain_pydantic\n", - "from langchain_core.pydantic_v1 import BaseModel\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.pydantic_v1 import BaseModel" ] }, { @@ -153,7 +153,7 @@ "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", "from langchain_core.runnables import Runnable\n", "from langchain_core.pydantic_v1 import BaseModel\n", - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.messages import SystemMessage\n", "from langchain_core.language_models import BaseLanguageModel\n", "\n", diff --git a/cookbook/forward_looking_retrieval_augmented_generation.ipynb b/cookbook/forward_looking_retrieval_augmented_generation.ipynb index 0abfe0bfeff60..e7b3ecda227af 100644 --- a/cookbook/forward_looking_retrieval_augmented_generation.ipynb +++ b/cookbook/forward_looking_retrieval_augmented_generation.ipynb @@ -74,8 +74,9 @@ " CallbackManagerForRetrieverRun,\n", ")\n", "from langchain.schema import BaseRetriever, Document\n", - "from langchain_community.utilities import GoogleSerperAPIWrapper\n", - "from langchain_openai import ChatOpenAI, OpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.llms import OpenAI\n", + "from langchain_community.utilities import GoogleSerperAPIWrapper" ] }, { diff --git a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb index e2e6694405844..f7570fd7f2900 100644 --- a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb +++ b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb @@ -49,8 +49,9 @@ "\n", "from langchain.docstore import InMemoryDocstore\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from termcolor import colored" ] }, diff --git a/cookbook/hugginggpt.ipynb b/cookbook/hugginggpt.ipynb index 751948e88d33a..d94076cbf385f 100644 --- a/cookbook/hugginggpt.ipynb +++ b/cookbook/hugginggpt.ipynb @@ -75,8 +75,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import HuggingGPT\n", - "from langchain_openai import OpenAI\n", "\n", "# %env 
OPENAI_API_BASE=http://localhost:8000/v1" ] diff --git a/cookbook/human_approval.ipynb b/cookbook/human_approval.ipynb index 59e46bbc4ef4a..aae360250f182 100644 --- a/cookbook/human_approval.ipynb +++ b/cookbook/human_approval.ipynb @@ -159,7 +159,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb index 58cde25fe9cba..ea997869ad274 100644 --- a/cookbook/hypothetical_document_embeddings.ipynb +++ b/cookbook/hypothetical_document_embeddings.ipynb @@ -22,7 +22,8 @@ "source": [ "from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_openai import OpenAI, OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/learned_prompt_optimization.ipynb b/cookbook/learned_prompt_optimization.ipynb index b7894d4482caa..3f4d02dd46126 100644 --- a/cookbook/learned_prompt_optimization.ipynb +++ b/cookbook/learned_prompt_optimization.ipynb @@ -49,7 +49,7 @@ "source": [ "# pick and configure the LLM of your choice\n", "\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/llm_bash.ipynb b/cookbook/llm_bash.ipynb index 61a56f17836f6..9a345df74f421 100644 --- a/cookbook/llm_bash.ipynb +++ b/cookbook/llm_bash.ipynb @@ -43,8 +43,8 @@ } ], "source": [ + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.llm_bash.base import LLMBashChain\n", - "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/cookbook/llm_checker.ipynb b/cookbook/llm_checker.ipynb index 4c128fdc2afab..cfc5f2356ab61 100644 --- a/cookbook/llm_checker.ipynb +++ b/cookbook/llm_checker.ipynb @@ -42,7 +42,7 @@ ], "source": [ "from langchain.chains import LLMCheckerChain\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0.7)\n", "\n", diff --git a/cookbook/llm_math.ipynb b/cookbook/llm_math.ipynb index 6260be2f0351b..e0a026ba35cf5 100644 --- a/cookbook/llm_math.ipynb +++ b/cookbook/llm_math.ipynb @@ -46,7 +46,7 @@ ], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_math = LLMMathChain.from_llm(llm, verbose=True)\n", diff --git a/cookbook/llm_summarization_checker.ipynb b/cookbook/llm_summarization_checker.ipynb index ed3f1087164a8..8501c98daf945 100644 --- a/cookbook/llm_summarization_checker.ipynb +++ b/cookbook/llm_summarization_checker.ipynb @@ -331,7 +331,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n", @@ -822,7 +822,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", 
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n", @@ -1096,7 +1096,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n", diff --git a/cookbook/llm_symbolic_math.ipynb b/cookbook/llm_symbolic_math.ipynb index 69ccbaf072acf..10275f83ca5ac 100644 --- a/cookbook/llm_symbolic_math.ipynb +++ b/cookbook/llm_symbolic_math.ipynb @@ -14,8 +14,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n", - "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)" diff --git a/cookbook/meta_prompt.ipynb b/cookbook/meta_prompt.ipynb index 746d3a42032c0..f0e78ab197daa 100644 --- a/cookbook/meta_prompt.ipynb +++ b/cookbook/meta_prompt.ipynb @@ -59,7 +59,7 @@ "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferWindowMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/multi_modal_QA.ipynb b/cookbook/multi_modal_QA.ipynb index 160b721116efc..1e316cdb07dbc 100644 --- a/cookbook/multi_modal_QA.ipynb +++ b/cookbook/multi_modal_QA.ipynb @@ -91,8 +91,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.messages import HumanMessage, SystemMessage\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.messages import HumanMessage, SystemMessage" ] }, { diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb index 0af89590bf673..17d49ffe8a3eb 100644 --- a/cookbook/multi_modal_RAG_chroma.ipynb +++ b/cookbook/multi_modal_RAG_chroma.ipynb @@ -315,10 +315,10 @@ "source": [ "from operator import itemgetter\n", "\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.messages import HumanMessage, SystemMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI\n", "\n", "\n", "def prompt_func(data_dict):\n", diff --git a/cookbook/multi_modal_output_agent.ipynb b/cookbook/multi_modal_output_agent.ipynb index e5929ead11c0c..8626c3bcf2d72 100644 --- a/cookbook/multi_modal_output_agent.ipynb +++ b/cookbook/multi_modal_output_agent.ipynb @@ -44,7 +44,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.tools import SteamshipImageGenerationTool\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/multi_player_dnd.ipynb b/cookbook/multi_player_dnd.ipynb index 05c4d45914678..c03bb6ad22e5c 100644 --- a/cookbook/multi_player_dnd.ipynb +++ b/cookbook/multi_player_dnd.ipynb @@ -32,7 +32,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_authoritarian.ipynb b/cookbook/multiagent_authoritarian.ipynb index 893b35f7c7868..8e9a82a062d2d 100644 
--- a/cookbook/multiagent_authoritarian.ipynb +++ b/cookbook/multiagent_authoritarian.ipynb @@ -41,7 +41,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_bidding.ipynb b/cookbook/multiagent_bidding.ipynb index fbb9f03f53d1f..1ee6383d92069 100644 --- a/cookbook/multiagent_bidding.ipynb +++ b/cookbook/multiagent_bidding.ipynb @@ -33,7 +33,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/myscale_vector_sql.ipynb b/cookbook/myscale_vector_sql.ipynb index d26ac19d7350c..b02a19f723942 100644 --- a/cookbook/myscale_vector_sql.ipynb +++ b/cookbook/myscale_vector_sql.ipynb @@ -32,9 +32,9 @@ "\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import SQLDatabase\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", - "from langchain_openai import OpenAI\n", "from sqlalchemy import MetaData, create_engine\n", "\n", "MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n", @@ -75,10 +75,10 @@ "outputs": [], "source": [ "from langchain.callbacks import StdOutCallbackHandler\n", + "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities.sql_database import SQLDatabase\n", "from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", - "from langchain_openai import OpenAI\n", "\n", "chain = VectorSQLDatabaseChain(\n", " llm_chain=LLMChain(\n", @@ -117,6 +117,7 @@ "outputs": [], "source": [ "from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.retrievers.vector_sql_database import (\n", " VectorSQLDatabaseChainRetriever,\n", ")\n", @@ -125,7 +126,6 @@ " VectorSQLDatabaseChain,\n", " VectorSQLRetrieveAllOutputParser,\n", ")\n", - "from langchain_openai import ChatOpenAI\n", "\n", "output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n", " output_parser.model\n", diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb index 648b28b5e2c17..c214377e794d7 100644 --- a/cookbook/openai_functions_retrieval_qa.ipynb +++ b/cookbook/openai_functions_retrieval_qa.ipynb @@ -22,8 +22,8 @@ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.vectorstores import Chroma\n", - "from langchain_openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { @@ -53,7 +53,7 @@ "from langchain.chains import create_qa_with_sources_chain\n", "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 298c6c8aa3650..8e0b95020aecf 100644 --- 
a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.messages import HumanMessage, SystemMessage\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.messages import HumanMessage, SystemMessage" ] }, { @@ -414,7 +414,7 @@ "BREAKING CHANGES:\n", "- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n", "```python\n", - "from langchain_openai import AzureOpenAIEmbeddings\n", + "from langchain_community.embeddings import AzureOpenAIEmbeddings\n", "```\n", "\n", "\n", @@ -456,8 +456,8 @@ "from typing import Literal\n", "\n", "from langchain.output_parsers.openai_tools import PydanticToolsParser\n", + "from langchain.prompts import ChatPromptTemplate\n", "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", - "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", "\n", "\n", diff --git a/cookbook/petting_zoo.ipynb b/cookbook/petting_zoo.ipynb index c0db7653b0919..5c269b1a67c78 100644 --- a/cookbook/petting_zoo.ipynb +++ b/cookbook/petting_zoo.ipynb @@ -52,7 +52,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/plan_and_execute_agent.ipynb b/cookbook/plan_and_execute_agent.ipynb index d710514658c21..2bbdcc6bb011d 100644 --- a/cookbook/plan_and_execute_agent.ipynb +++ b/cookbook/plan_and_execute_agent.ipynb @@ -30,14 +30,15 @@ "outputs": [], "source": [ "from langchain.chains import LLMMathChain\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_experimental.plan_and_execute import (\n", " PlanAndExecute,\n", " load_agent_executor,\n", " load_chat_planner,\n", - ")\n", - "from langchain_openai import ChatOpenAI, OpenAI" + ")" ] }, { diff --git a/cookbook/press_releases.ipynb b/cookbook/press_releases.ipynb index 30aba0a68db0d..a86927f7afeb0 100644 --- a/cookbook/press_releases.ipynb +++ b/cookbook/press_releases.ipynb @@ -82,7 +82,7 @@ "source": [ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.retrievers import KayAiRetriever\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "retriever = KayAiRetriever.create(\n", diff --git a/cookbook/program_aided_language_model.ipynb b/cookbook/program_aided_language_model.ipynb index 17320ab8c0583..5eed7766eae52 100644 --- a/cookbook/program_aided_language_model.ipynb +++ b/cookbook/program_aided_language_model.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_experimental.pal_chain import PALChain\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_experimental.pal_chain import PALChain" ] }, { diff --git a/cookbook/qa_citations.ipynb b/cookbook/qa_citations.ipynb index a8dbd1c61330a..2ca389a063ec1 100644 --- a/cookbook/qa_citations.ipynb +++ 
b/cookbook/qa_citations.ipynb @@ -27,7 +27,7 @@ ], "source": [ "from langchain.chains import create_citation_fuzzy_match_chain\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/rag_fusion.ipynb b/cookbook/rag_fusion.ipynb index 976e8cfab41cb..a340e97ed017e 100644 --- a/cookbook/rag_fusion.ipynb +++ b/cookbook/rag_fusion.ipynb @@ -30,8 +30,8 @@ "outputs": [], "source": [ "import pinecone\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Pinecone\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "pinecone.init(api_key=\"...\", environment=\"...\")" ] @@ -86,8 +86,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.output_parsers import StrOutputParser" ] }, { diff --git a/cookbook/retrieval_in_sql.ipynb b/cookbook/retrieval_in_sql.ipynb index 998e9aa8dd686..32d2384cadbc5 100644 --- a/cookbook/retrieval_in_sql.ipynb +++ b/cookbook/retrieval_in_sql.ipynb @@ -43,7 +43,7 @@ "outputs": [], "source": [ "from langchain.sql_database import SQLDatabase\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n", "db = SQLDatabase.from_uri(CONNECTION_STRING)" @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_openai import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()" ] @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.\n", "Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. 
You can order the results to return the most informative data in the database.\n", @@ -267,9 +267,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI\n", "\n", "db = SQLDatabase.from_uri(\n", " CONNECTION_STRING\n", diff --git a/cookbook/rewrite.ipynb b/cookbook/rewrite.ipynb index 270d7d964edd5..b60ee96b95b80 100644 --- a/cookbook/rewrite.ipynb +++ b/cookbook/rewrite.ipynb @@ -31,11 +31,11 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI" + "from langchain_core.runnables import RunnablePassthrough" ] }, { diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb index 158329a5f09e6..11cb7afd8b353 100644 --- a/cookbook/sales_agent_with_context.ipynb +++ b/cookbook/sales_agent_with_context.ipynb @@ -53,9 +53,10 @@ "from langchain.prompts.base import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.llms import BaseLLM\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import BaseLLM, OpenAI\n", "from langchain_community.vectorstores import Chroma\n", - "from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n", "from pydantic import BaseModel, Field" ] }, diff --git a/cookbook/selecting_llms_based_on_context_length.ipynb b/cookbook/selecting_llms_based_on_context_length.ipynb index d4e22100a9306..ae885f5e0b66a 100644 --- a/cookbook/selecting_llms_based_on_context_length.ipynb +++ b/cookbook/selecting_llms_based_on_context_length.ipynb @@ -18,9 +18,9 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompt_values import PromptValue\n", - "from langchain_openai import ChatOpenAI" + "from langchain_core.prompt_values import PromptValue" ] }, { diff --git a/cookbook/self_query_hotel_search.ipynb b/cookbook/self_query_hotel_search.ipynb index d38192c5a2cb3..a349bd7f9b244 100644 --- a/cookbook/self_query_hotel_search.ipynb +++ b/cookbook/self_query_hotel_search.ipynb @@ -255,7 +255,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model=\"gpt-4\")\n", "res = model.predict(\n", @@ -1083,8 +1083,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import ElasticsearchStore\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/cookbook/sharedmemory_for_tools.ipynb b/cookbook/sharedmemory_for_tools.ipynb index 
3b8efc7359085..9134a263aeed5 100644 --- a/cookbook/sharedmemory_for_tools.ipynb +++ b/cookbook/sharedmemory_for_tools.ipynb @@ -26,8 +26,8 @@ "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.utilities import GoogleSearchAPIWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_community.utilities import GoogleSearchAPIWrapper" ] }, { diff --git a/cookbook/smart_llm.ipynb b/cookbook/smart_llm.ipynb index 0e617617e35c3..b8bb31c97e365 100644 --- a/cookbook/smart_llm.ipynb +++ b/cookbook/smart_llm.ipynb @@ -52,8 +52,8 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain_experimental.smart_llm import SmartLLMChain\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_experimental.smart_llm import SmartLLMChain" ] }, { diff --git a/cookbook/sql_db_qa.mdx b/cookbook/sql_db_qa.mdx index 73cdd953f3efd..851fae5202b86 100644 --- a/cookbook/sql_db_qa.mdx +++ b/cookbook/sql_db_qa.mdx @@ -9,7 +9,7 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas ```python -from langchain_openai import OpenAI +from langchain_community.llms import OpenAI from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` @@ -200,7 +200,7 @@ result["intermediate_steps"] How to add memory to a SQLDatabaseChain: ```python -from langchain_openai import OpenAI +from langchain_community.llms import OpenAI from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` diff --git a/cookbook/stepback-qa.ipynb b/cookbook/stepback-qa.ipynb index 6827b04da738f..e06652e80ed75 100644 --- a/cookbook/stepback-qa.ipynb +++ b/cookbook/stepback-qa.ipynb @@ -23,10 +23,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", - "from langchain_core.runnables import RunnableLambda\n", - "from langchain_openai import ChatOpenAI" + "from langchain_core.runnables import RunnableLambda" ] }, { diff --git a/cookbook/tree_of_thought.ipynb b/cookbook/tree_of_thought.ipynb index 63ff323ec6077..6fead2e0cc8b0 100644 --- a/cookbook/tree_of_thought.ipynb +++ b/cookbook/tree_of_thought.ipynb @@ -24,7 +24,7 @@ } ], "source": [ - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb index 4f540fa5abdf9..28942b32b4401 100644 --- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb +++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb @@ -37,8 +37,8 @@ "import getpass\n", "import os\n", "\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import DeepLake\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API 
Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", @@ -3809,7 +3809,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb index b31e769dee17f..9fc9d1a757c5f 100644 --- a/cookbook/two_agent_debate_tools.ipynb +++ b/cookbook/two_agent_debate_tools.ipynb @@ -30,7 +30,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/two_player_dnd.ipynb b/cookbook/two_player_dnd.ipynb index d90e4f9365fe7..c36a59b4774e2 100644 --- a/cookbook/two_player_dnd.ipynb +++ b/cookbook/two_player_dnd.ipynb @@ -28,7 +28,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/wikibase_agent.ipynb b/cookbook/wikibase_agent.ipynb index 692193b0229df..71c5294b8cf05 100644 --- a/cookbook/wikibase_agent.ipynb +++ b/cookbook/wikibase_agent.ipynb @@ -599,7 +599,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)" ] diff --git a/docs/.local_build.sh b/docs/.local_build.sh index 72e15dec90a4a..77b604ededcb7 100755 --- a/docs/.local_build.sh +++ b/docs/.local_build.sh @@ -20,4 +20,4 @@ wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O yarn -poetry run quarto preview docs +quarto preview docs diff --git a/docs/docs/changelog/core.mdx b/docs/docs/changelog/core.mdx deleted file mode 100644 index 9c43d501fcbaf..0000000000000 --- a/docs/docs/changelog/core.mdx +++ /dev/null @@ -1,27 +0,0 @@ -# langchain-core - -## 0.1.7 (Jan 5, 2024) - -#### Deleted - -No deletions. - -#### Deprecated - -- `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead. -- `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead. -- `BaseLLM` methods `__call__, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead. -- `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead. - -#### Fixed - -- Restrict recursive URL scraping: [#15559](https://github.com/langchain-ai/langchain/pull/15559) - -#### Added - -No additions. - -#### Beta - -- Marked `langchain_core.load.load` and `langchain_core.load.loads` as beta. -- Marked `langchain_core.beta.runnables.context.ContextGet` and `langchain_core.beta.runnables.context.ContextSet` as beta. diff --git a/docs/docs/changelog/langchain.mdx b/docs/docs/changelog/langchain.mdx deleted file mode 100644 index bffcce729a953..0000000000000 --- a/docs/docs/changelog/langchain.mdx +++ /dev/null @@ -1,36 +0,0 @@ -# langchain - -## 0.1.0 (Jan 5, 2024) - -#### Deleted - -No deletions. 
- -#### Deprecated - -Deprecated classes and methods will be removed in 0.2.0 - -| Deprecated | Alternative | Reason | -|---------------------------------|-----------------------------------|------------------------------------------------| -| ChatVectorDBChain | ConversationalRetrievalChain | More general to all retrievers | -| create_ernie_fn_chain | create_ernie_fn_runnable | Use LCEL under the hood | -| created_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood | -| NatBotChain | | Not used | -| create_openai_fn_chain | create_openai_fn_runnable | Use LCEL under the hood | -| create_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood | -| load_query_constructor_chain | load_query_constructor_runnable | Use LCEL under the hood | -| VectorDBQA | RetrievalQA | More general to all retrievers | -| Sequential Chain | LCEL | Obviated by LCEL | -| SimpleSequentialChain | LCEL | Obviated by LCEL | -| TransformChain | LCEL/RunnableLambda | Obviated by LCEL | -| create_tagging_chain | create_structured_output_runnable | Use LCEL under the hood | -| ChatAgent | create_react_agent | Use LCEL builder over a class | -| ConversationalAgent | create_react_agent | Use LCEL builder over a class | -| ConversationalChatAgent | create_json_chat_agent | Use LCEL builder over a class | -| initialize_agent | Individual create agent methods | Individual create agent methods are more clear | -| ZeroShotAgent | create_react_agent | Use LCEL builder over a class | -| OpenAIFunctionsAgent | create_openai_functions_agent | Use LCEL builder over a class | -| OpenAIMultiFunctionsAgent | create_openai_tools_agent | Use LCEL builder over a class | -| SelfAskWithSearchAgent | create_self_ask_with_search | Use LCEL builder over a class | -| StructuredChatAgent | create_structured_chat_agent | Use LCEL builder over a class | -| XMLAgent | create_xml_agent | Use LCEL builder over a class | \ No newline at end of file diff --git a/docs/docs/community.md b/docs/docs/community.md new file mode 100644 index 0000000000000..81749dffafde7 --- /dev/null +++ b/docs/docs/community.md @@ -0,0 +1,53 @@ +# Community navigator + +Hi! Thanks for being here. We’re lucky to have a community of so many passionate developers building with LangChain–we have so much to teach and learn from each other. Community members contribute code, host meetups, write blog posts, amplify each other’s work, become each other's customers and collaborators, and so much more. + +Whether you’re new to LangChain, looking to go deeper, or just want to get more exposure to the world of building with LLMs, this page can point you in the right direction. + +- **🦜 Contribute to LangChain** + +- **🌍 Meetups, Events, and Hackathons** + +- **📣 Help Us Amplify Your Work** + +- **💬 Stay in the loop** + + +# 🦜 Contribute to LangChain + +LangChain is the product of over 5,000 contributions by 1,500+ contributors, and there is **still** so much to do together. Here are some ways to get involved: + +- **[Open a pull request](https://github.com/langchain-ai/langchain/issues):** We’d appreciate all forms of contributions–new features, infrastructure improvements, better documentation, bug fixes, etc. If you have an improvement or an idea, we’d love to work on it with you. 
+- **[Read our contributor guidelines:](./contributing/)** We ask contributors to follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow, run a few local checks for formatting, linting, and testing before submitting, and follow certain documentation and testing conventions. + - **First time contributor?** [Try one of these PRs with the “good first issue” tag](https://github.com/langchain-ai/langchain/contribute). +- **Become an expert:** Our experts help the community by answering product questions in Discord. If that’s a role you’d like to play, we’d be so grateful! (And we have some special experts-only goodies/perks we can tell you more about). Send us an email to introduce yourself at hello@langchain.dev and we’ll take it from there! +- **Integrate with LangChain:** If your product integrates with LangChain–or aspires to–we want to help make sure the experience is as smooth as possible for you and end users. Send us an email at hello@langchain.dev and tell us what you’re working on. + - **Become an Integration Maintainer:** Partner with our team to ensure your integration stays up-to-date and talk directly with users (and answer their inquiries) in our Discord. Introduce yourself at hello@langchain.dev if you’d like to explore this role. + + +# 🌍 Meetups, Events, and Hackathons + +One of our favorite things about working in AI is how much enthusiasm there is for building together. We want to help make that as easy and impactful for you as possible! +- **Find a meetup, hackathon, or webinar:** You can find the one for you on our [global events calendar](https://mirror-feeling-d80.notion.site/0bc81da76a184297b86ca8fc782ee9a3?v=0d80342540df465396546976a50cfb3f). + - **Submit an event to our calendar:** Email us at events@langchain.dev with a link to your event page! We can also help you spread the word with our local communities. +- **Host a meetup:** If you want to bring a group of builders together, we want to help! We can publicize your event on our event calendar/Twitter, share it with our local communities in Discord, send swag, or potentially hook you up with a sponsor. Email us at events@langchain.dev to tell us about your event! +- **Become a meetup sponsor:** We often hear from groups of builders that want to get together, but are blocked or limited on some dimension (space to host, budget for snacks, prizes to distribute, etc.). If you’d like to help, send us an email at events@langchain.dev and we can share more about how it works! +- **Speak at an event:** Meetup hosts are always looking for great speakers, presenters, and panelists. If you’d like to do that at an event, send us an email to hello@langchain.dev with more information about yourself, what you want to talk about, and what city you’re based in and we’ll try to match you with an upcoming event! +- **Tell us about your LLM community:** If you host or participate in a community that would welcome support from LangChain and/or our team, send us an email at hello@langchain.dev and let us know how we can help. + +# 📣 Help Us Amplify Your Work + +If you’re working on something you’re proud of, and think the LangChain community would benefit from knowing about it, we want to help you show it off. + +- **Post about your work and mention us:** We love hanging out on Twitter to see what people in the space are talking about and working on. If you tag [@langchainai](https://twitter.com/LangChainAI), we’ll almost certainly see it and can show you some love. 
+
+- **Publish something on our blog:** If you’re writing about your experience building with LangChain, we’d love to post (or crosspost) it on our blog! E-mail hello@langchain.dev with a draft of your post, or even just an idea for something you want to write about.
+- **Get your product onto our [integrations hub](https://integrations.langchain.com/):** Many developers take advantage of our seamless integrations with other products, and come to our integrations hub to find out what those are. If you want to get your product up there, tell us about it (and how it works with LangChain) at hello@langchain.dev.
+
+# 💬 Stay in the loop
+
+Here’s where our team hangs out, talks shop, spotlights cool work, and shares what we’re up to. We’d love to see you there too.
+
+- **[Twitter](https://twitter.com/LangChainAI):** We post about what we’re working on and what cool things we’re seeing in the space. If you tag @langchainai in your post, we’ll almost certainly see it, and can show you some love!
+- **[Discord](https://discord.gg/6adMQxSpJS):** Connect with over 30,000 developers who are building with LangChain.
+- **[GitHub](https://github.com/langchain-ai/langchain):** Open pull requests, contribute to a discussion, and/or contribute code.
+- **[Subscribe to our bi-weekly Release Notes](https://6w1pwbss0py.typeform.com/to/KjZB1auB):** a twice-monthly email roundup of the coolest things going on in our orbit
diff --git a/docs/docs/packages.mdx b/docs/docs/contributing/packages.mdx
similarity index 55%
rename from docs/docs/packages.mdx
rename to docs/docs/contributing/packages.mdx
index a5075335b6a2a..9613699e7146c 100644
--- a/docs/docs/packages.mdx
+++ b/docs/docs/contributing/packages.mdx
@@ -1,43 +1,38 @@
+---
+sidebar_label: Package Versioning
+sidebar_position: 4
+---
+
# 📕 Package Versioning

As of now, LangChain has an ad hoc release process: releases are cut with high frequency by
-a maintainer and published to [PyPI](https://pypi.org/).
+a maintainer and published to [PyPI](https://pypi.org/).

The different packages are versioned slightly differently.

## `langchain-core`

`langchain-core` is currently on version `0.1.x`.

-As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything marked with the `beta` decorator (you can see this in the API reference and will see warnings when using such functionality). The reason for beta features is that given the rate of change of the field, being able to move quickly is still a priority.
+As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything in `langchain_core.beta`. The reason for `langchain_core.beta` is that given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so.

Minor version increases will occur for:

-- Breaking changes for any public interfaces marked as `beta`.
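However the beta carve-out is worded (the `beta` decorator in the old text, the `langchain_core.beta` module in the new), the practical upshot for consumers is the same: breaking changes to stable public interfaces arrive only with a minor version bump. A minimal sketch of a dependency pin consistent with that policy, assuming you are tracking the `0.1.x` series mentioned above (the exact bounds are an assumption; adapt them to your target release):

```shell
# Accept patch releases (bug fixes, additive features, beta churn),
# but hold back the next minor release, where breaking changes to
# non-beta public interfaces would be allowed to land.
pip install "langchain-core>=0.1,<0.2"
```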
+- Breaking changes for any public interfaces NOT in `langchain_core.beta` Patch version increases will occur for: - Bug fixes - New features - Any changes to private interfaces -- Any changes to `beta` features +- Any changes to `langchain_core.beta` ## `langchain` -`langchain` is currently on version `0.1.x` - -Minor version increases will occur for: +`langchain` is currently on version `0.0.x` -- Breaking changes for any public interfaces NOT marked as `beta`. - -Patch version increases will occur for: - -- Bug fixes -- New features -- Any changes to private interfaces -- Any changes to `beta` features +All changes will be accompanied by a patch version increase. Any changes to public interfaces are nearly always done in a backwards compatible way and will be communicated ahead of time when they are not backwards compatible. -We are targeting February 2024 for a release of `langchain` v0.2, which will have some breaking changes to legacy Chains and Agents. -Additionally, we will remove `langchain-community` as a dependency and stop re-exporting integrations that have been moved to `langchain-community`. +We are targeting January 2024 for a release of `langchain` v0.1, at which point `langchain` will adopt the same versioning policy as `langchain-core`. ## `langchain-community` diff --git a/docs/docs/expression_language/cookbook/code_writing.ipynb b/docs/docs/expression_language/cookbook/code_writing.ipynb index ae3e348e695b1..e3f59f39ea20e 100644 --- a/docs/docs/expression_language/cookbook/code_writing.ipynb +++ b/docs/docs/expression_language/cookbook/code_writing.ipynb @@ -20,9 +20,9 @@ "from langchain.prompts import (\n", " ChatPromptTemplate,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_experimental.utilities import PythonREPL\n", - "from langchain_openai import ChatOpenAI" + "from langchain_experimental.utilities import PythonREPL" ] }, { diff --git a/docs/docs/expression_language/cookbook/embedding_router.ipynb b/docs/docs/expression_language/cookbook/embedding_router.ipynb index 8ee6515f29a29..51e193709de39 100644 --- a/docs/docs/expression_language/cookbook/embedding_router.ipynb +++ b/docs/docs/expression_language/cookbook/embedding_router.ipynb @@ -21,9 +21,10 @@ "source": [ "from langchain.prompts import PromptTemplate\n", "from langchain.utils.math import cosine_similarity\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "physics_template = \"\"\"You are a very smart physics professor. \\\n", "You are great at answering questions about physics in a concise and easy to understand manner. 
\\\n", diff --git a/docs/docs/expression_language/cookbook/memory.ipynb b/docs/docs/expression_language/cookbook/memory.ipynb index efeb25f42d963..6fd8a69264800 100644 --- a/docs/docs/expression_language/cookbook/memory.ipynb +++ b/docs/docs/expression_language/cookbook/memory.ipynb @@ -20,9 +20,9 @@ "from operator import itemgetter\n", "\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_messages(\n", diff --git a/docs/docs/expression_language/cookbook/moderation.ipynb b/docs/docs/expression_language/cookbook/moderation.ipynb index f18153850db54..1ab1c117dee03 100644 --- a/docs/docs/expression_language/cookbook/moderation.ipynb +++ b/docs/docs/expression_language/cookbook/moderation.ipynb @@ -18,8 +18,8 @@ "outputs": [], "source": [ "from langchain.chains import OpenAIModerationChain\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import OpenAI" + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/multiple_chains.ipynb b/docs/docs/expression_language/cookbook/multiple_chains.ipynb index d9bb7ba07e0f6..2aecbb9cb3203 100644 --- a/docs/docs/expression_language/cookbook/multiple_chains.ipynb +++ b/docs/docs/expression_language/cookbook/multiple_chains.ipynb @@ -39,9 +39,9 @@ "source": [ "from operator import itemgetter\n", "\n", + "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n", "prompt2 = ChatPromptTemplate.from_template(\n", diff --git a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb index e8f33a5037268..6abaf835b335f 100644 --- a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/cookbook/prompt_size.ipynb b/docs/docs/expression_language/cookbook/prompt_size.ipynb index f2a7132dfbb3a..b19f027829f9f 100644 --- a/docs/docs/expression_language/cookbook/prompt_size.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_size.ipynb @@ -26,12 +26,12 @@ "from langchain.agents import AgentExecutor, load_tools\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", + "from langchain.prompts import 
ChatPromptTemplate, MessagesPlaceholder\n", "from langchain.prompts.chat import ChatPromptValue\n", "from langchain.tools import WikipediaQueryRun\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n", - "from langchain_community.utilities import WikipediaAPIWrapper\n", - "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.utilities import WikipediaAPIWrapper" ] }, { diff --git a/docs/docs/expression_language/cookbook/retrieval.ipynb b/docs/docs/expression_language/cookbook/retrieval.ipynb index a64a970f75f1c..6cca1d011a2ac 100644 --- a/docs/docs/expression_language/cookbook/retrieval.ipynb +++ b/docs/docs/expression_language/cookbook/retrieval.ipynb @@ -38,11 +38,12 @@ "source": [ "from operator import itemgetter\n", "\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" + "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" ] }, { diff --git a/docs/docs/expression_language/cookbook/sql_db.ipynb b/docs/docs/expression_language/cookbook/sql_db.ipynb index f2ed565ead8c2..039d2f26ba0a5 100644 --- a/docs/docs/expression_language/cookbook/sql_db.ipynb +++ b/docs/docs/expression_language/cookbook/sql_db.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", @@ -93,9 +93,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "\n", diff --git a/docs/docs/expression_language/cookbook/tools.ipynb b/docs/docs/expression_language/cookbook/tools.ipynb index cd7fdff3289fd..8d5ab06005809 100644 --- a/docs/docs/expression_language/cookbook/tools.ipynb +++ b/docs/docs/expression_language/cookbook/tools.ipynb @@ -27,10 +27,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", "from langchain.tools import DuckDuckGoSearchRun\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.output_parsers import StrOutputParser" ] }, { diff --git a/docs/docs/expression_language/get_started.ipynb b/docs/docs/expression_language/get_started.ipynb index f4f963a62ace2..d44947de19b81 100644 --- a/docs/docs/expression_language/get_started.ipynb +++ b/docs/docs/expression_language/get_started.ipynb @@ -32,28 +32,28 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 7, "id": 
"466b65b3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always drip when things heat up!\"" + "\"Why did the ice cream go to therapy?\\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\"" ] }, - "execution_count": 1, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n", - "model = ChatOpenAI(model=\"gpt-4\")\n", + "model = ChatOpenAI()\n", "output_parser = StrOutputParser()\n", "\n", "chain = prompt | model | output_parser\n", @@ -89,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 8, "id": "b8656990", "metadata": {}, "outputs": [ @@ -99,7 +99,7 @@ "ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])" ] }, - "execution_count": 2, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -111,7 +111,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 9, "id": "e6034488", "metadata": {}, "outputs": [ @@ -121,7 +121,7 @@ "[HumanMessage(content='tell me a short joke about ice cream')]" ] }, - "execution_count": 3, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -132,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 10, "id": "60565463", "metadata": {}, "outputs": [ @@ -142,7 +142,7 @@ "'Human: tell me a short joke about ice cream'" ] }, - "execution_count": 4, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -163,17 +163,17 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 11, "id": "33cf5f72", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content=\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always bring a melt down!\")" + "AIMessage(content=\"Why did the ice cream go to therapy? \\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\")" ] }, - "execution_count": 5, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -193,23 +193,23 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 12, "id": "8feb05da", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'\\n\\nRobot: Why did the ice cream truck break down? Because it had a meltdown!'" + "'\\n\\nRobot: Why did the ice cream go to therapy? 
Because it had a rocky road.'" ] }, - "execution_count": 6, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain_openai.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm.invoke(prompt_value)" @@ -324,12 +324,12 @@ "# Requires:\n", "# pip install langchain docarray tiktoken\n", "\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import DocArrayInMemorySearch\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n", - "from langchain_openai.chat_models import ChatOpenAI\n", - "from langchain_openai.embeddings import OpenAIEmbeddings\n", "\n", "vectorstore = DocArrayInMemorySearch.from_texts(\n", " [\"harrison worked at kensho\", \"bears like to eat honey\"],\n", @@ -486,7 +486,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.9.1" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/binding.ipynb b/docs/docs/expression_language/how_to/binding.ipynb index f81709d627fc6..087850902fd12 100644 --- a/docs/docs/expression_language/how_to/binding.ipynb +++ b/docs/docs/expression_language/how_to/binding.ipynb @@ -19,10 +19,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.runnables import RunnablePassthrough" ] }, { diff --git a/docs/docs/expression_language/how_to/configure.ipynb b/docs/docs/expression_language/how_to/configure.ipynb index 120ac3c54b68c..9e208ad25ce0f 100644 --- a/docs/docs/expression_language/how_to/configure.ipynb +++ b/docs/docs/expression_language/how_to/configure.ipynb @@ -42,8 +42,8 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import ConfigurableField\n", - "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(temperature=0).configurable_fields(\n", " temperature=ConfigurableField(\n", @@ -264,9 +264,8 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatAnthropic\n", - "from langchain_core.runnables import ConfigurableField\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n", + "from langchain_core.runnables import ConfigurableField" ] }, { diff --git a/docs/docs/expression_language/how_to/decorator.ipynb b/docs/docs/expression_language/how_to/decorator.ipynb deleted file mode 100644 index 700354c8f5c7a..0000000000000 --- a/docs/docs/expression_language/how_to/decorator.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b45110ef", - "metadata": {}, - "source": [ - "# Create a runnable with the `@chain` decorator\n", - "\n", - "You can also 
turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionaly equivalent to wrapping in a [`RunnableLambda`](./functions).\n", - "\n", - "This will have the benefit of improved observability by tracing your chain correctly. Any calls to runnables inside this function will be traced as nested childen.\n", - "\n", - "It will also allow you to use this as any other runnable, compose it in chain, etc.\n", - "\n", - "Let's take a look at this in action!" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "d9370420", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import chain\n", - "from langchain_openai import ChatOpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "b7f74f7e", - "metadata": {}, - "outputs": [], - "source": [ - "prompt1 = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n", - "prompt2 = ChatPromptTemplate.from_template(\"What is the subject of this joke: {joke}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "2b0365c4", - "metadata": {}, - "outputs": [], - "source": [ - "@chain\n", - "def custom_chain(text):\n", - " prompt_val1 = prompt1.invoke({\"topic\": text})\n", - " output1 = ChatOpenAI().invoke(prompt_val1)\n", - " parsed_output1 = StrOutputParser().invoke(output1)\n", - " chain2 = prompt2 | ChatOpenAI() | StrOutputParser()\n", - " return chain2.invoke({\"joke\": parsed_output1})" - ] - }, - { - "cell_type": "markdown", - "id": "904d6872", - "metadata": {}, - "source": [ - "`custom_chain` is now a runnable, meaning you will need to use `invoke`" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "6448bdd3", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The subject of this joke is bears.'" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "custom_chain.invoke(\"bears\")" - ] - }, - { - "cell_type": "markdown", - "id": "aa767ea9", - "metadata": {}, - "source": [ - "If you check out your LangSmith traces, you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f1245bdc", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/docs/expression_language/how_to/fallbacks.ipynb b/docs/docs/expression_language/how_to/fallbacks.ipynb index 42a94667de95d..e736c984e48cf 100644 --- a/docs/docs/expression_language/how_to/fallbacks.ipynb +++ b/docs/docs/expression_language/how_to/fallbacks.ipynb @@ -31,8 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatAnthropic\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI" ] }, { @@ -142,7 +141,7 @@ } ], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts 
import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -242,7 +241,7 @@ "source": [ "# Now lets create a chain with the normal OpenAI model\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", @@ -292,7 +291,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/functions.ipynb b/docs/docs/expression_language/how_to/functions.ipynb index ede22edeebca3..d26caac51cdd1 100644 --- a/docs/docs/expression_language/how_to/functions.ipynb +++ b/docs/docs/expression_language/how_to/functions.ipynb @@ -33,9 +33,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnableLambda\n", - "from langchain_openai import ChatOpenAI\n", "\n", "\n", "def length_function(text):\n", @@ -190,7 +190,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/generators.ipynb b/docs/docs/expression_language/how_to/generators.ipynb index 73fd2a099ac22..caf1bade4b2e2 100644 --- a/docs/docs/expression_language/how_to/generators.ipynb +++ b/docs/docs/expression_language/how_to/generators.ipynb @@ -33,8 +33,8 @@ "from typing import Iterator, List\n", "\n", "from langchain.prompts.chat import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"Write a comma-separated list of 5 animals similar to: {animal}\"\n", diff --git a/docs/docs/expression_language/how_to/inspect.ipynb b/docs/docs/expression_language/how_to/inspect.ipynb deleted file mode 100644 index e61654b1f08aa..0000000000000 --- a/docs/docs/expression_language/how_to/inspect.ipynb +++ /dev/null @@ -1,236 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "8c5eb99a", - "metadata": {}, - "source": [ - "# Inspect your runnables\n", - "\n", - "Once you create a runnable with LCEL, you may often want to inspect it to get a better sense for what is going on. This notebook covers some methods for doing so.\n", - "\n", - "First, let's create an example LCEL. 
We will create one that does retrieval" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8bc5d235", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install langchain openai faiss-cpu tiktoken" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "a88f4b24", - "metadata": {}, - "outputs": [], - "source": [ - "from operator import itemgetter\n", - "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.vectorstores import FAISS\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "139228c2", - "metadata": {}, - "outputs": [], - "source": [ - "vectorstore = FAISS.from_texts(\n", - " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", - ")\n", - "retriever = vectorstore.as_retriever()\n", - "\n", - "template = \"\"\"Answer the question based only on the following context:\n", - "{context}\n", - "\n", - "Question: {question}\n", - "\"\"\"\n", - "prompt = ChatPromptTemplate.from_template(template)\n", - "\n", - "model = ChatOpenAI()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "70e3fe93", - "metadata": {}, - "outputs": [], - "source": [ - "chain = (\n", - " {\"context\": retriever, \"question\": RunnablePassthrough()}\n", - " | prompt\n", - " | model\n", - " | StrOutputParser()\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "849e3c42", - "metadata": {}, - "source": [ - "## Get a graph\n", - "\n", - "You can get a graph of the runnable" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2448b6c2", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Graph(nodes={'7308e6063c6d40818c5a0cc1cc7444f2': Node(id='7308e6063c6d40818c5a0cc1cc7444f2', data=Input'>), '292bbd8021d44ec3a31fbe724d9002c1': Node(id='292bbd8021d44ec3a31fbe724d9002c1', data=Output'>), '9212f219cf05488f95229c56ea02b192': Node(id='9212f219cf05488f95229c56ea02b192', data=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=)), 'c7a8e65fa5cf44b99dbe7d1d6e36886f': Node(id='c7a8e65fa5cf44b99dbe7d1d6e36886f', data=RunnablePassthrough()), '818b9bfd40a341008373d5b9f9d0784b': Node(id='818b9bfd40a341008373d5b9f9d0784b', data=ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\\n{context}\\n\\nQuestion: {question}\\n'))])), 'b9f1d3ddfa6b4334a16ea439df22b11e': Node(id='b9f1d3ddfa6b4334a16ea439df22b11e', data=ChatOpenAI(client=, openai_api_key='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', openai_proxy='')), '2bf84f6355c44731848345ca7d0f8ab9': Node(id='2bf84f6355c44731848345ca7d0f8ab9', data=StrOutputParser()), '1aeb2da5da5a43bb8771d3f338a473a2': Node(id='1aeb2da5da5a43bb8771d3f338a473a2', data=)}, edges=[Edge(source='7308e6063c6d40818c5a0cc1cc7444f2', target='9212f219cf05488f95229c56ea02b192'), Edge(source='9212f219cf05488f95229c56ea02b192', target='292bbd8021d44ec3a31fbe724d9002c1'), Edge(source='7308e6063c6d40818c5a0cc1cc7444f2', target='c7a8e65fa5cf44b99dbe7d1d6e36886f'), Edge(source='c7a8e65fa5cf44b99dbe7d1d6e36886f', target='292bbd8021d44ec3a31fbe724d9002c1'), Edge(source='292bbd8021d44ec3a31fbe724d9002c1', target='818b9bfd40a341008373d5b9f9d0784b'), 
Edge(source='818b9bfd40a341008373d5b9f9d0784b', target='b9f1d3ddfa6b4334a16ea439df22b11e'), Edge(source='2bf84f6355c44731848345ca7d0f8ab9', target='1aeb2da5da5a43bb8771d3f338a473a2'), Edge(source='b9f1d3ddfa6b4334a16ea439df22b11e', target='2bf84f6355c44731848345ca7d0f8ab9')])" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain.get_graph()" - ] - }, - { - "cell_type": "markdown", - "id": "065b02fb", - "metadata": {}, - "source": [ - "## Print a graph\n", - "\n", - "While that is not super legible, you can print it to get a display that's easier to understand" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d5ab1515", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " +---------------------------------+ \n", - " | ParallelInput | \n", - " +---------------------------------+ \n", - " ** ** \n", - " *** *** \n", - " ** ** \n", - "+----------------------+ +-------------+ \n", - "| VectorStoreRetriever | | Passthrough | \n", - "+----------------------+ +-------------+ \n", - " ** ** \n", - " *** *** \n", - " ** ** \n", - " +----------------------------------+ \n", - " | ParallelOutput | \n", - " +----------------------------------+ \n", - " * \n", - " * \n", - " * \n", - " +--------------------+ \n", - " | ChatPromptTemplate | \n", - " +--------------------+ \n", - " * \n", - " * \n", - " * \n", - " +------------+ \n", - " | ChatOpenAI | \n", - " +------------+ \n", - " * \n", - " * \n", - " * \n", - " +-----------------+ \n", - " | StrOutputParser | \n", - " +-----------------+ \n", - " * \n", - " * \n", - " * \n", - " +-----------------------+ \n", - " | StrOutputParserOutput | \n", - " +-----------------------+ \n" - ] - } - ], - "source": [ - "chain.get_graph().print_ascii()" - ] - }, - { - "cell_type": "markdown", - "id": "2babf851", - "metadata": {}, - "source": [ - "## Get the prompts\n", - "\n", - "An important part of every chain is the prompts that are used. 
You can get the graphs present in the chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "34b2118d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\\n{context}\\n\\nQuestion: {question}\\n'))])]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain.get_prompts()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed965769", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb index d5dbaa76ca1d7..dd3f15aca947a 100644 --- a/docs/docs/expression_language/how_to/map.ipynb +++ b/docs/docs/expression_language/how_to/map.ipynb @@ -44,11 +44,12 @@ } ], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", @@ -127,11 +128,12 @@ "source": [ "from operator import itemgetter\n", "\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", @@ -190,9 +192,9 @@ } ], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnableParallel\n", - "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n", diff --git a/docs/docs/expression_language/how_to/message_history.ipynb b/docs/docs/expression_language/how_to/message_history.ipynb index 55c631e82fdf7..929c2c53d21cb 100644 --- a/docs/docs/expression_language/how_to/message_history.ipynb +++ b/docs/docs/expression_language/how_to/message_history.ipynb @@ -131,10 +131,10 @@ "source": [ "from typing 
import Optional\n", "\n", + "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_community.chat_message_histories import RedisChatMessageHistory\n", "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.chat_history import BaseChatMessageHistory\n", - "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables.history import RunnableWithMessageHistory" ] }, diff --git a/docs/docs/expression_language/how_to/passthrough.ipynb b/docs/docs/expression_language/how_to/passthrough.ipynb index dac8dd60d80ca..2399eb338f41d 100644 --- a/docs/docs/expression_language/how_to/passthrough.ipynb +++ b/docs/docs/expression_language/how_to/passthrough.ipynb @@ -97,11 +97,12 @@ } ], "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", diff --git a/docs/docs/expression_language/interface.ipynb b/docs/docs/expression_language/interface.ipynb index 701fe9208d73e..1d0a92c7ec602 100644 --- a/docs/docs/expression_language/interface.ipynb +++ b/docs/docs/expression_language/interface.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n", @@ -659,10 +659,10 @@ } ], "source": [ + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "template = \"\"\"Answer the question based only on the following context:\n", "{context}\n", diff --git a/docs/docs/expression_language/why.ipynb b/docs/docs/expression_language/why.ipynb index 9ee71c4e77108..80f5c40e47941 100644 --- a/docs/docs/expression_language/why.ipynb +++ b/docs/docs/expression_language/why.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_openai import ChatOpenAI\n", - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", "\n", @@ -389,7 +389,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm_chain = (\n", @@ -1002,12 +1002,11 @@ "source": [ "import os\n", "\n", - "from langchain_community.chat_models import ChatAnthropic\n", - "from langchain_openai import ChatOpenAI\n", - "from langchain_openai import OpenAI\n", + "from 
langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import RunnablePassthrough, ConfigurableField\n", + "from langchain_core.runnables import RunnablePassthrough\n", "\n", "os.environ[\"LANGCHAIN_API_KEY\"] = \"...\"\n", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx index 9279c62a6746b..fd9b5b2371a3d 100644 --- a/docs/docs/get_started/quickstart.mdx +++ b/docs/docs/get_started/quickstart.mdx @@ -70,10 +70,10 @@ For this getting started guide, we will provide two options: using OpenAI (a pop -First we'll need to import the LangChain x OpenAI integration package. +First we'll need to install their Python package: ```shell -pip install langchain_openai +pip install openai ``` Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running: @@ -85,7 +85,7 @@ export OPENAI_API_KEY="..." We can then initialize the model: ```python -from langchain_openai import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI() ``` @@ -93,7 +93,7 @@ llm = ChatOpenAI() If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class: ```python -from langchain_openai import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI(openai_api_key="...") ``` @@ -128,7 +128,7 @@ We can also guide it's response with a prompt template. Prompt templates are used to convert raw user input to a better input to the LLM. ```python -from langchain_core.prompts import ChatPromptTemplate +from langchain.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages([ ("system", "You are world class technical documentation writer."), ("user", "{input}") @@ -199,10 +199,10 @@ For embedding models, we once again provide examples for accessing via OpenAI or -Make sure you have the `langchain_openai` package installed an the appropriate environment variables set (these are the same as needed for the LLM). +Make sure you have the openai package installed an the appropriate environment variables set (these are the same as needed for the LLM). ```python -from langchain_openai import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() ``` @@ -416,7 +416,7 @@ pip install langchainhub Now we can use it to get a predefined prompt ```python -from langchain_openai import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain import hub from langchain.agents import create_openai_functions_agent from langchain.agents import AgentExecutor @@ -479,15 +479,15 @@ To create a server for our application we'll make a `serve.py` file. 
This will c from typing import List from fastapi import FastAPI -from langchain_core.prompts import ChatPromptTemplate -from langchain_openai import ChatOpenAI +from langchain.prompts import ChatPromptTemplate +from langchain_community.chat_models import ChatOpenAI from langchain_community.document_loaders import WebBaseLoader -from langchain_openai import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import DocArrayInMemorySearch from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.tools.retriever import create_retriever_tool from langchain_community.tools.tavily_search import TavilySearchResults -from langchain_openai import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain import hub from langchain.agents import create_openai_functions_agent from langchain.agents import AgentExecutor diff --git a/docs/docs/guides/debugging.md b/docs/docs/guides/debugging.md index e2607ad847c93..ba4c3ffc4cabf 100644 --- a/docs/docs/guides/debugging.md +++ b/docs/docs/guides/debugging.md @@ -25,7 +25,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes ```python from langchain.agents import AgentType, initialize_agent, load_tools -from langchain_openai import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI(model_name="gpt-4", temperature=0) tools = load_tools(["ddg-search", "llm-math"], llm=llm) @@ -656,6 +656,6 @@ agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is ## Other callbacks -`Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [FileCallbackHandler](/docs/modules/callbacks/filecallbackhandler). You can also implement your own callbacks to execute custom functionality. +`Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [FileCallbackHandler](/docs/modules/callbacks/how_to/filecallbackhandler). You can also implement your own callbacks to execute custom functionality. See here for more info on [Callbacks](/docs/modules/callbacks/), how to use them, and customize them. diff --git a/docs/docs/guides/deployments/index.mdx b/docs/docs/guides/deployments/index.mdx index c075c3b92ee92..92bf63641408e 100644 --- a/docs/docs/guides/deployments/index.mdx +++ b/docs/docs/guides/deployments/index.mdx @@ -20,11 +20,11 @@ This guide aims to provide a comprehensive overview of the requirements for depl Understanding these components is crucial when assessing serving systems. LangChain integrates with several open-source projects designed to tackle these issues, providing a robust framework for productionizing your LLM applications. 
Some notable frameworks include: -- [Ray Serve](/docs/integrations/providers/ray_serve) +- [Ray Serve](/docs/ecosystem/integrations/ray_serve) - [BentoML](https://github.com/bentoml/BentoML) -- [OpenLLM](/docs/integrations/providers/openllm) -- [Modal](/docs/integrations/providers/modal) -- [Jina](/docs/integrations/providers/jina) +- [OpenLLM](/docs/ecosystem/integrations/openllm) +- [Modal](/docs/ecosystem/integrations/modal) +- [Jina](/docs/ecosystem/integrations/jina#deployment) These links will provide further information on each ecosystem, assisting you in finding the best fit for your LLM deployment needs. diff --git a/docs/docs/guides/evaluation/examples/comparisons.ipynb b/docs/docs/guides/evaluation/examples/comparisons.ipynb index 2d5105896b349..b49776d971bb2 100644 --- a/docs/docs/guides/evaluation/examples/comparisons.ipynb +++ b/docs/docs/guides/evaluation/examples/comparisons.ipynb @@ -99,8 +99,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.utilities import SerpAPIWrapper\n", - "from langchain_openai import ChatOpenAI\n", "\n", "# Initialize the language model\n", "# You can add your own OpenAI API key by adding openai_api_key=\"\"\n", diff --git a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb index 88ab1685ce626..f7c42d5392e4c 100644 --- a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb @@ -25,7 +25,7 @@ "outputs": [], "source": [ "from langchain.evaluation import load_evaluator\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "evaluator = load_evaluator(\"labeled_score_string\", llm=ChatOpenAI(model=\"gpt-4\"))" ] diff --git a/docs/docs/guides/evaluation/trajectory/custom.ipynb b/docs/docs/guides/evaluation/trajectory/custom.ipynb index 08c706552ee4d..11823f4035410 100644 --- a/docs/docs/guides/evaluation/trajectory/custom.ipynb +++ b/docs/docs/guides/evaluation/trajectory/custom.ipynb @@ -26,7 +26,7 @@ "from langchain.chains import LLMChain\n", "from langchain.evaluation import AgentTrajectoryEvaluator\n", "from langchain.schema import AgentAction\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "\n", "class StepNecessityEvaluator(AgentTrajectoryEvaluator):\n", diff --git a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb index ef5e75a6da2c8..8c7a04cf0c7ba 100644 --- a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -76,7 +76,7 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain.tools import tool\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from pydantic import HttpUrl\n", "\n", "\n", diff --git a/docs/docs/guides/fallbacks.ipynb b/docs/docs/guides/fallbacks.ipynb index 57a201f844257..2f354be3c2b04 100644 --- a/docs/docs/guides/fallbacks.ipynb +++ b/docs/docs/guides/fallbacks.ipynb @@ -33,8 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatAnthropic\n", - "from langchain_openai import ChatOpenAI" + "from langchain_community.chat_models 
import ChatAnthropic, ChatOpenAI" ] }, { @@ -144,7 +143,7 @@ } ], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -207,7 +206,7 @@ "source": [ "# Now lets create a chain with the normal OpenAI model\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_openai import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", diff --git a/docs/docs/guides/model_laboratory.ipynb b/docs/docs/guides/model_laboratory.ipynb index 785ac8dd90bc9..538d4942814cb 100644 --- a/docs/docs/guides/model_laboratory.ipynb +++ b/docs/docs/guides/model_laboratory.ipynb @@ -21,8 +21,7 @@ "source": [ "from langchain.model_laboratory import ModelLaboratory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import Cohere, HuggingFaceHub\n", - "from langchain_openai import OpenAI" + "from langchain_community.llms import Cohere, HuggingFaceHub, OpenAI" ] }, { diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb index 7a973beb7c7c8..539658b1ecc35 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb @@ -130,7 +130,7 @@ ], "source": [ "from langchain.prompts.prompt import PromptTemplate\n", - "from langchain_openai import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "anonymizer = PresidioAnonymizer()\n", "\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb index c1bd2028a920c..728ef65cd9f50 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb @@ -638,8 +638,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", - "from langchain_openai import OpenAIEmbeddings\n", "\n", "# 2. Load the data: In our case data's already loaded\n", "# 3. Anonymize the data before indexing\n", @@ -664,14 +664,14 @@ "source": [ "from operator import itemgetter\n", "\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models.openai import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import (\n", " RunnableLambda,\n", " RunnableParallel,\n", " RunnablePassthrough,\n", ")\n", - "from langchain_openai import ChatOpenAI\n", "\n", "# 6. 
Create anonymizer chain\n",
     "template = \"\"\"Answer the question based only on the following context:\n",
diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb
index bf49bba76223b..bcc66c08298ef 100644
--- a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb
+++ b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb
@@ -208,7 +208,7 @@
    ],
    "source": [
     "from langchain.prompts.prompt import PromptTemplate\n",
-    "from langchain_openai import ChatOpenAI\n",
+    "from langchain_community.chat_models import ChatOpenAI\n",
     "\n",
     "anonymizer = PresidioReversibleAnonymizer()\n",
     "\n",
diff --git a/docs/docs/guides/safety/constitutional_chain.mdx b/docs/docs/guides/safety/constitutional_chain.mdx
index 4b982501315f4..38356470adae3 100644
--- a/docs/docs/guides/safety/constitutional_chain.mdx
+++ b/docs/docs/guides/safety/constitutional_chain.mdx
@@ -12,7 +12,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c
 
 ```python
 # Imports
-from langchain_openai import OpenAI
+from langchain_community.llms import OpenAI
 from langchain.prompts import PromptTemplate
 from langchain.chains.llm import LLMChain
 from langchain.chains.constitutional_ai.base import ConstitutionalChain
diff --git a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb
index f7c30d3e1eb24..fc648f81a024b 100644
--- a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb
+++ b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb
@@ -28,7 +28,9 @@
    "cell_type": "code",
    "execution_count": null,
    "id": "9bdbfdc7c949a9c1",
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "!pip install \"optimum[onnxruntime]\""
@@ -42,7 +44,8 @@
    "ExecuteTime": {
     "end_time": "2023-12-18T11:41:24.738278Z",
     "start_time": "2023-12-18T11:41:20.842567Z"
-    }
+    },
+    "collapsed": false
   },
   "outputs": [],
   "source": [
@@ -77,9 +80,7 @@
    "outputs": [
     {
      "data": {
-      "text/plain": [
-       "'hugging_face_injection_identifier'"
-      ]
+      "text/plain": "'hugging_face_injection_identifier'"
      },
      "execution_count": 10,
      "metadata": {},
@@ -118,9 +119,7 @@
    "outputs": [
     {
      "data": {
-      "text/plain": [
-       "'Name 5 cities with the biggest number of inhabitants'"
-      ]
+      "text/plain": "'Name 5 cities with the biggest number of inhabitants'"
      },
      "execution_count": 11,
      "metadata": {},
@@ -207,7 +206,7 @@
    ],
    "source": [
     "from langchain.agents import AgentType, initialize_agent\n",
-    "from langchain_openai import OpenAI\n",
+    "from langchain_community.llms import OpenAI\n",
     "\n",
     "llm = OpenAI(temperature=0)\n",
     "agent = initialize_agent(\n",
@@ -375,7 +374,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.1"
+   "version": "3.9.1"
   }
  },
  "nbformat": 4,
diff --git a/docs/docs/guides/safety/index.mdx b/docs/docs/guides/safety/index.mdx
index b5d047d771ed4..8b97fdda7865b 100644
--- a/docs/docs/guides/safety/index.mdx
+++ b/docs/docs/guides/safety/index.mdx
@@ -4,6 +4,6 @@ One of the key concerns with using LLMs is that they may generate harmful or une
 
 - [Amazon Comprehend moderation chain](/docs/guides/safety/amazon_comprehend_chain): Use [Amazon Comprehend](https://aws.amazon.com/comprehend/) to detect and handle Personally Identifiable Information (PII) and toxicity.
 - [Constitutional chain](/docs/guides/safety/constitutional_chain): Prompt the model with a set of principles which should guide the model behavior.
-- [Hugging Face prompt injection identification](/docs/guides/safety/hugging_face_prompt_injection): Detect and handle prompt injection attacks.
+- [Hugging Face prompt injection identification](/docs/guides/safety/huggingface_prompt_injection_identification): Detect and handle prompt injection attacks.
 - [Logical Fallacy chain](/docs/guides/safety/logical_fallacy_chain): Checks the model output against logical fallacies to correct any deviation.
 - [Moderation chain](/docs/guides/safety/moderation): Check if any output text is harmful and flag it.
diff --git a/docs/docs/guides/safety/logical_fallacy_chain.mdx b/docs/docs/guides/safety/logical_fallacy_chain.mdx
index d25dd37cd3a47..108a8520616b0 100644
--- a/docs/docs/guides/safety/logical_fallacy_chain.mdx
+++ b/docs/docs/guides/safety/logical_fallacy_chain.mdx
@@ -21,7 +21,7 @@ Therefore, it is crucial that model developers proactively address logical falla
 
 ```python
 # Imports
-from langchain_openai import OpenAI
+from langchain_community.llms import OpenAI
 from langchain.prompts import PromptTemplate
 from langchain.chains.llm import LLMChain
 from langchain_experimental.fallacy_removal.base import FallacyChain
diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx
index 94b6a7dc642e1..72353fa231179 100644
--- a/docs/docs/guides/safety/moderation.mdx
+++ b/docs/docs/guides/safety/moderation.mdx
@@ -22,7 +22,7 @@ We'll show:
 
 ```python
-from langchain_openai import OpenAI
+from langchain_community.llms import OpenAI
 from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain
 from langchain.prompts import PromptTemplate
 ```
diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb
index 8953805e7661a..0fed211592c61 100644
--- a/docs/docs/integrations/callbacks/argilla.ipynb
+++ b/docs/docs/integrations/callbacks/argilla.ipynb
@@ -215,7 +215,7 @@
    ],
    "source": [
     "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n",
-    "from langchain_openai import OpenAI\n",
+    "from langchain_community.llms import OpenAI\n",
     "\n",
     "argilla_callback = ArgillaCallbackHandler(\n",
     "    dataset_name=\"langchain-dataset\",\n",
@@ -281,7 +281,7 @@
    "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.prompts import PromptTemplate\n",
-   "from langchain_openai import OpenAI\n",
+   "from langchain_community.llms import OpenAI\n",
    "\n",
    "argilla_callback = ArgillaCallbackHandler(\n",
    "    dataset_name=\"langchain-dataset\",\n",
@@ -363,7 +363,7 @@
    "source": [
     "from langchain.agents import AgentType, initialize_agent, load_tools\n",
     "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n",
-    "from langchain_openai import OpenAI\n",
+    "from langchain_community.llms import OpenAI\n",
     "\n",
     "argilla_callback = ArgillaCallbackHandler(\n",
     "    dataset_name=\"langchain-dataset\",\n",
diff --git a/docs/docs/integrations/callbacks/confident.ipynb b/docs/docs/integrations/callbacks/confident.ipynb
index af880da0b6b93..6ef644b33e5c9 100644
--- a/docs/docs/integrations/callbacks/confident.ipynb
+++ b/docs/docs/integrations/callbacks/confident.ipynb
@@ -152,7 +152,7 @@
    }
   ],
   "source": [
-   "from langchain_openai import OpenAI\n",
+   "from langchain_community.llms import OpenAI\n",
    "\n",
    "llm = OpenAI(\n",
    "    temperature=0,\n",
@@ -217,8 +217,9 @@
    "from langchain.chains import RetrievalQA\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "from langchain_community.document_loaders import TextLoader\n",
+   "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
+   "from langchain_community.llms import OpenAI\n",
    "from langchain_community.vectorstores import Chroma\n",
-   "from langchain_openai import OpenAI, OpenAIEmbeddings\n",
    "\n",
    "text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n",
    "\n",
diff --git a/docs/docs/integrations/callbacks/context.ipynb b/docs/docs/integrations/callbacks/context.ipynb
index 015c636633428..ed65e084c5049 100644
--- a/docs/docs/integrations/callbacks/context.ipynb
+++ b/docs/docs/integrations/callbacks/context.ipynb
@@ -104,7 +104,7 @@
    "    HumanMessage,\n",
    "    SystemMessage,\n",
    ")\n",
-   "from langchain_openai import ChatOpenAI\n",
+   "from langchain_community.chat_models import ChatOpenAI\n",
    "\n",
    "token = os.environ[\"CONTEXT_API_TOKEN\"]\n",
    "\n",
@@ -162,7 +162,7 @@
    "    ChatPromptTemplate,\n",
    "    HumanMessagePromptTemplate,\n",
    ")\n",
-   "from langchain_openai import ChatOpenAI\n",
+   "from langchain_community.chat_models import ChatOpenAI\n",
    "\n",
    "token = os.environ[\"CONTEXT_API_TOKEN\"]\n",
    "\n",
diff --git a/docs/docs/integrations/callbacks/infino.ipynb b/docs/docs/integrations/callbacks/infino.ipynb
index 4b318b1378eb1..a148c8823438b 100644
--- a/docs/docs/integrations/callbacks/infino.ipynb
+++ b/docs/docs/integrations/callbacks/infino.ipynb
@@ -54,7 +54,7 @@
    "import matplotlib.pyplot as plt\n",
    "from infinopy import InfinoClient\n",
    "from langchain.callbacks import InfinoCallbackHandler\n",
-   "from langchain_openai import OpenAI"
+   "from langchain_community.llms import OpenAI"
   ]
  },
 {
@@ -316,8 +316,8 @@
    "# os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
    "\n",
    "from langchain.chains.summarize import load_summarize_chain\n",
+   "from langchain_community.chat_models import ChatOpenAI\n",
    "from langchain_community.document_loaders import WebBaseLoader\n",
-   "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# Create callback handler. This logs latency, errors, token usage, prompts, as well as prompt responses to Infino.\n",
   "handler = InfinoCallbackHandler(\n",
diff --git a/docs/docs/integrations/callbacks/labelstudio.ipynb b/docs/docs/integrations/callbacks/labelstudio.ipynb
index 4e59c91dc1bfc..9ca111ffd09d1 100644
--- a/docs/docs/integrations/callbacks/labelstudio.ipynb
+++ b/docs/docs/integrations/callbacks/labelstudio.ipynb
@@ -171,7 +171,7 @@
    "outputs": [],
    "source": [
     "from langchain.callbacks import LabelStudioCallbackHandler\n",
-    "from langchain_openai import OpenAI\n",
+    "from langchain_community.llms import OpenAI\n",
     "\n",
     "llm = OpenAI(\n",
     "    temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n",
@@ -243,7 +243,7 @@
    "source": [
     "from langchain.callbacks import LabelStudioCallbackHandler\n",
     "from langchain.schema import HumanMessage, SystemMessage\n",
-    "from langchain_openai import ChatOpenAI\n",
+    "from langchain_community.chat_models import ChatOpenAI\n",
     "\n",
     "chat_llm = ChatOpenAI(\n",
     "    callbacks=[\n",
diff --git a/docs/docs/integrations/callbacks/lunary.md b/docs/docs/integrations/callbacks/lunary.md
new file mode 100644
index 0000000000000..49405d9112cb4
--- /dev/null
+++ b/docs/docs/integrations/callbacks/lunary.md
@@ -0,0 +1,140 @@
+# Lunary
+
+>[Lunary](https://lunary.ai?utm_source=langchain&utm_medium=py&utm_campaign=docs) is an open-source platform that provides observability (cost and usage analytics, logs and tracing), prompt management, and evaluations.
+
+
+
+## Setup
+
+Create an account on [lunary.ai](https://lunary.ai?utm_source=langchain&utm_medium=py&utm_campaign=docs), then copy your new app's `tracking id`.
+
+Once you have it, set it as an environment variable by running:
+
+```bash
+export LUNARY_APP_ID="..."
+```
+
+If you'd prefer not to set an environment variable, you can pass the app ID directly when initializing the callback handler:
+
+```python
+from langchain_community.callbacks.lunary_callback import LunaryCallbackHandler
+
+handler = LunaryCallbackHandler(app_id="...")
+```
+
+## Usage with LLM/Chat models
+
+```python
+from langchain.llms import OpenAI
+from langchain.chat_models import ChatOpenAI
+from langchain_community.callbacks.lunary_callback import LunaryCallbackHandler
+
+handler = LunaryCallbackHandler()
+
+llm = OpenAI(
+    callbacks=[handler],
+)
+
+chat = ChatOpenAI(callbacks=[handler])
+
+llm("Tell me a joke")
+```
+
+## Usage with chains and agents
+
+Make sure to pass the callback handler to the `run` method so that all related chains and LLM calls are correctly tracked.
+
+It is also recommended to pass `agent_name` in the metadata so you can distinguish between agents in the dashboard.
+
+Example:
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import SystemMessage, HumanMessage
+from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool
+from langchain_community.callbacks.lunary_callback import LunaryCallbackHandler
+
+llm = ChatOpenAI(temperature=0)
+
+handler = LunaryCallbackHandler()
+
+@tool
+def get_word_length(word: str) -> int:
+    """Returns the length of a word."""
+    return len(word)
+
+tools = [get_word_length]
+
+prompt = OpenAIFunctionsAgent.create_prompt(
+    system_message=SystemMessage(
+        content="You are a very powerful assistant, but bad at calculating lengths of words."
+    )
+)
+
+agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt, verbose=True)
+agent_executor = AgentExecutor(
+    agent=agent,
+    tools=tools,
+    verbose=True,
+    metadata={"agent_name": "WordCount"},  # <- recommended, assign a custom name
+)
+agent_executor.run("how many letters in the word educa?", callbacks=[handler])
+```
+
+Another example:
+
+```python
+from langchain.agents import load_tools, initialize_agent, AgentType
+from langchain.llms import OpenAI
+from langchain_community.callbacks.lunary_callback import LunaryCallbackHandler
+
+handler = LunaryCallbackHandler()
+
+llm = OpenAI(temperature=0)
+tools = load_tools(["serpapi", "llm-math"], llm=llm)
+agent = initialize_agent(
+    tools,
+    llm,
+    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+    metadata={"agent_name": "GirlfriendAgeFinder"},  # <- recommended, assign a custom name
+)
+
+agent.run(
+    "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
+    callbacks=[handler],
+)
+```
+
+## User Tracking
+
+User tracking allows you to identify your users and track their costs, conversations, and more.
+
+```python
+from langchain.agents import load_tools, initialize_agent, AgentType
+from langchain.llms import OpenAI
+from langchain_community.callbacks.lunary_callback import LunaryCallbackHandler
+from lunary import identify  # `identify` context manager from the lunary SDK (assumed import path)
+
+handler = LunaryCallbackHandler()
+
+llm = OpenAI(temperature=0)
+tools = load_tools(["serpapi", "llm-math"], llm=llm)
+agent = initialize_agent(
+    tools,
+    llm,
+    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+    metadata={"agent_name": "GirlfriendAgeFinder"},
+)
+
+with identify("user-123"):
+    llm("Tell me a joke")
+
+with identify("user-456", user_props={"email": "user456@test.com"}):
+    agent.run("Who is Leo DiCaprio's girlfriend?")
+```
+
+## Support
+
+For any questions or issues with the integration, you can reach out to the Lunary team via [email](mailto:hello@lunary.ai).
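The new Lunary page above only shows agent-style examples, though its advice to pass the callback handler to `run` applies to plain chains as well. A minimal chain-level sketch under the same assumptions (`LUNARY_APP_ID` and `OPENAI_API_KEY` set; the prompt and input strings are purely illustrative):

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain_community.callbacks.lunary_callback import LunaryCallbackHandler

handler = LunaryCallbackHandler()

llm = OpenAI(temperature=0)
prompt = PromptTemplate.from_template(
    "What is a good name for a company that makes {product}?"
)
chain = LLMChain(llm=llm, prompt=prompt)

# Passing the handler at call time (rather than at construction) lets one
# handler be reused across chains, and groups the chain run together with
# its nested LLM call in a single trace.
chain.run("colorful socks", callbacks=[handler])
```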
diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb
index 1b847528bc3b4..de7f52ffd3171 100644
--- a/docs/docs/integrations/callbacks/promptlayer.ipynb
+++ b/docs/docs/integrations/callbacks/promptlayer.ipynb
@@ -79,7 +79,7 @@
    "from langchain.schema import (\n",
    "    HumanMessage,\n",
    ")\n",
-   "from langchain_openai import ChatOpenAI\n",
+   "from langchain_community.chat_models import ChatOpenAI\n",
    "\n",
    "chat_llm = ChatOpenAI(\n",
    "    temperature=0,\n",
@@ -142,7 +142,7 @@
    "source": [
    "import promptlayer  # Don't forget this 🍰\n",
    "from langchain.callbacks import PromptLayerCallbackHandler\n",
-   "from langchain_openai import OpenAI\n",
+   "from langchain_community.llms import OpenAI\n",
    "\n",
    "\n",
    "def pl_id_callback(promptlayer_request_id):\n",
diff --git a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb
index 9203d353510c6..82ecc3298b697 100644
--- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb
+++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb
@@ -83,7 +83,7 @@
    "from langchain.callbacks import SageMakerCallbackHandler\n",
    "from langchain.chains import LLMChain, SimpleSequentialChain\n",
    "from langchain.prompts import PromptTemplate\n",
-   "from langchain_openai import OpenAI\n",
+   "from langchain_community.llms import OpenAI\n",
    "from sagemaker.analytics import ExperimentAnalytics\n",
    "from sagemaker.experiments.run import Run\n",
    "from sagemaker.session import Session"
diff --git a/docs/docs/integrations/callbacks/streamlit.md b/docs/docs/integrations/callbacks/streamlit.md
index 776f0f6d9c26a..28a83daf3ae2e 100644
--- a/docs/docs/integrations/callbacks/streamlit.md
+++ b/docs/docs/integrations/callbacks/streamlit.md
@@ -7,7 +7,7 @@
 [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langchain-ai/streamlit-agent?quickstart=1)
 
 In this guide we will demonstrate how to use `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an
-interactive Streamlit app. Try it out with the running app below using the MRKL agent:
+interactive Streamlit app. Try it out with the running app below using the [MRKL agent](/docs/modules/agents/how_to/mrkl/):
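The streamlit.md hunk ends at the guide's introduction, so for orientation here is a minimal sketch of the pattern the guide goes on to demonstrate (assuming `streamlit` is installed and `OPENAI_API_KEY` is set; a bare `llm-math` agent stands in for the guide's fuller MRKL setup):

```python
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import StreamlitCallbackHandler
from langchain_community.llms import OpenAI

llm = OpenAI(temperature=0, streaming=True)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

if user_input := st.chat_input():
    st.chat_message("user").write(user_input)
    with st.chat_message("assistant"):
        # The handler renders the agent's thoughts and tool calls live
        # into the given Streamlit container as the run progresses.
        st_callback = StreamlitCallbackHandler(st.container())
        response = agent.run(user_input, callbacks=[st_callback])
        st.write(response)
```

Saved as, say, `app.py` (the filename is illustrative), this runs with `streamlit run app.py`.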