diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 15012d6f0dec2..5a678da1c1a68 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -64,7 +64,7 @@ "outputs": [], "source": [ "import os\n", - "\n", + "import getpass\n", "from langchain import hub" ] }, @@ -72,34 +72,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will use a prompt to instruct the model. LangChain prompts can be configured, but for simplicity, we will use a premade prompt from LangSmith. This requires an API key, which can be set up [here](https://www.langchain.com/langsmith) after registration." + "Now we can use the `AzureContentSafetyTextTool` combined with a model, using `create_react_agent`." ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "LANGSMITH_KEY = os.environ[\"LANGSMITH_KEY\"]\n", - "prompt = hub.pull(\"hwchase17/structured-chat-agent\", api_key=LANGSMITH_KEY)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can use the `AzureContentSafetyTextTool` combined with a model, using `create_structured_chat_agent`." 
+ "from langgraph.prebuilt import create_react_agent" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentExecutor, create_structured_chat_agent\n", - "from langchain_community.tools.azure_ai_services.content_safety import (\n", + "from langchain_community.tools.azure_ai_services.content_safety import (\n", " AzureContentSafetyTextTool,\n", ")\n", "from langchain_openai import AzureChatOpenAI" ] }, @@ -121,12 +112,22 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "os.environ[\"CONTENT_SAFETY_ENDPOINT\"] = getpass.getpass()\n", + "os.environ[\"CONTENT_SAFETY_KEY\"] = getpass.getpass()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "content_endpoint = os.environ[\"CONTENT_SAFETY_ENDPOINT\"]\n", - "content_key = os.environ[\"CONTENT_SAFETY_KEY\"]" + "content_key = os.environ[\"CONTENT_SAFETY_KEY\"]" ] }, { @@ -145,37 +146,49 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "cs = AzureContentSafetyTextTool(\n", - " content_safety_key=content_key,\n", - " content_safety_endpoint=content_endpoint,\n", - ")" + "os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()\n", + "os.environ[\"OPENAI_API_VERSION\"] = getpass.getpass()\n", + "os.environ[\"GPT_MODEL\"] = getpass.getpass()\n", + "os.environ[\"AZURE_OPENAI_ENDPOINT\"] = getpass.getpass()" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ - "tools = [cs]\n", - "\n", "model = AzureChatOpenAI(\n", " openai_api_version=os.environ[\"OPENAI_API_VERSION\"],\n", - " azure_deployment=os.environ[\"COMPLETIONS_MODEL\"],\n", + " azure_deployment=os.environ[\"GPT_MODEL\"],\n", " 
azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"],\n", " api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n", ")" ] }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "cs = AzureContentSafetyTextTool(\n", + " content_safety_key=content_key,\n", + " content_safety_endpoint=content_endpoint,\n", + ")\n", + "\n", + "tools = [cs]" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Creating an `AgentExecutor` chain allows a model to use tools to assist in it's response." + "Create a react agent to invoke the tool. " ] }, { @@ -184,11 +197,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = create_structured_chat_agent(model, tools, prompt)\n", - "\n", - "agent_executor = AgentExecutor(\n", - " agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n", - ")" + "agent = create_react_agent(model, tools)" ] }, { @@ -230,20 +239,9 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Hate: 2\\nSelfHarm: 0\\nSexual: 0\\nViolence: 0\\n'" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "cs.invoke({\"query\": input})" ] @@ -264,76 +262,11 @@ }, { "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3m{\n", - " \"action\": \"azure_content_safety_tool\",\n", - " \"action_input\": {\n", - " \"query\": \"I hate you\"\n", - " }\n", - "}\u001b[0m\u001b[36;1m\u001b[1;3mHate: 2\n", - "SelfHarm: 0\n", - "Sexual: 0\n", - "Violence: 0\n", - "\u001b[0m\u001b[32;1m\u001b[1;3m{\n", - " \"action\": \"Final Answer\",\n", - " \"action_input\": \"The text contains hate sentiment with a severity level of 2. 
There is no indication of self-harm, sexual content, or violence.\"\n", - "}\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, - { - "data": { - "text/plain": [ - "{'input': 'Can you check the following text for harmful content : I hate you',\n", - " 'output': 'The text contains hate sentiment with a severity level of 2. There is no indication of self-harm, sexual content, or violence.'}" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent_executor.invoke(\n", - " {\"input\": f\"Can you check the following text for harmful content : {input}\"}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chaining" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When creating an `AgentExecutor` as described earlier, an execution chain is formed. The sequence of events will be printed using the given prompt, and actions will occur in a chain-like manner." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "agent = create_structured_chat_agent(model, tools, prompt)\n", - "\n", - "agent_executor = AgentExecutor(\n", - " agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n", - ")" + "agent.invoke({\"messages\": [(\"user\", f\"Can you check the following text for harmful content : {input}\")]})" ] }, { @@ -347,7 +280,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "[Azure AI Content Safety Overview](https://learn.microsoft.com/azure/ai-services/content-safety/overview) | [Azure AI Content Safety Python SDK](https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python)" + "[Azure AI Content Safety Overview](https://learn.microsoft.com/azure/ai-services/content-safety/overview) | [Azure AI Content Safety Python API](https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python)" ] } ], @@ -367,7 +300,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.12.8" } }, "nbformat": 4, diff --git a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py index 67b6de44124f4..2d478afb45678 100644 --- a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py @@ -31,8 +31,8 @@ class AzureContentSafetyTextTool(BaseTool): requests. """ - content_safety_key: str = "" #: :meta private: - content_safety_endpoint: str = "" #: :meta private: + content_safety_key: Optional[str] = None #: :meta private: + content_safety_endpoint: Optional[str] = None #: :meta private: content_safety_client: Any = None #: :meta private: name: str = "azure_content_safety_tool"