diff --git a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
index e0eb5f55..666c80b9 100644
--- a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
+++ b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
@@ -137,28 +137,6 @@
     "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "id": "7d4a4e2e",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/home/ubuntu/raspawar/langchain-nvidia/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py:583: UserWarning: Default model is set as: meta/llama3-8b-instruct. \n",
-      "Set model using model parameter. \n",
-      "To get available models use available_models property.\n",
-      "  UserWarning,\n"
-     ]
-    }
-   ],
-   "source": [
-    "# OR connect to an embedding NIM running at localhost:8000, with default model(first available model)\n",
-    "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\")"
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "71d37987-d568-4a73-9d2a-8bd86323f8bf",
@@ -594,7 +572,12 @@
     "from langchain.chains import ConversationChain\n",
     "from langchain.memory import ConversationBufferMemory\n",
     "\n",
-    "chat = ChatNVIDIA(model=\"mistralai/mixtral-8x22b-instruct-v0.1\", temperature=0.1, max_tokens=100, top_p=1.0)\n",
+    "chat = ChatNVIDIA(\n",
+    "    model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
+    "    temperature=0.1,\n",
+    "    max_tokens=100,\n",
+    "    top_p=1.0,\n",
+    ")\n",
     "\n",
     "conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
    ]
   },
@@ -649,9 +632,7 @@
    },
    "outputs": [],
    "source": [
-    "conversation.invoke(\"Tell me about yourself.\")[\n",
-    "    \"response\"\n",
-    "]\n"
+    "conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
    ]
   }
  ],