From 70c4e0dbdd2e4d735015894e546b81de173af921 Mon Sep 17 00:00:00 2001
From: Daniel Glogowski
Date: Sat, 15 Jun 2024 12:09:17 -0400
Subject: [PATCH] nim docs model name update

---
 libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb | 4 ++--
 libs/ai-endpoints/docs/providers/nvidia.mdx           | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
index 90f7ffe9..08e360e8 100644
--- a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
+++ b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
@@ -134,7 +134,7 @@
     "from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
     "\n",
-    "# connect to an embedding NIM running at localhost:8000, specifying a specific model\n",
-    "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta-llama3-8b-instruct\")"
+    "# connect to a chat NIM running at localhost:8000, specifying a specific model\n",
+    "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")"
    ]
   },
   {
diff --git a/libs/ai-endpoints/docs/providers/nvidia.mdx b/libs/ai-endpoints/docs/providers/nvidia.mdx
index 0e9fa2e9..70f1123c 100644
--- a/libs/ai-endpoints/docs/providers/nvidia.mdx
+++ b/libs/ai-endpoints/docs/providers/nvidia.mdx
@@ -62,7 +62,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ
 from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank
 
-# connect to an chat NIM running at localhost:8000, specifyig a specific model
-llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta-llama3-8b-instruct")
+# connect to a chat NIM running at localhost:8000, specifying a specific model
+llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
 
 # connect to an embedding NIM running at localhost:8080
 embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")