Commit
Merge pull request #55 from langchain-ai/dglogo/nim-model-name-update
nim docs model name update
dglogo authored Jun 15, 2024
2 parents a1a8172 + 70c4e0d commit 4aedd90
Showing 2 changed files with 2 additions and 2 deletions.
2 changes: 1 addition & 1 deletion libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
@@ -134,7 +134,7 @@
"from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
"\n",
"# connect to an embedding NIM running at localhost:8000, specifying a specific model\n",
"llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta-llama3-8b-instruct\")"
"llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")"
]
},
{
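For reference, a minimal runnable sketch of the notebook snippet after this change, assuming a chat NIM is already serving the renamed model `meta/llama3-8b-instruct` at localhost:8000; the prompt string is illustrative:

```python
# Minimal sketch, not part of the diff: assumes a chat NIM is serving
# meta/llama3-8b-instruct at localhost:8000 (the model id introduced by this commit).
from langchain_nvidia_ai_endpoints import ChatNVIDIA

# connect to a chat NIM running at localhost:8000, specifying a specific model
llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")

# illustrative invocation; any prompt works
print(llm.invoke("Write a one-line greeting.").content)
```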
2 changes: 1 addition & 1 deletion libs/ai-endpoints/docs/providers/nvidia.mdx
@@ -62,7 +62,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ
from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank

# connect to a chat NIM running at localhost:8000, specifying a specific model
-llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta-llama3-8b-instruct")
+llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")

# connect to an embedding NIM running at localhost:8080
embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
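Similarly, a minimal sketch of the embedding usage shown in the nvidia.mdx context lines, assuming an embedding NIM is serving at localhost:8080; the query text is illustrative:

```python
# Minimal sketch: assumes an embedding NIM is serving at localhost:8080,
# as in the nvidia.mdx snippet above.
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings

embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")

# illustrative query; embed_query returns a list of floats
vector = embedder.embed_query("What is a NIM?")
print(len(vector))
```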
