diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
index 85078191..c88c1934 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
@@ -423,6 +423,19 @@ def validate_client(self) -> "Model":
         model_type="chat",
         client="ChatNVIDIA",
     ),
+    "meta/llama-3.2-1b-instruct": Model(
+        id="meta/llama-3.2-1b-instruct",
+        model_type="chat",
+        client="ChatNVIDIA",
+        supports_structured_output=True,
+    ),
+    "meta/llama-3.2-3b-instruct": Model(
+        id="meta/llama-3.2-3b-instruct",
+        model_type="chat",
+        client="ChatNVIDIA",
+        supports_tools=True,
+        supports_structured_output=True,
+    ),
 }
 
 QA_MODEL_TABLE = {
@@ -494,6 +507,18 @@ def validate_client(self) -> "Model":
         client="ChatNVIDIA",
         endpoint="https://ai.api.nvidia.com/v1/vlm/nvidia/vila",
     ),
+    "meta/llama-3.2-11b-vision-instruct": Model(
+        id="meta/llama-3.2-11b-vision-instruct",
+        model_type="vlm",
+        client="ChatNVIDIA",
+        endpoint="https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-11b-vision-instruct/chat/completions",
+    ),
+    "meta/llama-3.2-90b-vision-instruct": Model(
+        id="meta/llama-3.2-90b-vision-instruct",
+        model_type="vlm",
+        client="ChatNVIDIA",
+        endpoint="https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-90b-vision-instruct/chat/completions",
+    ),
 }
 
 EMBEDDING_MODEL_TABLE = {