From bed1e960e4cef3f2dd5c312b4d3a06d43943c031 Mon Sep 17 00:00:00 2001 From: Hitendra <88222037+hitendraa@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:13:09 +0530 Subject: [PATCH] Added Nvidia Models --- .env.example | 5 +++++ app/lib/.server/llm/api-key.ts | 4 ++++ app/lib/.server/llm/model.ts | 12 ++++++++++++ app/utils/constants.ts | 3 +++ 4 files changed, 24 insertions(+) diff --git a/.env.example b/.env.example index 46a21e892..1b15a8699 100644 --- a/.env.example +++ b/.env.example @@ -54,5 +54,10 @@ LMSTUDIO_API_BASE_URL= # You only need this environment variable set if you want to use xAI models XAI_API_KEY= +# Get your Nvidia API key +# https://build.nvidia.com/ +# You only need this environment variable set if you want to use Nvidia models +NVIDIA_API_KEY= + # Include this environment variable if you want more logging for debugging locally VITE_LOG_LEVEL=debug diff --git a/app/lib/.server/llm/api-key.ts b/app/lib/.server/llm/api-key.ts index 464e334dd..3fa13681e 100644 --- a/app/lib/.server/llm/api-key.ts +++ b/app/lib/.server/llm/api-key.ts @@ -33,6 +33,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY; case "xAI": return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY; + case 'Nvidia': + return env.NVIDIA_API_KEY || cloudflareEnv.NVIDIA_API_KEY; default: return ""; } @@ -40,6 +42,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re export function getBaseURL(cloudflareEnv: Env, provider: string) { switch (provider) { + case 'Nvidia': + return 'https://integrate.api.nvidia.com/v1'; case 'OpenAILike': return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL; case 'LMStudio': diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts index 1babe7a2c..5f31c7d70 100644 --- a/app/lib/.server/llm/model.ts +++ b/app/lib/.server/llm/model.ts @@ -9,6 +9,16 @@ import { createOpenRouter } from 
"@openrouter/ai-sdk-provider"; import { mistral } from '@ai-sdk/mistral'; import { createMistral } from '@ai-sdk/mistral'; + +export function getNvidiaModel(apiKey: string, model: string) { + const openai = createOpenAI({ + baseURL: 'https://integrate.api.nvidia.com/v1', + apiKey, + }); + + return openai(model); +} + export function getAnthropicModel(apiKey: string, model: string) { const anthropic = createAnthropic({ apiKey, @@ -105,6 +115,8 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re const baseURL = getBaseURL(env, provider); switch (provider) { + case 'Nvidia': + return getNvidiaModel(apiKey, model); case 'Anthropic': return getAnthropicModel(apiKey, model); case 'OpenAI': diff --git a/app/utils/constants.ts b/app/utils/constants.ts index 8ac4151d5..fb1c5a920 100644 --- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -37,6 +37,9 @@ const staticModels: ModelInfo[] = [ { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' }, { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' }, { name: 'grok-beta', label: "xAI Grok Beta", provider: 'xAI' }, + { name: 'nvidia/llama-3.1-nemotron-70b-instruct', label: 'Llama 3.1 Nemotron 70B Instruct', provider: 'Nvidia' }, + { name: 'meta/llama-3.1-405b-instruct', label: 'Llama 3.1 405B Instruct', provider: 'Nvidia'}, + { name: 'meta/codellama-70b', label: 'Codellama 70B', provider: 'Nvidia'}, { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek'}, { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek'}, { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' },