Adding LM Studio Integration
csikosjanos committed Nov 7, 2024
1 parent a6d81b1 commit e3fd1ee
Showing 10 changed files with 98 additions and 57 deletions.
6 changes: 6 additions & 0 deletions .env.example
@@ -32,12 +32,18 @@ OLLAMA_API_BASE_URL=
# You only need this environment variable set if you want to use OpenAI Like models
OPENAI_LIKE_API_BASE_URL=

# You only need this environment variable set if you want to use LM Studio models
LM_STUDIO_API_BASE_URL=

# You only need this environment variable set if you want to use DeepSeek models through their API
DEEPSEEK_API_KEY=

# Get your OpenAI Like API Key
OPENAI_LIKE_API_KEY=

# Get your LM Studio API Key
LM_STUDIO_API_KEY=

# Get your Mistral API Key by following these instructions -
# https://console.mistral.ai/api-keys/
# You only need this environment variable set if you want to use Mistral models
8 changes: 8 additions & 0 deletions Dockerfile
@@ -24,6 +24,8 @@ ARG ANTHROPIC_API_KEY
ARG OPEN_ROUTER_API_KEY
ARG GOOGLE_GENERATIVE_AI_API_KEY
ARG OLLAMA_API_BASE_URL
ARG LM_STUDIO_API_BASE_URL
ARG LM_STUDIO_API_KEY
ARG VITE_LOG_LEVEL=debug

ENV WRANGLER_SEND_METRICS=false \
@@ -33,6 +35,8 @@ ENV WRANGLER_SEND_METRICS=false \
OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
LM_STUDIO_API_BASE_URL=${LM_STUDIO_API_BASE_URL} \
LM_STUDIO_API_KEY=${LM_STUDIO_API_KEY} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL}

# Pre-configure wrangler to disable metrics
@@ -53,6 +57,8 @@ ARG ANTHROPIC_API_KEY
ARG OPEN_ROUTER_API_KEY
ARG GOOGLE_GENERATIVE_AI_API_KEY
ARG OLLAMA_API_BASE_URL
ARG LM_STUDIO_API_BASE_URL
ARG LM_STUDIO_API_KEY
ARG VITE_LOG_LEVEL=debug

ENV GROQ_API_KEY=${GROQ_API_KEY} \
@@ -61,6 +67,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
LM_STUDIO_API_BASE_URL=${LM_STUDIO_API_BASE_URL} \
LM_STUDIO_API_KEY=${LM_STUDIO_API_KEY} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL}

RUN mkdir -p ${WORKDIR}/run
10 changes: 5 additions & 5 deletions README.md
@@ -23,7 +23,7 @@ This fork of Bolt.new allows you to choose the LLM that you use for each prompt!
- ⬜ **HIGH PRIORITY** - Load local projects into the app
- ⬜ **HIGH PRIORITY** - Attach images to prompts
- ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
- ✅ LM Studio Integration
- ⬜ Together Integration
- ⬜ Azure Open AI API Integration
- ⬜ HuggingFace Integration
@@ -67,9 +67,9 @@ Many of you are new users to installing software from Github. If you have any in

1. Install Git from https://git-scm.com/downloads

2. Install Node.js from https://nodejs.org/en/download/

Pay attention to the installer notes after completion.

On all operating systems, the path to Node.js should automatically be added to your system path. But you can check your path if you want to be sure. On Windows, you can search for "edit the system environment variables" in your system, select "Environment Variables..." once you are in the system properties, and then check for a path to Node in your "Path" system variable. On a Mac or Linux machine, it will tell you to check if /usr/local/bin is in your $PATH. To determine if /usr/local/bin is included in $PATH, open your Terminal and run:

@@ -199,7 +199,7 @@ FROM [Ollama model ID such as qwen2.5-coder:7b]
PARAMETER num_ctx 32768
```

- Run the command:

```
ollama create -f Modelfile [your new model ID, can be whatever you want (example: qwen2.5-coder-extra-ctx:7b)]
@@ -210,7 +210,7 @@ You'll see this new model in the list of Ollama models along with all the others

## Adding New LLMs:

To make new LLMs available to use in this version of Bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.

By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
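For illustration, such an entry could look like the sketch below. Every value here is a placeholder, and the `ModelInfo` import path is an assumption based on this repo's layout:

```
import type { ModelInfo } from './types'; // import path assumed

// Hypothetical entry for the MODEL_LIST array in app/utils/constants.ts:
const exampleEntry: ModelInfo = {
  name: 'my-model-id', // the model ID from the provider's API documentation
  label: 'My Model (MyProvider)', // what the frontend model dropdown displays
  provider: 'MyProvider', // must match a provider case handled in getModel()
};
```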

3 changes: 3 additions & 0 deletions app/components/chat/BaseChat.tsx
@@ -48,6 +48,9 @@ const ModelSelector = ({ model, setModel, modelList, providerList }) => {
<option key="OpenAILike" value="OpenAILike">
OpenAILike
</option>
<option key="LMStudio" value="LMStudio">
LMStudio
</option>
</select>
<select
value={model}
24 changes: 14 additions & 10 deletions app/lib/.server/llm/api-key.ts
@@ -20,27 +20,31 @@ export function getAPIKey(cloudflareEnv: Env, provider: string) {
case 'OpenRouter':
return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
case 'Deepseek':
return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY;
case 'Mistral':
return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
case 'OpenAILike':
return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
case 'LMStudio':
return env.LM_STUDIO_API_KEY || cloudflareEnv.LM_STUDIO_API_KEY;
default:
return "";
return '';
}
}

export function getBaseURL(cloudflareEnv: Env, provider: string) {
switch (provider) {
case 'OpenAILike':
return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
case 'LMStudio':
return env.LM_STUDIO_API_BASE_URL || cloudflareEnv.LM_STUDIO_API_BASE_URL || 'http://localhost:1234';
case 'Ollama':
let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';
if (env.RUNNING_IN_DOCKER === 'true') {
baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
}
return baseUrl;
default:
return "";
return '';
}
}
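To make the resolution order concrete, here is a small usage sketch. The import path is assumed, and `cloudflareEnv` stands in for whatever the Cloudflare runtime binds; both helpers check the local process env first, then the binding, then a default:

```
import { getAPIKey, getBaseURL } from '~/lib/.server/llm/api-key'; // path assumed

declare const cloudflareEnv: Env; // supplied by the Cloudflare runtime

// LM_STUDIO_API_KEY, falling back to '' if unset anywhere:
const apiKey = getAPIKey(cloudflareEnv, 'LMStudio');

// LM_STUDIO_API_BASE_URL, falling back to 'http://localhost:1234':
const baseURL = getBaseURL(cloudflareEnv, 'LMStudio');
```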
24 changes: 12 additions & 12 deletions app/lib/.server/llm/model.ts
@@ -5,7 +5,7 @@ import { createAnthropic } from '@ai-sdk/anthropic';
import { createOpenAI } from '@ai-sdk/openai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { ollama } from 'ollama-ai-provider';
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { mistral } from '@ai-sdk/mistral';
import { createMistral } from '@ai-sdk/mistral';

@@ -16,7 +16,7 @@ export function getAnthropicModel(apiKey: string, model: string) {

return anthropic(model);
}
export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string) {
const openai = createOpenAI({
baseURL,
apiKey,
@@ -34,16 +34,14 @@ export function getOpenAIModel(apiKey: string, model: string) {

export function getMistralModel(apiKey: string, model: string) {
const mistral = createMistral({
apiKey,
});

return mistral(model);
}

export function getGoogleModel(apiKey: string, model: string) {
const google = createGoogleGenerativeAI(apiKey);

return google(model);
}
@@ -63,7 +61,7 @@ export function getOllamaModel(baseURL: string, model: string) {
return Ollama;
}

export function getDeepseekModel(apiKey: string, model: string) {
const openai = createOpenAI({
baseURL: 'https://api.deepseek.com/beta',
apiKey,
@@ -74,7 +72,7 @@ export function getDeepseekModel(apiKey: string, model: string){

export function getOpenRouterModel(apiKey: string, model: string) {
const openRouter = createOpenRouter({
apiKey,
});

return openRouter.chat(model);
@@ -94,13 +92,15 @@ export function getModel(provider: string, model: string, env: Env) {
case 'OpenRouter':
return getOpenRouterModel(apiKey, model);
case 'Google':
return getGoogleModel(apiKey, model);
case 'OpenAILike':
return getOpenAILikeModel(baseURL, apiKey, model);
case 'LMStudio':
return getOpenAILikeModel(baseURL, apiKey, model);
case 'Deepseek':
return getDeepseekModel(apiKey, model);
case 'Mistral':
return getMistralModel(apiKey, model);
default:
return getOllamaModel(baseURL, model);
}
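LM Studio exposes an OpenAI-compatible server, which is why the new 'LMStudio' case reuses `getOpenAILikeModel` rather than adding a dedicated client. Expanded by hand, the call amounts to roughly the following sketch, where the base URL, key, and model ID are placeholder values:

```
import { createOpenAI } from '@ai-sdk/openai';

// What getModel('LMStudio', ...) resolves to: the OpenAI SDK pointed at the
// LM Studio server returned by getBaseURL (default http://localhost:1234).
const lmstudio = createOpenAI({
  baseURL: 'http://localhost:1234', // LM_STUDIO_API_BASE_URL or the default
  apiKey: 'lm-studio', // placeholder; a local LM Studio server typically doesn't validate it
});

const model = lmstudio('qwen2.5-coder-7b-instruct'); // placeholder model ID
```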
66 changes: 40 additions & 26 deletions app/utils/constants.ts
@@ -19,7 +19,7 @@ const staticModels: ModelInfo[] = [
{ name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter' },
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter' },
{ name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google' },
{ name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google' },
{ name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq' },
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq' },
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq' },
@@ -32,8 +32,8 @@ const staticModels: ModelInfo[] = [
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI' },
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' },
{ name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' },
{ name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek' },
{ name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek' },
{ name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' },
{ name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral' },
{ name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral' },
@@ -54,20 +54,18 @@ const getOllamaBaseUrl = () => {
// Frontend always uses localhost
return defaultBaseUrl;
}

// Backend: Check if we're running in Docker
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';

return isDocker ? defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl;
};

async function getOllamaModels(): Promise<ModelInfo[]> {
try {
const base_url = getOllamaBaseUrl();
const response = await fetch(`${base_url}/api/tags`);
const data = (await response.json()) as OllamaApiResponse;

return data.models.map((model: OllamaModel) => ({
name: model.name,
@@ -79,33 +77,49 @@ async function getOllamaModels(): Promise<ModelInfo[]> {
}
}

async function getOAILikeModels(baseURL: string, apiKey: string, provider: string): Promise<ModelInfo[]> {
try {
if (!baseURL) {
return [];
}
const response = await fetch(`${baseURL}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});
const res = (await response.json()) as any;
return res.data.map((model: any) => ({
name: model.id,
label: model.id,
provider,
}));
} catch (e) {
return [];
}
}

async function getOpenAILikeModels(): Promise<ModelInfo[]> {
const baseURL = import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
const apiKey = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
const provider = 'OpenAILike';

return getOAILikeModels(baseURL, apiKey, provider);
}

async function getLMStudioModels(): Promise<ModelInfo[]> {
const baseURL = import.meta.env.LM_STUDIO_API_BASE_URL || 'http://localhost:1234';
const apiKey = import.meta.env.LM_STUDIO_API_KEY ?? 'lm-studio';
const provider = 'LMStudio';

return getOAILikeModels(baseURL, apiKey, provider);
}

async function initializeModelList(): Promise<void> {
const ollamaModels = await getOllamaModels();
const openAiLikeModels = await getOpenAILikeModels();
const lmStudioModels = await getLMStudioModels();
MODEL_LIST = [...ollamaModels, ...openAiLikeModels, ...lmStudioModels, ...staticModels];
}

initializeModelList().then();
export { getOllamaModels, getOpenAILikeModels, initializeModelList };
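For reference, `getOAILikeModels` assumes the `/models` endpoint answers with the usual OpenAI-style list. A rough sketch of the shape it relies on follows; only `data[].id` is actually read, and the other fields are illustrative:

```
// Approximate response of GET `${baseURL}/models` from an OpenAI-compatible
// server such as LM Studio; getOAILikeModels maps each data[].id into a
// ModelInfo entry tagged with the given provider.
interface ModelsResponse {
  object: 'list';
  data: Array<{
    id: string; // e.g. the ID of a model currently loaded in LM Studio
    object: 'model';
    owned_by: string;
  }>;
}
```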
10 changes: 7 additions & 3 deletions docker-compose.yaml
@@ -19,10 +19,12 @@ services:
- OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
- GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
- OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
- LM_STUDIO_API_BASE_URL=${LM_STUDIO_API_BASE_URL}
- LM_STUDIO_API_KEY=${LM_STUDIO_API_KEY}
- VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
- RUNNING_IN_DOCKER=true
extra_hosts:
- "host.docker.internal:host-gateway"
- "host.docker.internal:host-gateway"
command: pnpm run dockerstart
profiles:
- production # This service only runs in the production profile
@@ -37,18 +39,20 @@ services:
- VITE_HMR_HOST=localhost
- VITE_HMR_PORT=5173
- CHOKIDAR_USEPOLLING=true
- WATCHPACK_POLLING=true
- PORT=5173
- GROQ_API_KEY=${GROQ_API_KEY}
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
- GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
- OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
- LM_STUDIO_API_BASE_URL=${LM_STUDIO_API_BASE_URL}
- LM_STUDIO_API_KEY=${LM_STUDIO_API_KEY}
- VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
- RUNNING_IN_DOCKER=true
extra_hosts:
- "host.docker.internal:host-gateway"
- "host.docker.internal:host-gateway"
volumes:
- type: bind
source: .
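One caveat worth noting alongside these compose entries: unlike the Ollama branch of `getBaseURL`, the new LMStudio branch does not rewrite `localhost` when `RUNNING_IN_DOCKER=true`, so the default `http://localhost:1234` points at the container itself. A Docker-aware variant would look roughly like this sketch; it is an assumption, not part of this commit:

```
// Sketch only: resolving the LM Studio base URL with the same Docker rewrite
// the Ollama branch applies. The default URL is the one used in this commit.
function resolveLmStudioBaseUrl(envUrl: string | undefined, runningInDocker: boolean): string {
  let baseUrl = envUrl || 'http://localhost:1234';
  if (runningInDocker) {
    // Inside the container, 'localhost' is the container; the host's LM Studio
    // server is reachable via host.docker.internal (mapped under extra_hosts).
    baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
  }
  return baseUrl;
}
```

In practice the same effect can be had by setting LM_STUDIO_API_BASE_URL explicitly.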
2 changes: 1 addition & 1 deletion vite.config.ts
@@ -27,7 +27,7 @@ export default defineConfig((config) => {
chrome129IssuePlugin(),
config.mode === 'production' && optimizeCssModules({ apply: 'build' }),
],
envPrefix:["VITE_","OPENAI_LIKE_API_","OLLAMA_API_BASE_URL"],
envPrefix:["VITE_","OPENAI_LIKE_API_","LM_STUDIO_API_","OLLAMA_API_BASE_URL"],
css: {
preprocessorOptions: {
scss: {
2 changes: 2 additions & 0 deletions worker-configuration.d.ts
@@ -6,5 +6,7 @@ interface Env {
OLLAMA_API_BASE_URL: string;
OPENAI_LIKE_API_KEY: string;
OPENAI_LIKE_API_BASE_URL: string;
LM_STUDIO_API_KEY: string;
LM_STUDIO_API_BASE_URL: string;
DEEPSEEK_API_KEY: string;
}
