feat(models): Add GitHub Models provider
- Implement GitHub Models provider using Azure ML endpoints
- Add comprehensive model selection including Llama, Mistral, GPT-4o, and Phi series
- Add documentation for the provider in CONTRIBUTING.md
- Configure OpenAI-compatible interface for Azure ML integration
- Fix linting issues in action-runner.ts

Contributed-by: ThePsyberSleuth
ThePsyberSleuth committed Nov 30, 2024
1 parent fe45651 commit 27934b1
Showing 11 changed files with 279 additions and 25 deletions.
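The approach in brief: GitHub Models exposes an OpenAI-compatible inference endpoint hosted on Azure ML, which is why the provider can reuse the Vercel AI SDK's OpenAI client instead of a bespoke integration. A minimal sketch, assuming the `gpt-4o` model id and a `process.env` lookup purely for illustration (the commit itself resolves the key through `getAPIKey`):

import { createOpenAI } from '@ai-sdk/openai';

// GitHub Models speaks the OpenAI wire protocol from an Azure ML host,
// so the stock OpenAI client works once pointed at the right baseURL.
const github = createOpenAI({
  baseURL: 'https://models.inference.ai.azure.com',
  apiKey: process.env.GITHUB_API_KEY, // a GitHub personal access token
});

// 'gpt-4o' is an assumed model id; the catalog added here spans the
// Llama, Mistral, GPT-4o, and Phi series.
const model = github('gpt-4o');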
7 changes: 6 additions & 1 deletion .env.example
@@ -64,11 +64,16 @@ LMSTUDIO_API_BASE_URL=
# You only need this environment variable set if you want to use xAI models
XAI_API_KEY=

+# Get your GitHub Models API Token here -
+# https://github.com/settings/tokens
+# You only need this environment variable set if you want to use GitHub models
+GITHUB_API_KEY=

# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug

# Example Context Values for qwen2.5-coder:32b
#
#
# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
6 changes: 5 additions & 1 deletion Dockerfile
@@ -25,6 +25,7 @@ ARG ANTHROPIC_API_KEY
ARG OPEN_ROUTER_API_KEY
ARG GOOGLE_GENERATIVE_AI_API_KEY
ARG OLLAMA_API_BASE_URL
+ARG GITHUB_API_KEY
ARG VITE_LOG_LEVEL=debug
ARG DEFAULT_NUM_CTX

@@ -36,6 +37,7 @@ ENV WRANGLER_SEND_METRICS=false \
OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
+GITHUB_API_KEY=${GITHUB_API_KEY} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}

@@ -52,12 +54,13 @@ FROM base AS bolt-ai-development

# Define the same environment variables for development
ARG GROQ_API_KEY
-ARG HuggingFace
+ARG HuggingFace_API_KEY
ARG OPENAI_API_KEY
ARG ANTHROPIC_API_KEY
ARG OPEN_ROUTER_API_KEY
ARG GOOGLE_GENERATIVE_AI_API_KEY
ARG OLLAMA_API_BASE_URL
+ARG GITHUB_API_KEY
ARG VITE_LOG_LEVEL=debug
ARG DEFAULT_NUM_CTX

@@ -68,6 +71,7 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
+GITHUB_API_KEY=${GITHUB_API_KEY} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}

4 changes: 3 additions & 1 deletion README.md
@@ -2,7 +2,7 @@

# Bolt.new Fork by Cole Medin - oTToDev

-This fork of Bolt.new (oTToDev) allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+This fork of Bolt.new (oTToDev) allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, Groq, or GitHub Models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.

Join the community for oTToDev!

@@ -31,6 +31,8 @@ https://thinktank.ottomator.ai
- ✅ Ability to revert code to earlier version (@wonderwhy-er)
- ✅ Cohere Integration (@hasanraiyan)
- ✅ Dynamic model max token length (@hasanraiyan)
+- ✅ GitHub Models Integration (@ThePsyberSleuth)

- **HIGH PRIORITY** - Prevent Bolt from rewriting files as often (file locking and diffs)
- **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
- **HIGH PRIORITY** - Load local projects into the app
2 changes: 2 additions & 0 deletions app/lib/.server/llm/api-key.ts
@@ -41,6 +41,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
return env.COHERE_API_KEY;
case 'AzureOpenAI':
return env.AZURE_OPENAI_API_KEY;
+case 'GitHub Models':
+return env.GITHUB_API_KEY || cloudflareEnv.GITHUB_API_KEY;
default:
return '';
}
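A sketch of the call site, assuming `userApiKeys` holds keys entered in the UI and `cloudflareEnv` holds the worker bindings (hypothetical usage, not part of this diff):

const apiKey = getAPIKey(cloudflareEnv, 'GitHub Models', userApiKeys);

if (!apiKey) {
  // Unknown providers fall through to '' and an unset env key is undefined,
  // so a falsy check covers both cases.
  throw new Error('GITHUB_API_KEY is not configured');
}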
38 changes: 25 additions & 13 deletions app/lib/.server/llm/model.ts
@@ -18,47 +18,48 @@ type OptionalApiKey = string | undefined;

export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
const anthropic = createAnthropic({
-apiKey,
+apiKey
});

return anthropic(model);
}

export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
baseURL,
-apiKey,
+apiKey
});

return openai(model);
}

export function getCohereAIModel(apiKey: OptionalApiKey, model: string) {
const cohere = createCohere({
-apiKey,
+apiKey
});

return cohere(model);
}

export function getOpenAIModel(apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
-apiKey,
+apiKey
});

return openai(model);
}

export function getMistralModel(apiKey: OptionalApiKey, model: string) {
const mistral = createMistral({
-apiKey,
+apiKey
});

return mistral(model);
}

export function getGoogleModel(apiKey: OptionalApiKey, model: string) {
const google = createGoogleGenerativeAI({
-apiKey,
+apiKey
});

return google(model);
@@ -67,7 +68,7 @@ export function getGoogleModel(apiKey: OptionalApiKey, model: string) {
export function getGroqModel(apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
-apiKey,
+apiKey
});

return openai(model);
@@ -76,15 +77,15 @@ export function getGroqModel(apiKey: OptionalApiKey, model: string) {
export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
baseURL: 'https://api-inference.huggingface.co/v1/',
-apiKey,
+apiKey
});

return openai(model);
}

export function getOllamaModel(baseURL: string, model: string) {
const ollamaInstance = ollama(model, {
-numCtx: DEFAULT_NUM_CTX,
+numCtx: DEFAULT_NUM_CTX
}) as LanguageModelV1 & { config: any };

ollamaInstance.config.baseURL = `${baseURL}/api`;
@@ -95,15 +96,15 @@ export function getDeepseekModel(apiKey: OptionalApiKey, model: string) {
export function getDeepseekModel(apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
baseURL: 'https://api.deepseek.com/beta',
-apiKey,
+apiKey
});

return openai(model);
}

export function getOpenRouterModel(apiKey: OptionalApiKey, model: string) {
const openRouter = createOpenRouter({
-apiKey,
+apiKey
});

return openRouter.chat(model);
@@ -112,7 +113,7 @@ export function getLMStudioModel(baseURL: string, model: string) {
export function getLMStudioModel(baseURL: string, model: string) {
const lmstudio = createOpenAI({
baseUrl: `${baseURL}/v1`,
-apiKey: '',
+apiKey: ''
});

return lmstudio(model);
@@ -121,7 +122,16 @@ export function getXAIModel(apiKey: OptionalApiKey, model: string) {
export function getXAIModel(apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
baseURL: 'https://api.x.ai/v1',
-apiKey,
+apiKey
});

return openai(model);
}

+export function getGitHubModel(apiKey: OptionalApiKey, model: string) {
+const openai = createOpenAI({
+baseURL: 'https://models.inference.ai.azure.com',
+apiKey
+});

+return openai(model);
@@ -156,6 +166,8 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re
return getXAIModel(apiKey, model);
case 'Cohere':
return getCohereAIModel(apiKey, model);
+case 'GitHub Models':
+return getGitHubModel(apiKey, model);
default:
return getOllamaModel(baseURL, model);
}
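End-to-end, the new case resolves like any other provider. A usage sketch, assuming `gpt-4o` as the model id and `streamText` from the Vercel AI SDK (hypothetical wiring, not part of this diff):

import { streamText } from 'ai';

const model = getModel('GitHub Models', 'gpt-4o', env, apiKeys);

const result = await streamText({
  model,
  prompt: 'Say hello from GitHub Models.',
});

// Stream tokens as they arrive from the Azure ML endpoint.
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}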
1 change: 0 additions & 1 deletion app/lib/runtime/action-runner.ts
@@ -100,7 +100,6 @@ export class ActionRunner {
.catch((error) => {
console.error('Action failed:', error);
});
-return this.#currentExecutionPromise;
}

async #executeAction(actionId: string, isStreaming: boolean = false) {
Expand Down