Update and fix environment setup for Ollama model (#304)
* Change provider and model split options

* Change `.env.development` descriptions

* Change `docker-compose` settings

* Change error description in `langchain.module.ts`
devleejb authored and minai621 committed Nov 5, 2024
1 parent f92e505 commit 653d263
Showing 4 changed files with 14 additions and 11 deletions.
backend/.env.development (8 changes: 4 additions & 4 deletions)
@@ -37,15 +37,15 @@ YORKIE_PROJECT_NAME=default
YORKIE_PROJECT_SECRET_KEY=""

# YORKIE_INTELLIGENCE: Whether to enable Yorkie Intelligence for collaborative editing.
# Set to an Ollama model name if Yorkie Intelligence is required.
# Available LLM models are listed at https://ollama.com/library
# If set to false, OPENAI_API_KEY is not required.
# Available options: false, ollama:llama3.1, ollama:gemma2, ollama:gemma2:2b, ollama:phi3, ollama:mistral, ollama:neural-chat, ollama:starling-lm, ollama:solar, openai:gpt-3.5-turbo, openai:gpt-4o-mini
# If set to openai:gpt-3.5-turbo or openai:gpt-4o-mini, OPENAI_API_KEY is required.
YORKIE_INTELLIGENCE="ollama:gemma2:2b"

# OLLAMA_HOST_URL: URL of the Ollama server used by Yorkie Intelligence.
OLLAMA_HOST_URL=http://localhost:11434

# OPENAI_API_KEY: API key for using the gpt-3.5-turbo model by Yorkie Intelligence.
# OPENAI_API_KEY: API key for using an OpenAI model with Yorkie Intelligence.
# This key is required when YORKIE_INTELLIGENCE is set to openai:gpt-3.5-turbo or openai:gpt-4o-mini.
# To obtain an API key, visit OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key
OPENAI_API_KEY=your_openai_api_key_here

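The comments above boil down to a simple rule: `ollama:*` values need OLLAMA_HOST_URL, `openai:*` values need OPENAI_API_KEY. A minimal sketch of that check (illustrative only, not the project's actual startup code):

```ts
// Illustrative sketch: derive the provider from YORKIE_INTELLIGENCE and
// verify that the matching endpoint/credential variable is present.
const intelligence = process.env.YORKIE_INTELLIGENCE ?? "false";

if (intelligence !== "false") {
	// Split on the first colon only, so model tags like "gemma2:2b" stay intact.
	const [provider] = intelligence.split(/:(.+)/);

	if (provider === "ollama" && !process.env.OLLAMA_HOST_URL) {
		throw new Error("OLLAMA_HOST_URL must point to a running Ollama server");
	}
	if (provider === "openai" && !process.env.OPENAI_API_KEY) {
		throw new Error("OPENAI_API_KEY is required for openai:* models");
	}
}
```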
backend/docker/docker-compose-full.yml (7 changes: 4 additions & 3 deletions)
@@ -16,7 +16,8 @@ services:
YORKIE_PROJECT_NAME: "default"
YORKIE_PROJECT_SECRET_KEY: ""
YORKIE_PROJECT_TOKEN: ""
YORKIE_INTELLIGENCE: false
YORKIE_INTELLIGENCE: "ollama:gemma2:2b"
OLLAMA_HOST_URL: http://yorkie-intelligence:11434
OPENAI_API_KEY: "your_openai_api_key_here"
LANGCHAIN_ENDPOINT: "https://www.langchain.com/langsmith"
LANGCHAIN_API_KEY: "your_langsmith_api_key_here"
@@ -63,8 +64,8 @@ services:
MONGO_REPLICA_PORT: 27017
MONGO_INITDB_DATABASE: "codepair"
MONGO_COMMAND: "mongo"
ports:
- "27017:27017"
expose:
- "27017"
restart: unless-stopped
healthcheck:
test:
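With these settings the backend container resolves the Ollama server by its compose service name (`http://yorkie-intelligence:11434`), and MongoDB is only exposed on the internal Docker network instead of being published to the host. A quick connectivity check could look like the sketch below (an assumption-level example, not part of the repository; it relies on Ollama's standard `GET /api/tags` endpoint and Node 18+ global `fetch`):

```ts
// Sketch: confirm the backend can reach the yorkie-intelligence service.
const ollamaHost = process.env.OLLAMA_HOST_URL ?? "http://yorkie-intelligence:11434";

async function checkOllama(): Promise<void> {
	const res = await fetch(`${ollamaHost}/api/tags`); // lists models pulled into Ollama
	if (!res.ok) {
		throw new Error(`Ollama is not reachable at ${ollamaHost} (HTTP ${res.status})`);
	}
	const { models } = (await res.json()) as { models: { name: string }[] };
	console.log("Available Ollama models:", models.map((m) => m.name).join(", "));
}

checkOllama().catch((err) => console.error(err));
```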
backend/docker/docker-compose.yml (4 changes: 2 additions & 2 deletions)
@@ -12,8 +12,8 @@ services:
yorkie-intelligence:
image: "ollama/ollama:latest"
restart: always
expose:
- "11434"
ports:
- "11434:11434"

mongo:
build:
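Publishing port 11434 (rather than only exposing it on the Docker network) lets a backend running directly on the host, as in local development with `.env.development`, reach the containerized Ollama at `http://localhost:11434`. A minimal usage sketch under that assumption (it uses the `@langchain/ollama` package; the exact import the project uses is not shown in this diff):

```ts
import { ChatOllama } from "@langchain/ollama";

// Sketch: talk to the dockerized Ollama from a backend running on the host.
const chatModel = new ChatOllama({
	baseUrl: process.env.OLLAMA_HOST_URL ?? "http://localhost:11434",
	model: "gemma2:2b", // matches YORKIE_INTELLIGENCE="ollama:gemma2:2b"
});

async function main(): Promise<void> {
	const reply = await chatModel.invoke("Say hello in one short sentence.");
	console.log(reply.content);
}

main().catch((err) => console.error(err));
```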
backend/src/langchain/langchain.module.ts (6 changes: 4 additions & 2 deletions)
@@ -26,7 +26,9 @@ const chatModelFactory = {
useFactory: () => {
const modelType = process.env.YORKIE_INTELLIGENCE;
try {
const [provider, model] = modelType.split(":", 2);
// Split the modelType string into provider and model
// e.g. "ollama:gemma2:2b" => ["ollama", "gemma2:2b"]
const [provider, model] = modelType.split(/:(.+)/);
let chatModel: BaseChatModel | ChatOllama;

if (modelList[provider]?.includes(model)) {
@@ -46,7 +48,7 @@ const chatModelFactory = {

return chatModel;
} catch {
throw new Error(`${modelType} is not found. please check your model name`);
throw new Error(`${modelType} is not found. Please check your model name`);
}
},
};
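The regex split is the substantive fix here: the second argument of `split(":", 2)` is a result limit, so the `:2b` tag of `ollama:gemma2:2b` was silently dropped and the intended tagged model could not be selected. A small check of both behaviors (runnable in a Node.js REPL or with ts-node):

```ts
const modelType = "ollama:gemma2:2b";

// Old behavior: the limit argument truncates the result, dropping the ":2b" tag.
console.log(modelType.split(":", 2));  // ["ollama", "gemma2"]

// New behavior: split only on the first colon; the capture group keeps the remainder.
console.log(modelType.split(/:(.+)/)); // ["ollama", "gemma2:2b", ""]

const [provider, model] = modelType.split(/:(.+)/);
console.log(provider, model);          // "ollama" "gemma2:2b"
```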
