From f595c97c14fa6476b5bb2a482edda296d17523b0 Mon Sep 17 00:00:00 2001 From: LeeJongBeom <52884648+devleejb@users.noreply.github.com> Date: Wed, 21 Aug 2024 00:02:20 +0900 Subject: [PATCH] Update and fix environment setup for Ollama model (#304) * Change provider and model split options * Change `.env.development` descriptions * Change `docker-compose` settings * Change error description in `langchain.module.ts` --- backend/.env.development | 8 ++++---- backend/docker/docker-compose-full.yml | 7 ++++--- backend/docker/docker-compose.yml | 4 ++-- backend/src/langchain/langchain.module.ts | 6 ++++-- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/backend/.env.development b/backend/.env.development index 0e67ee63..8c8d82a1 100644 --- a/backend/.env.development +++ b/backend/.env.development @@ -37,15 +37,15 @@ YORKIE_PROJECT_NAME=default YORKIE_PROJECT_SECRET_KEY="" # YORKIE_INTELLIGENCE: Whether to enable Yorkie Intelligence for collaborative editing. -# Set to ollama modelname if Yorkie Intelligence is required -# you can find llm model in https://ollama.com/library -# If set to false, OPENAI_API_KEY is not required. +# Available options: false, ollama:lamma3.1, ollama:gemma2, ollama:gemma2:2b, ollama:phi3, ollama:mistral, ollama:neural-chat, ollama:starling-lm, ollama:solar, openai:gpt-3.5-turbo, openai:gpt-4o-mini +# If set to openai:gpt-3.5-turbo or openai:gpt-4o-mini, OPENAI_API_KEY is required. YORKIE_INTELLIGENCE="ollama:gemma2:2b" # OLLAMA_HOST_URL: yorkie-intelligence ollama url OLLAMA_HOST_URL=http://localhost:11434 -# OPENAI_API_KEY: API key for using the gpt-3.5-turbo model by Yorkie Intelligence. +# OPENAI_API_KEY: API key for using OpenAI model by Yorkie Intelligence. +# This key is required when the YORKIE_INTELLIGENCE is set to openai:gpt-3.5-turbo or openai:gpt-4o-mini. 
# To obtain an API key, visit OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key OPENAI_API_KEY=your_openai_api_key_here diff --git a/backend/docker/docker-compose-full.yml b/backend/docker/docker-compose-full.yml index 2fddd53f..bc74372c 100644 --- a/backend/docker/docker-compose-full.yml +++ b/backend/docker/docker-compose-full.yml @@ -16,7 +16,8 @@ services: YORKIE_PROJECT_NAME: "default" YORKIE_PROJECT_SECRET_KEY: "" YORKIE_PROJECT_TOKEN: "" - YORKIE_INTELLIGENCE: false + YORKIE_INTELLIGENCE: "ollama:gemma2:2b" + OLLAMA_HOST_URL: http://yorkie-intelligence:11434 OPENAI_API_KEY: "your_openai_api_key_here" LANGCHAIN_ENDPOINT: "https://www.langchain.com/langsmith" LANGCHAIN_API_KEY: "your_langsmith_api_key_here" @@ -56,8 +57,8 @@ services: MONGO_REPLICA_PORT: 27017 MONGO_INITDB_DATABASE: "codepair" MONGO_COMMAND: "mongo" - ports: - - "27017:27017" + expose: + - "27017" restart: unless-stopped healthcheck: test: diff --git a/backend/docker/docker-compose.yml b/backend/docker/docker-compose.yml index a4845797..018df278 100644 --- a/backend/docker/docker-compose.yml +++ b/backend/docker/docker-compose.yml @@ -12,8 +12,8 @@ services: yorkie-intelligence: image: "ollama/ollama:latest" restart: always - expose: - - "11434" + ports: + - "11434:11434" mongo: build: diff --git a/backend/src/langchain/langchain.module.ts b/backend/src/langchain/langchain.module.ts index 1a9e49b0..63e381c5 100644 --- a/backend/src/langchain/langchain.module.ts +++ b/backend/src/langchain/langchain.module.ts @@ -26,7 +26,9 @@ const chatModelFactory = { useFactory: () => { const modelType = process.env.YORKIE_INTELLIGENCE; try { - const [provider, model] = modelType.split(":", 2); + // Split the modelType string into provider and model + // ex) "ollama:gemma2:2b" => ["ollama", "gemma2:2b"] + const [provider, model] = modelType.split(/:(.+)/); let chatModel: BaseChatModel | ChatOllama; if (modelList[provider]?.includes(model)) { @@ -46,7 +48,7 @@ const 
chatModelFactory = { return chatModel; } catch { - throw new Error(`${modelType} is not found. please check your model name`); + throw new Error(`${modelType} is not found. Please check your model name`); } }, };