diff --git a/README.md b/README.md
index ae6642d2..62f4c76d 100644
--- a/README.md
+++ b/README.md
@@ -59,7 +59,7 @@ We offer two options. Choose the one that best suits your needs:
 2. Run `./backend/docker/docker-compose-full.yml`.
 
    ```bash
-   docker-compose -f ./backend/docker/docker-compose-full.yml up -d
+   docker-compose -f ./backend/docker/docker-compose-full.yml --env-file ./backend/.env.development up -d
    ```
 
 3. Run the Frontend application:
@@ -88,7 +88,7 @@ We offer two options. Choose the one that best suits your needs:
 2. Run `./backend/docker/docker-compose.yml`.
 
    ```bash
-   docker-compose -f ./backend/docker/docker-compose.yml up -d
+   docker-compose -f ./backend/docker/docker-compose.yml --env-file ./backend/.env.development up -d
    ```
 
 3. Run the Backend application:
diff --git a/backend/.env.development b/backend/.env.development
index aa5e2bc4..fb7ed73d 100644
--- a/backend/.env.development
+++ b/backend/.env.development
@@ -5,10 +5,10 @@ DATABASE_URL=mongodb://localhost:27017/codepair
 
 # GITHUB_CLIENT_ID: Client ID for authenticating with GitHub.
 # To obtain a client ID, create an OAuth app at: https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app
-GITHUB_CLIENT_ID=""
+GITHUB_CLIENT_ID=your_github_client_id_here
 # GITHUB_CLIENT_SECRET: Client secret for authenticating with GitHub.
 # To obtain a client ID, create an OAuth app at: https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app
-GITHUB_CLIENT_SECRET=""
+GITHUB_CLIENT_SECRET=your_github_client_secret_here
 # GITHUB_CALLBACK_URL: Callback URL for handling GitHub authentication response.
 # Format: https:///auth/login/github
 # Example: http://localhost:3000/auth/login/github (For development mode)
@@ -41,6 +41,10 @@ YORKIE_PROJECT_SECRET_KEY=""
 # you can find llm model in https://ollama.com/library
 # If set to false, OPENAI_API_KEY is not required.
 YORKIE_INTELLIGENCE="gemma2:2b"
+
+# OLLAMA_HOST_PORT: Host port mapped to the yorkie-intelligence (Ollama) container.
+OLLAMA_HOST_PORT=11434
+
 # OPENAI_API_KEY: API key for using the gpt-3.5-turbo model by Yorkie Intelligence.
 # To obtain an API key, visit OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key
 OPENAI_API_KEY=your_openai_api_key_here
diff --git a/backend/README.md b/backend/README.md
index 9db6d20d..2b57266e 100644
--- a/backend/README.md
+++ b/backend/README.md
@@ -24,7 +24,7 @@ This project is the backend part of the CodePair service developed using NestJS.
 3. Run `./backend/docker/docker-compose.yml`.
 
    ```bash
-   docker-compose -f ./backend/docker/docker-compose.yml up -d
+   docker-compose -f ./backend/docker/docker-compose.yml --env-file ./backend/.env.development up -d
    ```
 
 4. Run the Backend application:
diff --git a/backend/docker/docker-compose-full.yml b/backend/docker/docker-compose-full.yml
index 816bf7e4..4596518c 100644
--- a/backend/docker/docker-compose-full.yml
+++ b/backend/docker/docker-compose-full.yml
@@ -44,7 +44,7 @@ services:
         image: "ollama/ollama:latest"
         restart: always
         ports:
-            - "11434:11434"
+            - "${OLLAMA_HOST_PORT:-11434}:11434"
 
     mongo:
         build:
diff --git a/backend/docker/docker-compose.yml b/backend/docker/docker-compose.yml
index 018df278..667a33d0 100644
--- a/backend/docker/docker-compose.yml
+++ b/backend/docker/docker-compose.yml
@@ -13,7 +13,7 @@ services:
         image: "ollama/ollama:latest"
         restart: always
         ports:
-            - "11434:11434"
+            - "${OLLAMA_HOST_PORT:-11434}:11434"
 
     mongo:
         build:
diff --git a/backend/src/langchain/langchain.module.ts b/backend/src/langchain/langchain.module.ts
index 00e35da4..b047c9a7 100644
--- a/backend/src/langchain/langchain.module.ts
+++ b/backend/src/langchain/langchain.module.ts
@@ -6,10 +6,21 @@ import { BaseChatModel } from "@langchain/core/language_models/chat_models";
 const chatModelFactory = {
 	provide: "ChatModel",
 	useFactory: () => {
+		const modelList: string[] = [
+			"llama3.1",
+			"gemma2",
+			"gemma2:2b",
+			"phi3",
+			"mistral",
+			"neural-chat",
+			"starling-lm",
+			"solar",
+		];
 		const modelType = process.env.YORKIE_INTELLIGENCE;
-		if (modelType === "gemma2:2b") {
+		if (modelType && modelList.includes(modelType)) {
 			return new ChatOllama({
 				model: modelType,
+				baseUrl: `http://localhost:${process.env.OLLAMA_HOST_PORT ?? "11434"}`,
 				checkOrPullModel: true,
 				streaming: true,
 			});
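Note on the allow-list check in `langchain.module.ts`: in JavaScript/TypeScript, the `in` operator tests an object's keys (for arrays, the numeric indices), not its values, so the originally submitted `modelType in modelList` would never match a model name. The patch above uses `Array.prototype.includes` instead, with a truthiness guard since `process.env` values can be `undefined`. A minimal sketch of the difference (illustrative values only):

```ts
const modelList: string[] = ["llama3.1", "gemma2", "gemma2:2b"];

// `in` checks keys/indices, not values — not what the factory needs:
console.log("gemma2" in modelList);        // false ("gemma2" is not an index)
console.log(0 in modelList);               // true  (index 0 exists)

// `includes` checks values:
console.log(modelList.includes("gemma2")); // true
console.log(modelList.includes("gpt-4"));  // false
```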
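On the `baseUrl` wiring: `ChatOllama` expects a full URL rather than a bare port number, which is why the patch builds one from `OLLAMA_HOST_PORT` — the same variable docker-compose uses to publish the container port, so the two stay in sync. A minimal sketch of the intended flow, assuming the backend runs on the host and reaches Ollama through the published port, and that the module imports `ChatOllama` from `@langchain/ollama` (the import statement sits outside the diff hunk; the `http://localhost` prefix and `11434` fallback are local-development assumptions, not part of the original patch):

```ts
import { ChatOllama } from "@langchain/ollama";

// OLLAMA_HOST_PORT comes from backend/.env.development; docker-compose publishes
// the yorkie-intelligence container on the same port. 11434 is Ollama's default.
const port = process.env.OLLAMA_HOST_PORT ?? "11434";

const chatModel = new ChatOllama({
	model: process.env.YORKIE_INTELLIGENCE ?? "gemma2:2b",
	baseUrl: `http://localhost:${port}`, // full URL, not just a port number
	checkOrPullModel: true, // pull the model on first use if it is missing
	streaming: true,
});
```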