diff --git a/backend/.env.development b/backend/.env.development
index 0ecfa7ba..a05e2938 100644
--- a/backend/.env.development
+++ b/backend/.env.development
@@ -42,9 +42,9 @@ YORKIE_PROJECT_NAME=default
 YORKIE_PROJECT_SECRET_KEY=""
 
 # YORKIE_INTELLIGENCE: Whether to enable Yorkie Intelligence for collaborative editing.
-# Available options: false, ollama:lamma3.1, ollama:gemma2, ollama:gemma2:2b, ollama:phi3, ollama:mistral, ollama:neural-chat, ollama:starling-lm, ollama:solar, openai:gpt-3.5-turbo, openai:gpt-4o-mini
+# Available options: false, ollama:llama3.1, ollama:gemma2, ollama:gemma2:2b, ollama:phi3, ollama:mistral, ollama:neural-chat, ollama:starling-lm, ollama:solar, openai:gpt-3.5-turbo, openai:gpt-4o-mini, etc.
 # If set to openai:gpt-3.5-turbo or openai:gpt-4o-mini, OPENAI_API_KEY is not required.
-YORKIE_INTELLIGENCE="ollama:gemma2:2b"
+YORKIE_INTELLIGENCE="ollama:llama3.2:1b"
 
 # OLLAMA_HOST_URL: yorkie-intelligence ollama url
 OLLAMA_HOST_URL=http://localhost:11434
diff --git a/backend/src/langchain/langchain.module.ts b/backend/src/langchain/langchain.module.ts
index 63e381c5..712c3b1c 100644
--- a/backend/src/langchain/langchain.module.ts
+++ b/backend/src/langchain/langchain.module.ts
@@ -2,46 +2,26 @@ import { Module } from "@nestjs/common";
 import { ChatOpenAI } from "@langchain/openai";
 import { ChatOllama } from "@langchain/ollama";
 import { BaseChatModel } from "@langchain/core/language_models/chat_models";
-
-type ModelList = {
-  [key: string]: string[];
-};
-
-const modelList: ModelList = {
-  ollama: [
-    "lamma3.1",
-    "gemma2",
-    "gemma2:2b",
-    "phi3",
-    "mistral",
-    "neural-chat",
-    "starling-lm",
-    "solar",
-  ],
-  openai: ["gpt-3.5-turbo", "gpt-4o-mini"],
-};
+import { ConfigService } from "@nestjs/config";
 
 const chatModelFactory = {
   provide: "ChatModel",
-  useFactory: () => {
-    const modelType = process.env.YORKIE_INTELLIGENCE;
+  useFactory: (configService: ConfigService) => {
+    const modelType = configService.get("YORKIE_INTELLIGENCE");
     try {
       // Split the modelType string into provider and model
       // ex) "ollama:gemma2:2b" => ["ollama", "gemma2:2b"]
       const [provider, model] = modelType.split(/:(.+)/);
       let chatModel: BaseChatModel | ChatOllama;
-
-      if (modelList[provider]?.includes(model)) {
-        if (provider === "ollama") {
-          chatModel = new ChatOllama({
-            model: model,
-            baseUrl: process.env.OLLAMA_HOST_URL,
-            checkOrPullModel: true,
-            streaming: true,
-          });
-        } else if (provider === "openai") {
-          chatModel = new ChatOpenAI({ modelName: model });
-        }
+      if (provider === "ollama") {
+        chatModel = new ChatOllama({
+          model: model,
+          baseUrl: configService.get("OLLAMA_HOST_URL"),
+          checkOrPullModel: true,
+          streaming: true,
+        });
+      } else if (provider === "openai") {
+        chatModel = new ChatOpenAI({ modelName: model });
       }
 
       if (!chatModel) throw new Error();
@@ -51,6 +31,7 @@ const chatModelFactory = {
       throw new Error(`${modelType} is not found. Please check your model name`);
     }
   },
+  inject: [ConfigService],
 };
 
 @Module({
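Usage note (not part of the diff): the factory still registers the model under the "ChatModel" injection token; only its construction now reads YORKIE_INTELLIGENCE and OLLAMA_HOST_URL through ConfigService instead of process.env and drops the hard-coded model allowlist. A minimal sketch of how a consumer could inject that token, assuming a hypothetical IntelligenceService that is not part of this PR:

```ts
import { Inject, Injectable } from "@nestjs/common";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

// Hypothetical consumer of the "ChatModel" provider exported by LangchainModule.
@Injectable()
export class IntelligenceService {
  constructor(
    @Inject("ChatModel") private readonly chatModel: BaseChatModel,
  ) {}

  // Sends a plain-text prompt to whichever backend (Ollama or OpenAI) the
  // factory selected from YORKIE_INTELLIGENCE and returns the reply text.
  async complete(prompt: string): Promise<string> {
    const message = await this.chatModel.invoke(prompt);
    return typeof message.content === "string"
      ? message.content
      : JSON.stringify(message.content);
  }
}
```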