Add Ollama Model for local usage of yorkie intelligence #297

Merged · 12 commits · Aug 20, 2024
9 changes: 7 additions & 2 deletions backend/.env.development
@@ -37,9 +37,14 @@ YORKIE_PROJECT_NAME=default
YORKIE_PROJECT_SECRET_KEY=""

# YORKIE_INTELLIGENCE: Whether to enable Yorkie Intelligence for collaborative editing.
# Set to true if Yorkie Intelligence is required.
# Set to an Ollama model name if Yorkie Intelligence is required.
# Available LLM models are listed at https://ollama.com/library
# If set to false, OPENAI_API_KEY is not required.
YORKIE_INTELLIGENCE=false
YORKIE_INTELLIGENCE="ollama:gemma2:2b"

# OLLAMA_HOST_URL: URL of the Ollama server used by Yorkie Intelligence.
OLLAMA_HOST_URL=http://localhost:11434

# OPENAI_API_KEY: API key for using the gpt-3.5-turbo model by Yorkie Intelligence.
# To obtain an API key, visit OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key
OPENAI_API_KEY=your_openai_api_key_here
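
The new setting packs the provider and the model into one string separated by the first colon, so tagged models such as "gemma2:2b" keep their tag. A minimal TypeScript sketch of that parsing (the helper name is hypothetical, not part of this PR):

// Hypothetical helper illustrating the "provider:model" format; only the first
// ":" separates the provider, so "ollama:gemma2:2b" keeps its ":2b" tag.
function parseIntelligenceSetting(value: string): { provider: string; model: string } {
  const [provider, ...rest] = value.split(":");
  return { provider, model: rest.join(":") };
}

// parseIntelligenceSetting("ollama:gemma2:2b")  -> { provider: "ollama", model: "gemma2:2b" }
// parseIntelligenceSetting("openai:gpt-4o-mini") -> { provider: "openai", model: "gpt-4o-mini" }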
6 changes: 6 additions & 0 deletions backend/docker/docker-compose-full.yml
@@ -40,6 +40,12 @@ services:
- "8080:8080"
- "8081:8081"

yorkie-intelligence:
image: "ollama/ollama:latest"
restart: always
expose:
- "11434"

mongo:
build:
context: ./mongodb_replica
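
Inside the compose network the backend reaches the Ollama container by its service name rather than localhost. A small sketch of how the base URL could be resolved (the fallback value is an assumption, not part of this PR):

// Assumed fallback: within docker compose the "yorkie-intelligence" service name
// resolves to the Ollama container on port 11434; the .env default uses localhost.
const ollamaBaseUrl =
  process.env.OLLAMA_HOST_URL ?? "http://yorkie-intelligence:11434";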
6 changes: 6 additions & 0 deletions backend/docker/docker-compose.yml
@@ -9,6 +9,12 @@ services:
- "8080:8080"
- "8081:8081"

yorkie-intelligence:
image: "ollama/ollama:latest"
restart: always
expose:
- "11434"

mongo:
build:
context: ./mongodb_replica
159 changes: 134 additions & 25 deletions backend/package-lock.json


1 change: 1 addition & 0 deletions backend/package.json
@@ -32,6 +32,7 @@
"@aws-sdk/s3-request-presigner": "^3.509.0",
"@langchain/community": "^0.0.21",
"@langchain/core": "^0.1.18",
"@langchain/ollama": "^0.0.4",
"@nestjs/common": "^10.0.0",
"@nestjs/config": "^3.1.1",
"@nestjs/core": "^10.0.0",
46 changes: 45 additions & 1 deletion backend/src/langchain/langchain.module.ts
@@ -1,10 +1,54 @@
import { Module } from "@nestjs/common";
import { ChatOpenAI } from "@langchain/openai";
import { ChatOllama } from "@langchain/ollama";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

type ModelList = {
  [key: string]: string[];
};

const modelList: ModelList = {
  ollama: [
    "llama3.1",
    "gemma2",
    "gemma2:2b",
    "phi3",
    "mistral",
    "neural-chat",
    "starling-lm",
    "solar",
  ],
  openai: ["gpt-3.5-turbo", "gpt-4o-mini"],
};

const chatModelFactory = {
  provide: "ChatModel",
  useFactory: () => new ChatOpenAI({ modelName: "gpt-4o-mini" }) as BaseChatModel,
  useFactory: () => {
    const modelType = process.env.YORKIE_INTELLIGENCE;
    try {
      // Split only on the first ":" so tagged models such as "gemma2:2b" keep their tag
      const [provider, ...modelParts] = modelType.split(":");
      const model = modelParts.join(":");
      let chatModel: BaseChatModel | undefined;

      if (modelList[provider]?.includes(model)) {
        if (provider === "ollama") {
          chatModel = new ChatOllama({
            model: model,
            baseUrl: process.env.OLLAMA_HOST_URL,
            checkOrPullModel: true,
            streaming: true,
          });
        } else if (provider === "openai") {
          chatModel = new ChatOpenAI({ modelName: model });
        }
      }

      if (!chatModel) throw new Error();

      return chatModel;
    } catch {
      throw new Error(`${modelType} is not supported. Please check the model name.`);
    }
  },
};

@Module({
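
For context, a hedged usage sketch (service and method names are hypothetical, not part of this PR) of how the factory-provided model can be injected through the "ChatModel" token and invoked like any LangChain chat model:

import { Inject, Injectable } from "@nestjs/common";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

// Hypothetical consumer of the "ChatModel" provider registered above.
@Injectable()
export class IntelligenceService {
  constructor(@Inject("ChatModel") private readonly chatModel: BaseChatModel) {}

  async summarize(text: string): Promise<string> {
    const response = await this.chatModel.invoke(`Summarize the following:\n${text}`);
    return String(response.content);
  }
}

Because both ChatOllama and ChatOpenAI extend BaseChatModel, consumers stay unchanged regardless of which provider the YORKIE_INTELLIGENCE setting selects.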
2 changes: 1 addition & 1 deletion backend/src/settings/settings.service.ts
@@ -10,7 +10,7 @@ export class SettingsService {
  async getSettings(): Promise<GetSettingsResponse> {
    return {
      yorkieIntelligence: {
        enable: this.configService.get("YORKIE_INTELLIGENCE") === "true",
        enable: this.configService.get("YORKIE_INTELLIGENCE") !== "false",
        config: {
          features: generateFeatureList(this.configService),
        },
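
Since YORKIE_INTELLIGENCE now carries a provider/model string instead of a boolean, the feature is reported as enabled for any value other than the literal "false". A small illustration of the new check (sample values assumed):

// Illustration only: the flag enables intelligence for any provider string.
const samples = ["ollama:gemma2:2b", "openai:gpt-4o-mini", "false"];
for (const value of samples) {
  console.log(value, "->", value !== "false"); // true, true, false
}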