index.tsx
/** @jsxImportSource ai-jsx */
import * as AI from "ai-jsx";
import ModelSelector from "micro-agi/core/models/model-selector";
import {
  ChatCompletion,
  SystemMessage,
  UserMessage,
} from "ai-jsx/core/completion";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { OllamaEmbeddings } from "langchain/embeddings/ollama";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import readline from "readline";
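
// Interactive prompt: read questions from stdin, write answers to stdout.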
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});
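
// Split the docs into ~4000-character chunks (with 30 characters of overlap)
// before embedding them.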
const textSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 4000,
  chunkOverlap: 30,
});
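
// Embeddings are generated by a local Ollama server (default port 11434)
// running the "openhermes" model.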
const embeddings = new OllamaEmbeddings({
  model: "openhermes",
  baseUrl: "http://localhost:11434",
});
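
// Load docs.md, chunk it, and index the chunks in an in-memory vector store.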
console.log("Indexing docs...");
const loader = new TextLoader("docs.md");
const rawDocuments = await loader.load();
const docs = await textSplitter.splitDocuments(rawDocuments);
const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
console.log("✨ Ready! ✨\n");
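
// Retrieval-augmented answer: fetch the 5 chunks most similar to the question
// and pass them to the chat completion as context in the system message.
// ModelSelector routes the completion to the Ollama-hosted "openhermes" model.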
const Question = async ({ question }: { question: string }) => {
  const result = await vectorStore.similaritySearch(question, 5);
  return (
    <ModelSelector provider="ollama" model="openhermes">
      <ChatCompletion>
        <SystemMessage>
          You are a knowledge base agent who answers questions based on these
          docs: {JSON.stringify(result)}
        </SystemMessage>
        <UserMessage>{question}</UserMessage>
      </ChatCompletion>
    </ModelSelector>
  );
};
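
// Simple REPL: keep asking for questions until the user types "exit".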
const askQuestion = () => {
  rl.question("User: ", async (question) => {
    if (question.toLowerCase() === "exit") {
      rl.close();
      console.log("Exiting...");
    } else {
      const renderContext = AI.createRenderContext();
      console.log("Loading...\n");
      const result = await renderContext.render(
        <Question question={question} />
      );
      console.log(`Assistant: ${result}\n`);
      askQuestion();
    }
  });
};

askQuestion();