refactor: rename models #20

Merged · 2 commits · Jul 16, 2024

README.md (16 changes: 8 additions & 8 deletions)

@@ -64,10 +64,10 @@ RAGChat supports both Upstash-hosted models and all OpenAI and OpenAI-compatible
To use an OpenAI model, first initialize RAGChat:

```typescript
-import { RAGChat, openaiModel } from "@upstash/rag-chat";
+import { RAGChat, openai } from "@upstash/rag-chat";

export const ragChat = new RAGChat({
-  model: openaiModel("gpt-4-turbo"),
+  model: openai("gpt-4-turbo"),
});
```

@@ -82,10 +82,10 @@ OPENAI_API_KEY=...
To use an Upstash model, first initialize RAGChat:

```typescript
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";

export const ragChat = new RAGChat({
-  model: upstashModel("mistralai/Mistral-7B-Instruct-v0.2"),
+  model: upstash("mistralai/Mistral-7B-Instruct-v0.2"),
});
```

@@ -94,10 +94,10 @@ export const ragChat = new RAGChat({
To use an Upstash model, first initialize RAGChat:

```typescript
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, custom } from "@upstash/rag-chat";

export const ragChat = new RAGChat({
-  model: customModel("codellama/CodeLlama-70b-Instruct-hf", {
+  model: custom("codellama/CodeLlama-70b-Instruct-hf", {
    apiKey: "TOGETHER_AI_TOKEN",
    baseUrl: "https://api.together.xyz/v1",
  }),
@@ -123,14 +123,14 @@ QSTASH_TOKEN=...
Customize your RAGChat instance with advanced options:

```typescript
-import { RAGChat, openaiModel } from "@upstash/rag-chat";
+import { RAGChat, openai } from "@upstash/rag-chat";

// 👇 Optional: For built-in rate limiting
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

export const ragChat = new RAGChat({
-  model: openaiModel("gpt-4-turbo"),
+  model: openai("gpt-4-turbo"),

  prompt: ({ context, question, chatHistory }) =>
    `You are an AI assistant with access to an Upstash Vector Store.
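
Taken together, the README changes are a pure rename of the three model factories: `openaiModel` → `openai`, `upstashModel` → `upstash`, and `customModel` → `custom`. A compact sketch of the renamed import surface, with the model IDs taken from the README examples above:

```typescript
// Old names (pre-PR): openaiModel, upstashModel, customModel
import { RAGChat, custom, openai, upstash } from "@upstash/rag-chat";

export const openaiChat = new RAGChat({ model: openai("gpt-4-turbo") });

export const upstashChat = new RAGChat({
  model: upstash("mistralai/Mistral-7B-Instruct-v0.2"),
});

// OpenAI-compatible providers go through custom(), which requires a baseUrl.
export const togetherChat = new RAGChat({
  model: custom("codellama/CodeLlama-70b-Instruct-hf", {
    apiKey: "TOGETHER_AI_TOKEN",
    baseUrl: "https://api.together.xyz/v1",
  }),
});
```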

examples/nextjs/server-actions/src/app/rag-chat.ts (4 changes: 2 additions & 2 deletions)

@@ -1,5 +1,5 @@
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";

export const ragChat = new RAGChat({
-  model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct"),
+  model: upstash("meta-llama/Meta-Llama-3-8B-Instruct"),
});

examples/nextjs/vercel-ai-sdk/src/app/api/chat/route.ts (6 changes: 3 additions & 3 deletions)

@@ -1,4 +1,4 @@
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { aiUseChatAdapter } from "@upstash/rag-chat/nextjs";
import type { Message } from "ai";

@@ -12,10 +12,10 @@ export async function POST(request: Request) {
  if (!question) throw new Error("No question in the request");

  const ragChat = new RAGChat({
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct"),
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct"),

    // 👇 ALTERNATIVE
-    // model: openaiModel("gpt-4-turbo")
+    // model: openai("gpt-4-turbo")
  });

  const response = await ragChat.chat(question, { streaming: true });

examples/nodejs/src/server.ts (6 changes: 3 additions & 3 deletions)

@@ -1,4 +1,4 @@
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { streamToResponse } from "ai";
import dotenv from "dotenv";
import { createServer } from "http";
@@ -8,10 +8,10 @@ dotenv.config();

const server = createServer(async (req, res) => {
  const ragChat = new RAGChat({
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct"),
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct"),

    // 👇 ALTERNATIVE
-    // model: openaiModel("gpt-4-turbo")
+    // model: openai("gpt-4-turbo")
  });

  await ragChat.context.add({

examples/nuxt/server/api/add-data.ts (4 changes: 2 additions & 2 deletions)

@@ -1,4 +1,4 @@
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { Index } from "@upstash/vector";

export default defineLazyEventHandler(async () => {
@@ -9,7 +9,7 @@ export default defineLazyEventHandler(async () => {
      token: apiKey.UPSTASH_VECTOR_REST_TOKEN,
      url: apiKey.UPSTASH_VECTOR_REST_URL,
    }),
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: apiKey.QSTASH_TOKEN }),
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: apiKey.QSTASH_TOKEN }),
  });

  return defineEventHandler(async (event: any) => {

examples/nuxt/server/api/chat.ts (6 changes: 3 additions & 3 deletions)

@@ -1,4 +1,4 @@
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { aiUseChatAdapter } from "@upstash/rag-chat/nextjs";
import { Index } from "@upstash/vector";
import { Message } from "ai";
@@ -19,10 +19,10 @@ export default defineLazyEventHandler(async () => {
      token: apiKey.UPSTASH_VECTOR_REST_TOKEN,
      url: apiKey.UPSTASH_VECTOR_REST_URL,
    }),
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: apiKey.QSTASH_TOKEN }),
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: apiKey.QSTASH_TOKEN }),

    // 👇 ALTERNATIVE
-    // model: openaiModel("gpt-4"),
+    // model: openai("gpt-4"),
  });

  const response = await ragChat.chat(question, { streaming: true });

examples/solidjs/src/routes/api/chat/index.ts (6 changes: 3 additions & 3 deletions)

@@ -1,5 +1,5 @@
import { APIEvent } from "@solidjs/start/server";
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { aiUseChatAdapter } from "@upstash/rag-chat/nextjs";
import { Index } from "@upstash/vector";
import { Message } from "ai";
@@ -15,12 +15,12 @@ export const POST = async (event: APIEvent) => {
      token: process.env.UPSTASH_VECTOR_REST_TOKEN,
      url: process.env.UPSTASH_VECTOR_REST_URL,
    }),
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct", {
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct", {
      apiKey: process.env.QSTASH_TOKEN!,
    }),

    // 👇 ALTERNATIVE
-    // model: openaiModel("gpt-4-turbo")
+    // model: openai("gpt-4-turbo")
  });

  const response = await ragChat.chat(question, { streaming: true });

examples/sveltekit/src/routes/api/add-data/+server.ts (4 changes: 2 additions & 2 deletions)

@@ -1,6 +1,6 @@
import { env } from "$env/dynamic/private";
import type { RequestHandler } from "@sveltejs/kit";
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { Index } from "@upstash/vector";

function sleep(ms: number) {
@@ -10,7 +10,7 @@ function sleep(ms: number) {
export const POST = (async () => {
  const ragChat = new RAGChat({
    vector: new Index({ token: env.UPSTASH_VECTOR_REST_TOKEN, url: env.UPSTASH_VECTOR_REST_URL }),
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: env.QSTASH_TOKEN }),
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: env.QSTASH_TOKEN }),
  });
  await Promise.all([
    ragChat.context.add({

examples/sveltekit/src/routes/api/chat/+server.ts (6 changes: 3 additions & 3 deletions)

@@ -1,6 +1,6 @@
import { env } from "$env/dynamic/private";
import type { Message } from "@ai-sdk/svelte";
-import { RAGChat, upstashModel } from "@upstash/rag-chat";
+import { RAGChat, upstash } from "@upstash/rag-chat";
import { aiUseChatAdapter } from "@upstash/rag-chat/nextjs";
import { Index } from "@upstash/vector";
import type { RequestHandler } from "./$types";
@@ -13,11 +13,11 @@ export const POST = (async ({ request }) => {

  const ragChat = new RAGChat({
    vector: new Index({ token: env.UPSTASH_VECTOR_REST_TOKEN, url: env.UPSTASH_VECTOR_REST_URL }),
-    model: upstashModel("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: env.QSTASH_TOKEN }),
+    model: upstash("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: env.QSTASH_TOKEN }),
  });
  // OR
  // const ragChat = new RAGChat({
-  //   model: openaiModel("gpt-4"),
+  //   model: openai("gpt-4"),
  // });

  const response = await ragChat.chat(question, { streaming: true });

src/config.ts (7 changes: 3 additions & 4 deletions)

@@ -3,7 +3,7 @@ import type { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";
import { Index } from "@upstash/vector";
import { DEFAULT_PROMPT } from "./constants";
-import { openaiModel, upstashModel } from "./models";
+import { upstash, openai } from "./models";
import type { CustomPrompt } from "./rag-chat-base";
import type { RAGChatConfig } from "./types";

@@ -47,11 +47,10 @@ const initializeModel = () => {
  const qstashToken = process.env.QSTASH_TOKEN;
  const openAIToken = process.env.OPENAI_API_KEY;

-  if (qstashToken)
-    return upstashModel("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: qstashToken });
+  if (qstashToken) return upstash("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: qstashToken });

  if (openAIToken) {
-    return openaiModel("gpt-4o", { apiKey: openAIToken });
+    return openai("gpt-4o", { apiKey: openAIToken });
  }

  throw new Error(
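
With the updated `initializeModel`, a RAGChat built without an explicit `model` resolves one from the environment. A minimal sketch of that behavior, assuming the config argument may omit `model` so that this fallback runs:

```typescript
import { RAGChat } from "@upstash/rag-chat";

// Resolution order, per initializeModel() above:
//   QSTASH_TOKEN set    -> upstash("meta-llama/Meta-Llama-3-8B-Instruct")
//   OPENAI_API_KEY set  -> openai("gpt-4o")
//   neither set         -> an Error is thrown
export const ragChat = new RAGChat({});
```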

src/error/model.ts (2 changes: 1 addition & 1 deletion)

joschan21 marked this conversation as resolved.

@@ -1,4 +1,4 @@
-export class UpstashModelError extends Error {
+export class UpstashError extends Error {
  constructor(message: string) {
    super(message);
    this.name = "ModelError";

src/models.ts (6 changes: 3 additions & 3 deletions)

@@ -30,7 +30,7 @@ export type UpstashChatModel =

type ModelOptions = Omit<LLMClientConfig, "model">;

-export const upstashModel = (model: UpstashChatModel, options?: Omit<ModelOptions, "baseUrl">) => {
+export const upstash = (model: UpstashChatModel, options?: Omit<ModelOptions, "baseUrl">) => {
  return new LLMClient({
    model,
    baseUrl: "https://qstash.upstash.io/llm/v1",
@@ -39,7 +39,7 @@ export const upstashModel = (model: UpstashChatModel, options?: Omit<ModelOption
  });
};

-export const customModel = (model: string, options?: ModelOptions) => {
+export const custom = (model: string, options?: ModelOptions) => {
  if (!options?.baseUrl) throw new Error("baseUrl cannot be empty or undefined.");

  return new LLMClient({
@@ -48,7 +48,7 @@ export const customModel = (model: string, options?: ModelOptions) => {
  });
};

-export const openaiModel = (model: OpenAIChatModel, options?: Omit<ModelOptions, "baseUrl">) => {
+export const openai = (model: OpenAIChatModel, options?: Omit<ModelOptions, "baseUrl">) => {
  return new ChatOpenAI({
    modelName: model,
    temperature: 0,
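
Note that `custom` keeps the `baseUrl` guard, so OpenAI-compatible providers must supply both `apiKey` and `baseUrl`. A minimal sketch against the renamed factory, reusing the Together AI endpoint from the README example (reading the token from an environment variable is an illustrative choice, not something this diff prescribes):

```typescript
import { RAGChat, custom } from "@upstash/rag-chat";

export const ragChat = new RAGChat({
  // custom() throws "baseUrl cannot be empty or undefined." when baseUrl is omitted.
  model: custom("codellama/CodeLlama-70b-Instruct-hf", {
    apiKey: process.env.TOGETHER_AI_TOKEN!, // illustrative variable name
    baseUrl: "https://api.together.xyz/v1",
  }),
});
```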

src/rag-chat.test.ts (4 changes: 2 additions & 2 deletions)

@@ -5,7 +5,7 @@ import { Redis } from "@upstash/redis";
import { Index } from "@upstash/vector";
import { LangChainAdapter, StreamingTextResponse } from "ai";
import { afterAll, beforeAll, describe, expect, test } from "bun:test";
-import { customModel } from "./models";
+import { custom } from "./models";
import { RAGChat } from "./rag-chat";
import { awaitUntilIndexed } from "./test-utils";
import { RatelimitUpstashError } from "./error";
@@ -480,7 +480,7 @@ describe("RAGChat init with custom model", () => {

  const ragChat = new RAGChat({
    vector,
-    model: customModel("meta-llama/Meta-Llama-3-8B-Instruct", {
+    model: custom("meta-llama/Meta-Llama-3-8B-Instruct", {
      apiKey: process.env.QSTASH_TOKEN!,
      baseUrl: "https://qstash.upstash.io/llm/v1",
    }),

src/rag-chat.ts (4 changes: 2 additions & 2 deletions)

@@ -1,4 +1,4 @@
-import { UpstashModelError } from "./error/model";
+import { UpstashError } from "./error/model";

import { Config } from "./config";
import { Database } from "./database";
@@ -37,7 +37,7 @@ export class RAGChat extends RAGChatBase {
    });

    if (!model) {
-      throw new UpstashModelError("Model can not be undefined!");
+      throw new UpstashError("Model can not be undefined!");
    }

    super(vectorService, historyService, {
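
Downstream code that caught `UpstashModelError` now needs to catch `UpstashError` instead. A hedged sketch of the updated catch site, assuming the class is importable by consumers (inside the repo it lives in `./error/model`; the public export path is not shown in this diff):

```typescript
import { RAGChat, UpstashError } from "@upstash/rag-chat";

declare const config: ConstructorParameters<typeof RAGChat>[0];

try {
  const ragChat = new RAGChat(config);
} catch (error) {
  if (error instanceof UpstashError) {
    // Per src/error/model.ts above, error.name is still "ModelError" after the rename.
    console.error("Model configuration error:", error.message);
  } else {
    throw error;
  }
}
```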