Commit a3fd423
chore: Update AI chat example to latest AI SDK v4 (#2313)
Closes #2283
davidmytton authored Nov 25, 2024
1 parent 439c1d5 commit a3fd423
Showing 4 changed files with 148 additions and 492 deletions.
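
In the two route files shown here, the change is identical: the legacy `OpenAIStream` and `StreamingTextResponse` helpers, which were removed in AI SDK v4, and the hand-constructed OpenAI client are replaced by `streamText` from `ai` paired with the `openai` provider from `@ai-sdk/openai`. The model moves from `gpt-3.5-turbo` to `gpt-4-turbo`, responses are streamed back with `toDataStreamResponse()`, and a 30-second `maxDuration` export replaces the old client setup. Consolidated sketches of the updated routes follow each diff below.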
23 changes: 8 additions & 15 deletions examples/nextjs-openai/app/api/chat/route.ts
@@ -5,7 +5,7 @@
 tokens withdrawn from the bucket with every request.
 
 This example is adapted from
-https://sdk.vercel.ai/docs/guides/frameworks/nextjs-app and calculates an
+https://sdk.vercel.ai/docs/getting-started/nextjs-app-router and calculates an
 estimate of the number of tokens required to process the request. It then uses
 a token bucket rate limit algorithm to limit the number of tokens consumed,
 keeping costs under control.
@@ -16,8 +16,8 @@
 users instead. See the `chat_userid` example for an example of this.
 */
 import arcjet, { shield, tokenBucket } from "@arcjet/next";
-import { OpenAIStream, StreamingTextResponse } from "ai";
-import OpenAI from "openai";
+import { openai } from '@ai-sdk/openai';
+import { streamText } from 'ai';
 import { promptTokensEstimate } from "openai-chat-tokens";
 
 const aj = arcjet({
@@ -39,10 +39,8 @@ const aj = arcjet({
   ],
 });
 
-// OpenAI client
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY ?? "OPENAI_KEY_MISSING",
-});
+// Allow streaming responses up to 30 seconds
+export const maxDuration = 30;
 
 // Edge runtime allows for streaming responses
 export const runtime = "edge";
@@ -79,15 +77,10 @@ export async function POST(req: Request) {
   }
 
   // If the request is allowed, continue to use OpenAI
-  // Ask OpenAI for a streaming chat completion given the prompt
-  const response = await openai.chat.completions.create({
-    model: "gpt-3.5-turbo",
-    stream: true,
+  const result = await streamText({
+    model: openai('gpt-4-turbo'),
     messages,
   });
 
-  // Convert the response into a friendly text-stream
-  const stream = OpenAIStream(response);
-  // Respond with the stream
-  return new StreamingTextResponse(stream);
+  return result.toDataStreamResponse();
 }
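
Pieced together from the hunks above, the updated `chat/route.ts` looks roughly like the sketch below. The Arcjet key, the rule values, the token estimate, and the denial response sit in collapsed regions of the diff, so those details are assumptions based on the visible context lines and Arcjet's documented token bucket API:

```ts
// Sketch of app/api/chat/route.ts after this commit. The Arcjet rule
// values and the denial response are in collapsed diff regions, so the
// numbers and the 429 handling here are assumptions.
import arcjet, { shield, tokenBucket } from "@arcjet/next";
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";
import { promptTokensEstimate } from "openai-chat-tokens";

const aj = arcjet({
  key: process.env.ARCJET_KEY!, // assumed to be set in the environment
  rules: [
    shield({ mode: "LIVE" }), // protect against common attacks
    tokenBucket({
      mode: "LIVE",
      refillRate: 2_000, // illustrative values; the real ones are collapsed
      interval: "1h",
      capacity: 5_000,
    }),
  ],
});

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

// Edge runtime allows for streaming responses
export const runtime = "edge";

export async function POST(req: Request) {
  const { messages } = await req.json();

  // Estimate how many tokens this prompt will consume...
  const estimate = promptTokensEstimate({ messages });

  // ...and ask Arcjet to withdraw that many from the bucket.
  const decision = await aj.protect(req, { requested: estimate });
  if (decision.isDenied()) {
    return new Response("Too many requests", { status: 429 });
  }

  // If the request is allowed, continue to use OpenAI
  const result = await streamText({
    model: openai("gpt-4-turbo"),
    messages,
  });
  return result.toDataStreamResponse();
}
```

Estimating tokens before calling the model means the bucket tracks approximate LLM cost rather than raw request count, which is what keeps spend bounded.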
23 changes: 8 additions & 15 deletions examples/nextjs-openai/app/api/chat_userid/route.ts
@@ -5,7 +5,7 @@
 tokens withdrawn from the bucket with every request.
 
 This example is adapted from
-https://sdk.vercel.ai/docs/guides/frameworks/nextjs-app and calculates an
+https://sdk.vercel.ai/docs/getting-started/nextjs-app-router and calculates an
 estimate of the number of tokens required to process the request. It then uses
 a token bucket rate limit algorithm to limit the number of tokens consumed,
 keeping costs under control.
@@ -17,8 +17,8 @@
 calling the protect method. You can use any string value for the key.
 */
 import arcjet, { shield, tokenBucket } from "@arcjet/next";
-import { OpenAIStream, StreamingTextResponse } from "ai";
-import OpenAI from "openai";
+import { openai } from '@ai-sdk/openai';
+import { streamText } from 'ai';
 import { promptTokensEstimate } from "openai-chat-tokens";
 
 const aj = arcjet({
@@ -40,10 +40,8 @@ const aj = arcjet({
   ],
 });
 
-// OpenAI client
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY ?? "OPENAI_KEY_MISSING",
-});
+// Allow streaming responses up to 30 seconds
+export const maxDuration = 30;
 
 // Edge runtime allows for streaming responses
 export const runtime = "edge";
@@ -84,15 +82,10 @@ export async function POST(req: Request) {
   }
 
   // If the request is allowed, continue to use OpenAI
-  // Ask OpenAI for a streaming chat completion given the prompt
-  const response = await openai.chat.completions.create({
-    model: "gpt-3.5-turbo",
-    stream: true,
+  const result = await streamText({
+    model: openai('gpt-4-turbo'),
     messages,
   });
 
-  // Convert the response into a friendly text-stream
-  const stream = OpenAIStream(response);
-  // Respond with the stream
-  return new StreamingTextResponse(stream);
+  return result.toDataStreamResponse();
 }
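
The `chat_userid` variant differs only in how the bucket is keyed: per its header comment, the limit is tracked against a user ID passed to the protect call, and any string value can serve as the key. A minimal sketch under the same assumptions, with a hypothetical hard-coded user ID standing in for a real session lookup:

```ts
// User-scoped variant (chat_userid/route.ts): the bucket is tracked per
// user via the rule's characteristics. Rule values are assumptions.
import arcjet, { shield, tokenBucket } from "@arcjet/next";
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";
import { promptTokensEstimate } from "openai-chat-tokens";

const aj = arcjet({
  key: process.env.ARCJET_KEY!,
  rules: [
    shield({ mode: "LIVE" }),
    tokenBucket({
      mode: "LIVE",
      characteristics: ["userId"], // one bucket per user ID
      refillRate: 2_000, // illustrative values
      interval: "1h",
      capacity: 5_000,
    }),
  ],
});

export async function POST(req: Request) {
  const { messages } = await req.json();
  const estimate = promptTokensEstimate({ messages });

  // In a real app the ID would come from your auth session; any string
  // value can be used as the key. "user1" is a hypothetical placeholder.
  const decision = await aj.protect(req, {
    userId: "user1",
    requested: estimate,
  });
  if (decision.isDenied()) {
    return new Response("Too many requests", { status: 429 });
  }

  const result = await streamText({
    model: openai("gpt-4-turbo"),
    messages,
  });
  return result.toDataStreamResponse();
}
```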