
Commit 8404e3a

Merge pull request #669 from siddharthsambharia-portkey/main
Add Vercel Cookbook
2 parents 5d247d9 + 00cd047


47 files changed: +13421, −3 lines
Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function POST(request: Request) {
  const { messages } = await request.json();
  const stream = await streamText({
    model: openai('gpt-4o'),
    system: 'You are a helpful assistant.',
    messages,
  });
  return stream.toAIStreamResponse();
}
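
The handler above streams tokens back as they are generated. For a quick smoke test outside React, here is a minimal sketch of calling the route directly with fetch — assuming the handler is mounted at /api/chat (the diff view hides file paths, so the path is an assumption):

const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello!' }] }),
});

// Read the streamed body chunk by chunk as it arrives.
const reader = res.body!.getReader();
const decoder = new TextDecoder();
for (let chunk = await reader.read(); !chunk.done; chunk = await reader.read()) {
  console.log(decoder.decode(chunk.value));
}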
Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
'use client';

import { useChat } from 'ai/react';

export default function Chat() {
  const { messages, input, handleInputChange, handleSubmit } = useChat();
  return (
    <div>
      {messages.map((m) => (
        <div key={m.id}>
          {m.role === 'user' ? 'User: ' : 'AI: '}
          {m.content}
        </div>
      ))}
      <form onSubmit={handleSubmit}>
        <input
          value={input}
          placeholder="Say something..."
          onChange={handleInputChange}
        />
      </form>
    </div>
  );
}
Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
'use server';

import { generateText } from 'ai';
import { createPortkey } from '@portkey-ai/vercel-provider';

export const generateTextAction = async () => {
  const llmClient = createPortkey({
    apiKey: 'PORTKEY_API_KEY',
    virtualKey: 'YOUR_OPENAI_VIRTUAL_KEY', // head over to https://app.portkey.ai to create a virtual key

    // Portkey's config also supports loadbalance, fallback, retries, timeouts,
    // semantic caching, conditional routing, guardrails, etc. See the Portkey docs to learn more.
  });

  // Learn more at docs.portkey.ai

  const result = await generateText({
    model: llmClient.completionModel('gpt-3.5-turbo'), // choose the model you want
    prompt: 'tell me a joke',
  });

  return result.text;
};
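
The placeholder strings above stand in for real credentials. In an actual deployment the keys belong in environment variables rather than source — a minimal sketch, assuming PORTKEY_API_KEY and OPENAI_VIRTUAL_KEY are defined in .env.local (both variable names are illustrative):

const llmClient = createPortkey({
  apiKey: process.env.PORTKEY_API_KEY!, // never commit keys to source control
  virtualKey: process.env.OPENAI_VIRTUAL_KEY!,
});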
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
'use server';

import { generateText } from 'ai';
import { createPortkey } from '@portkey-ai/vercel-provider';

export const generateTextAction = async () => {
  // Conditional routing config
  const portkey_config = {
    strategy: {
      mode: 'conditional',
      conditions: [
        {
          query: { 'metadata.user_plan': { $eq: 'paid' } },
          then: 'anthropic-claude',
        },
        {
          query: { 'metadata.user_plan': { $eq: 'free' } },
          then: 'openai-gpt-4',
        },
      ],
      default: 'openai-gpt-4',
    },
    targets: [
      {
        name: 'anthropic-claude',
        provider: 'anthropic',
        api_key: 'YOUR_ANTHROPIC_API_KEY',
        override_params: {
          model: 'claude-3-5-sonnet-20240620',
        },
      },
      {
        name: 'openai-gpt-4',
        provider: 'openai',
        api_key: 'YOUR_OPENAI_API_KEY',
        override_params: {
          model: 'gpt-4o',
        },
      },
    ],
  };

  const llmClient = createPortkey({
    apiKey: 'PORTKEY_API_KEY',
    config: portkey_config,
    // Portkey's config also supports loadbalance, fallback, retries, timeouts,
    // semantic caching, conditional routing, guardrails, etc. See the Portkey docs to learn more.
    // API keys are supplied inside the config, so no virtual key is needed.
  });

  const result = await generateText({
    model: llmClient.completionModel('gpt-3.5-turbo'), // choose the model you want
    prompt: 'tell me a joke',
  });

  return result.text;
};
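
Reading the config top to bottom: a request carrying metadata user_plan = 'paid' is routed to the anthropic-claude target (claude-3-5-sonnet), user_plan = 'free' goes to openai-gpt-4 (gpt-4o), and a request matching neither condition falls through to the default, also openai-gpt-4. How the metadata is attached to each request is gateway configuration covered in Portkey's docs and not shown in this example.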
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
'use server';

import { generateText } from 'ai';
import { createPortkey } from '@portkey-ai/vercel-provider';

export const generateTextAction = async () => {
  // Fallback config: targets are tried in order, so Anthropic serves the
  // request first and OpenAI takes over if it fails.
  const portkey_config = {
    strategy: {
      mode: 'fallback',
    },
    targets: [
      {
        provider: 'anthropic',
        api_key: 'YOUR_ANTHROPIC_API_KEY',
        override_params: {
          model: 'claude-3-5-sonnet-20240620',
        },
      },
      {
        provider: 'openai',
        api_key: 'YOUR_OPENAI_API_KEY',
        override_params: {
          model: 'gpt-4o',
        },
      },
    ],
  };

  const llmClient = createPortkey({
    apiKey: 'PORTKEY_API_KEY',
    config: portkey_config,
    // Portkey's config also supports loadbalance, fallback, retries, timeouts,
    // semantic caching, conditional routing, guardrails, etc. See the Portkey docs to learn more.
    // API keys are supplied inside the config, so no virtual key is needed.
  });

  // Learn more at docs.portkey.ai

  const result = await generateText({
    model: llmClient.completionModel('gpt-3.5-turbo'), // choose the model you want
    prompt: 'tell me a joke',
  });

  return result.text;
};
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
'use server';

import { generateText } from 'ai';
import { createPortkey } from '@portkey-ai/vercel-provider';

export const generateTextAction = async () => {
  const portkey_config = {
    retry: {
      attempts: 3,
    },
    cache: {
      mode: 'simple',
    },
    virtual_key: 'openai-xxx',
    before_request_hooks: [
      {
        id: 'input-guardrail-id-xx',
      },
    ],
    after_request_hooks: [
      {
        id: 'output-guardrail-id-xx',
      },
    ],
  };

  const llmClient = createPortkey({
    apiKey: 'PORTKEY_API_KEY',
    config: portkey_config,
    // Portkey's config also supports loadbalance, fallback, retries, timeouts,
    // semantic caching, conditional routing, guardrails, etc. See the Portkey docs to learn more.
    // Here the virtual key is supplied inside the config above.
  });

  // Learn more at docs.portkey.ai

  const result = await generateText({
    model: llmClient.completionModel('gpt-3.5-turbo'), // choose the model you want
    prompt: 'tell me a joke',
  });

  return result.text;
};
Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
'use server';

import { generateText } from 'ai';
import { createPortkey } from '@portkey-ai/vercel-provider';

export const generateTextAction = async () => {
  // Load balancing config: traffic is split across the targets by weight.
  const portkey_config = {
    strategy: {
      mode: 'loadbalance',
    },
    targets: [
      {
        provider: 'anthropic',
        api_key: 'YOUR_ANTHROPIC_API_KEY',
        override_params: {
          model: 'claude-3-5-sonnet-20240620',
        },
        weight: 0.25,
      },
      {
        provider: 'openai',
        api_key: 'YOUR_OPENAI_API_KEY',
        override_params: {
          model: 'gpt-4o',
        },
        weight: 0.75,
      },
    ],
  };

  const llmClient = createPortkey({
    apiKey: 'PORTKEY_API_KEY',
    config: portkey_config,
    // Portkey's config also supports loadbalance, fallback, retries, timeouts,
    // semantic caching, conditional routing, guardrails, etc. See the Portkey docs to learn more.
    // API keys are supplied inside the config, so no virtual key is needed.
  });

  // Learn more at docs.portkey.ai

  const result = await generateText({
    model: llmClient.completionModel('gpt-3.5-turbo'), // choose the model you want
    prompt: 'tell me a joke',
  });

  return result.text;
};
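
The weights are relative shares of traffic: with 0.25 and 0.75, roughly one request in four goes to the Anthropic target and the rest to OpenAI. Since only the ratio should matter, weights of 1 and 3 would be expected to produce the same split.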
Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
'use client';

import { Button } from '@/components/ui/button';
import { generateTextAction } from './action';
import { useState } from 'react';

export default function Page() {
  const [generation, setGeneration] = useState('');
  return (
    <div className="space-y-4">
      <h1 className="text-xl font-semibold">Generate Text Example</h1>
      <Button
        onClick={async () => {
          const result = await generateTextAction();
          setGeneration(result);
        }}
      >
        Tell me a joke
      </Button>
      <pre>{JSON.stringify(generation, null, 2)}</pre>
    </div>
  );
}
Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
'use server';

import { createAI, getMutableAIState, streamUI } from 'ai/rsc';
import { openai } from '@ai-sdk/openai';
import { ReactNode } from 'react';
import { z } from 'zod';
import { nanoid } from 'nanoid';
import { JokeComponent } from './joke-component';
import { generateObject } from 'ai';
import { jokeSchema } from './joke';

export interface ServerMessage {
  role: 'user' | 'assistant';
  content: string;
}

export interface ClientMessage {
  id: string;
  role: 'user' | 'assistant';
  display: ReactNode;
}

export async function continueConversation(
  input: string
): Promise<ClientMessage> {
  'use server';

  const history = getMutableAIState();

  const result = await streamUI({
    model: openai('gpt-4o'),
    messages: [...history.get(), { role: 'user', content: input }],
    text: ({ content, done }) => {
      if (done) {
        history.done((messages: ServerMessage[]) => [
          ...messages,
          { role: 'assistant', content },
        ]);
      }

      return <div>{content}</div>;
    },
    tools: {
      tellAJoke: {
        description: 'Tell a joke',
        parameters: z.object({
          location: z.string().describe("the user's location"),
        }),
        generate: async function* ({ location }) {
          yield <div>loading...</div>;
          const joke = await generateObject({
            model: openai('gpt-4o'),
            schema: jokeSchema,
            prompt:
              'Generate a joke that incorporates the following location: ' +
              location,
          });
          return <JokeComponent joke={joke.object} />;
        },
      },
    },
  });

  return {
    id: nanoid(),
    role: 'assistant',
    display: result.value,
  };
}

export const AI = createAI<ServerMessage[], ClientMessage[]>({
  actions: {
    continueConversation,
  },
  initialAIState: [],
  initialUIState: [],
});
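
The exported AI provider is meant to wrap the app (for example in the root layout), after which client components can drive the conversation through the ai/rsc hooks. A minimal client sketch, assuming this file lives next to the actions above (the file name and markup are illustrative):

'use client';

import { useState } from 'react';
import { useActions, useUIState } from 'ai/rsc';
import type { AI } from './actions';

export default function Chat() {
  const [input, setInput] = useState('');
  const [conversation, setConversation] = useUIState<typeof AI>();
  const { continueConversation } = useActions<typeof AI>();

  return (
    <div>
      {conversation.map((message) => (
        <div key={message.id}>{message.display}</div>
      ))}
      <form
        onSubmit={async (e) => {
          e.preventDefault();
          setInput('');
          // Append the assistant's streamed UI to the conversation.
          const message = await continueConversation(input);
          setConversation((current) => [...current, message]);
        }}
      >
        <input value={input} onChange={(e) => setInput(e.target.value)} />
      </form>
    </div>
  );
}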
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
'use client';

import { useState } from 'react';
import { Button } from '@/components/ui/button';
import { Joke } from './joke';

export const JokeComponent = ({ joke }: { joke?: Joke }) => {
  const [showPunchline, setShowPunchline] = useState(false);
  return (
    <div className="bg-neutral-100 p-4 rounded-md m-4 max-w-prose flex items-center justify-between">
      <p>{showPunchline ? joke?.punchline : joke?.setup}</p>
      <Button
        onClick={() => setShowPunchline(true)}
        disabled={showPunchline}
        variant="outline"
      >
        Show Punchline!
      </Button>
    </div>
  );
};
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
import { DeepPartial } from 'ai';
import { z } from 'zod';

export const jokeSchema = z.object({
  setup: z.string().describe('the setup of the joke'),
  punchline: z.string().describe('the punchline of the joke'),
});

export type Joke = DeepPartial<typeof jokeSchema>;
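
Joke is typed with the AI SDK's DeepPartial helper so a component can render an object that is still streaming in; every field may be undefined mid-stream, which is why JokeComponent guards with joke?.setup. For the fully-validated shape, zod's inference gives the complete type — a one-line sketch:

type CompleteJoke = z.infer<typeof jokeSchema>; // { setup: string; punchline: string }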
