refactor: adapt to vercel/ai v3 (#12)
himself65 authored Aug 29, 2024
1 parent 4b63739 commit 1fcf1df
Showing 23 changed files with 8,045 additions and 4,700 deletions.
29 changes: 7 additions & 22 deletions .github/workflows/build.yml
```diff
@@ -2,38 +2,23 @@ name: Build
 
 on:
   push:
-    branches: [ main ]
+    branches: [main]
   pull_request:
-    branches: [ main ]
+    branches: [main]
 
 jobs:
   build:
     name: Build
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: pnpm/action-setup@v2
-        name: Install pnpm
-        id: pnpm-install
-      - name: Use Node.js LTS
+      - name: Install pnpm
+        uses: pnpm/action-setup@v4
+      - name: Use Node.js
         uses: actions/setup-node@v3
         with:
-          node-version-file: '.nvmrc'
-          cache: 'pnpm'
-      - name: Get pnpm store directory
-        id: pnpm-cache
-        shell: bash
-        run: |
-          echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT
-      - uses: actions/cache@v3
-        name: Setup pnpm cache
-        with:
-          path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
-          key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
-          restore-keys: |
-            ${{ runner.os }}-pnpm-store-
+          node-version-file: ".nvmrc"
+          cache: "pnpm"
       - name: Install dependencies
         run: pnpm install
       - name: Build
```
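Net effect: the hand-rolled pnpm store cache (`pnpm store path` piped into `actions/cache`) is replaced by `setup-node`'s built-in `cache: "pnpm"` support, and `pnpm/action-setup` is bumped from v2 to v4.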
23 changes: 23 additions & 0 deletions .github/workflows/lint.yml
```diff
@@ -0,0 +1,23 @@
+name: Lint
+
+on:
+  push:
+  pull_request:
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install pnpm
+        uses: pnpm/action-setup@v4
+      - name: Use Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version-file: ".nvmrc"
+          cache: "pnpm"
+      - name: Install dependencies
+        run: pnpm install
+      - name: Lint
+        run: pnpm run lint
```
2 changes: 1 addition & 1 deletion .nvmrc
```diff
@@ -1 +1 @@
-18
+20
```
176 changes: 81 additions & 95 deletions README.md
````diff
@@ -17,55 +17,50 @@ yarn add ai jotai-ai
 
 `chatAtoms` is a collection of atoms for a chatbot like [`useChat`](https://sdk.vercel.ai/docs/api-reference/use-chat).
 
 ```js
-import { useAtomValue, useAtom, useSetAtom } from 'jotai'
-import { chatAtoms } from 'jotai-ai'
-
-const {
-  messagesAtom,
-  inputAtom,
-  submitAtom,
-  isLoadingAtom,
-} = chatAtoms()
-
-function Messages () {
-  const messages = useAtomValue(messagesAtom)
+import { useAtomValue, useAtom, useSetAtom } from "jotai";
+import { chatAtoms } from "jotai-ai";
+
+const { messagesAtom, inputAtom, submitAtom, isLoadingAtom } = chatAtoms();
+
+function Messages() {
+  const messages = useAtomValue(messagesAtom);
   return (
     <>
       {messages.length > 0
-        ? messages.map(m => (
-          <div key={m.id} className='whitespace-pre-wrap'>
-            {m.role === 'user' ? 'User: ' : 'AI: '}
-            {m.content}
-          </div>
-        ))
+        ? messages.map((m) => (
+            <div key={m.id} className="whitespace-pre-wrap">
+              {m.role === "user" ? "User: " : "AI: "}
+              {m.content}
+            </div>
+          ))
         : null}
     </>
-  )
+  );
 }
 
-function ChatInput () {
-  const [input, handleInputChange] = useAtom(inputAtom)
-  const handleSubmit = useSetAtom(submitAtom)
+function ChatInput() {
+  const [input, handleInputChange] = useAtom(inputAtom);
+  const handleSubmit = useSetAtom(submitAtom);
   return (
     <form onSubmit={handleSubmit}>
       <input
         value={input}
-        placeholder='Say something...'
+        placeholder="Say something..."
         onChange={handleInputChange}
       />
     </form>
-  )
+  );
 }
 
-function App () {
-  const isLoading = useAtomValue(isLoadingAtom)
+function App() {
+  const isLoading = useAtomValue(isLoadingAtom);
   return (
     <main>
-      <Messages/>
-      <ChatInput/>
+      <Messages />
+      <ChatInput />
       {isLoading ? <div>Loading...</div> : null}
     </main>
-  )
+  );
 }
 ```
````

````diff
@@ -89,53 +84,50 @@ offers an atomic global state management system that is both powerful and flexible
 For example, you can customize the `messagesAtom` to add more functionality, such as `clearMessagesAtom`:
 
 ```js
-const { messagesAtom } = chatAtoms()
+const { messagesAtom } = chatAtoms();
 
-const clearMessagesAtom = atom(
-  null,
-  async (get, set) => set(messagesAtom, [])
-)
+const clearMessagesAtom = atom(null, async (get, set) => set(messagesAtom, []));
 
 const Actions = () => {
-  const clear = useSetAtom(clearMessagesAtom)
-  return (
-    <button onClick={clear}>Clear Messages</button>
-  )
-}
+  const clear = useSetAtom(clearMessagesAtom);
+  return <button onClick={clear}>Clear Messages</button>;
+};
 ```
````

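For read-only views, the same pattern applies with a derived atom. A minimal sketch, not part of this commit — note that `messagesAtom` may hold a promise when messages are loaded asynchronously (see the Suspense section below), so the getter awaits it:

```js
// Sketch: the latest assistant reply, or null if none yet.
const lastAssistantMessageAtom = atom(async (get) => {
  const messages = await get(messagesAtom);
  return [...messages].reverse().find((m) => m.role === "assistant") ?? null;
});
```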
````diff
 Also, `chatAtoms` is created outside the component lifecycle,
 so you can share the state between different components easily.
 
 ```js
-const { messagesAtom } = chatAtoms()
+const { messagesAtom } = chatAtoms();
 
 const Messages = () => {
-  const messages = useAtomValue(messagesAtom)
+  const messages = useAtomValue(messagesAtom);
   return (
     <div>
-      {messages.map(m => (
-        <div key={m.id} className='whitespace-pre-wrap'>
-          {m.role === 'user' ? 'User: ' : 'AI: '}
+      {messages.map((m) => (
+        <div key={m.id} className="whitespace-pre-wrap">
+          {m.role === "user" ? "User: " : "AI: "}
           {m.content}
         </div>
       ))}
     </div>
-  )
-}
+  );
+};
 
 const UserMessages = () => {
-  const messages = useAtomValue(messagesAtom)
+  const messages = useAtomValue(messagesAtom);
   return (
     <div>
-      {messages.filter(m => m.role === 'user').map(m => (
-        <div key={m.id} className='whitespace-pre-wrap'>
-          User: {m.content}
-        </div>
-      ))}
+      {messages
+        .filter((m) => m.role === "user")
+        .map((m) => (
+          <div key={m.id} className="whitespace-pre-wrap">
+            User: {m.content}
+          </div>
+        ))}
     </div>
-  )
-}
+  );
+};
 ```
````
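Since every `chatAtoms()` call creates an independent set of atoms, nothing stops you from running several conversations side by side. A sketch — the `api` endpoint option mirrors `useChat`'s and is an assumption here, not something this diff shows:

```js
// Two independent chats, each with its own messages, input, and loading state.
const supportChat = chatAtoms({ api: "/api/support-chat" }); // hypothetical endpoints
const salesChat = chatAtoms({ api: "/api/sales-chat" });

const SupportMessages = () => {
  const messages = useAtomValue(supportChat.messagesAtom);
  return <div>{messages.length} support messages</div>;
};
```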

#### Load messages on demand with React Suspense
````diff
@@ -144,80 +136,74 @@ const UserMessages = () => {
 by `useChat`.
 
 ```js
-const {
-  messagesAtom,
-  inputAtom,
-  submitAtom
-} = chatAtoms({
+const { messagesAtom, inputAtom, submitAtom } = chatAtoms({
   initialMessages: async () => {
     // fetch messages from anywhere
-    const messages = await fetchMessages()
-    return messages
-  }
-})
+    const messages = await fetchMessages();
+    return messages;
+  },
+});
 ```
````

````diff
 In combination with [`jotai-effect`](https://github.com/jotaijs/jotai-effect),
 you can create a chatbot with local storage support.
 
 ```js
-const {
-  messagesAtom
-} = chatAtoms({
+const { messagesAtom } = chatAtoms({
   initialMessages: async () => {
     /**
      * call `noSSR` function if you are using next.js.
      * @link https://foxact.skk.moe/no-ssr
      */
-    // noSSR()
-    const idb = await import('idb-keyval')
-    return (await idb.get('messages')) ?? []
-  }
-})
+    // noSSR()
+    const idb = await import("idb-keyval");
+    return (await idb.get("messages")) ?? [];
+  },
+});
 
-import { atomEffect } from 'jotai-effect'
+import { atomEffect } from "jotai-effect";
 
 const saveMessagesEffectAtom = atomEffect((get, set) => {
-  const messages = get(messagesAtom)
-  const idbPromise = import('idb-keyval')
-  const abortController = new AbortController()
-  idbPromise.then(async idb => {
+  const messages = get(messagesAtom);
+  const idbPromise = import("idb-keyval");
+  const abortController = new AbortController();
+  idbPromise.then(async (idb) => {
     if (abortController.signal.aborted) {
-      return
+      return;
     }
-    await idb.set('messages', await messages)
-  })
+    await idb.set("messages", await messages);
+  });
   return () => {
-    abortController.abort()
-  }
-})
+    abortController.abort();
+  };
+});
 
 const Messages = () => {
-  const messages = useAtomValue(messagesAtom)
+  const messages = useAtomValue(messagesAtom);
   return (
     <>
       {messages.length > 0
-        ? messages.map(m => (
-          <div key={m.id} className="whitespace-pre-wrap">
-            {m.role === 'user' ? 'User: ' : 'AI: '}
-            {m.content}
-          </div>
-        ))
+        ? messages.map((m) => (
+            <div key={m.id} className="whitespace-pre-wrap">
+              {m.role === "user" ? "User: " : "AI: "}
+              {m.content}
+            </div>
+          ))
         : null}
     </>
-  )
-}
+  );
+};
 
 const App = () => {
-  useAtomValue(saveMessagesEffectAtom)
+  useAtomValue(saveMessagesEffectAtom);
   return (
     <main>
       <Suspense fallback="loading messages...">
-        <Messages/>
+        <Messages />
       </Suspense>
     </main>
-  )
-}
+  );
+};
 ```
 
 ## LICENSE
````
examples/llamaindex-straming/app/api/chat/llamaindex-stream.ts
```diff
@@ -4,7 +4,7 @@ import {
   trimStartOfStreamHelper,
   type AIStreamCallbacksAndOptions,
 } from "ai";
-import type { Response } from 'llamaindex'
+import type { Response } from "llamaindex";
 
 function createParser(res: AsyncIterable<Response>) {
   const trimStartOfStream = trimStartOfStreamHelper();
```
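The rendered diff cuts `createParser` off here. For orientation, a minimal sketch of what such a parser typically looks like — the `value.response` field and the `ReadableStream` wiring are assumptions based on the visible fragment, not code from this commit:

```ts
import { trimStartOfStreamHelper } from "ai";
import type { Response } from "llamaindex";

// Sketch: wrap the LlamaIndex async iterable in a web ReadableStream,
// trimming leading whitespace from the very first chunk only.
function createParser(res: AsyncIterable<Response>): ReadableStream<string> {
  const it = res[Symbol.asyncIterator]();
  const trimStartOfStream = trimStartOfStreamHelper();
  return new ReadableStream<string>({
    async pull(controller) {
      const { value, done } = await it.next();
      if (done) {
        controller.close();
        return;
      }
      const text = trimStartOfStream(value.response ?? ""); // `response` field is an assumption
      if (text) controller.enqueue(text);
    },
  });
}
```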
10 changes: 5 additions & 5 deletions examples/llamaindex-straming/app/api/chat/route.ts
```diff
@@ -3,7 +3,7 @@ import { OpenAI } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
-import type { MessageType } from 'llamaindex/llm/types'
+import type { MessageType } from "llamaindex/llm/types";
 
 export const runtime = "nodejs";
 export const dynamic = "force-dynamic";
@@ -30,13 +30,13 @@ export async function POST(request: NextRequest) {
   const chatEngine = await createChatEngine(llm);
 
   const response = await chatEngine.chat({
-    message : lastMessage.content,
-    chatHistory : messages.map((message) => ({
+    message: lastMessage.content,
+    chatHistory: messages.map((message) => ({
       content: message.content,
       role: message.role as MessageType,
     })),
-    stream: true
-  })
+    stream: true,
+  });
 
   // Transform the response into a readable stream
   const stream = LlamaIndexStream(response);
```
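Pieced together from the fragments above, the handler has roughly the following shape. A sketch only — the request parsing, model choice, error handling, and `StreamingTextResponse` return are assumptions, not part of the visible diff:

```ts
import { StreamingTextResponse } from "ai";
import { OpenAI } from "llamaindex";
import { NextRequest, NextResponse } from "next/server";
import type { MessageType } from "llamaindex/llm/types";
import { createChatEngine } from "./engine";
import { LlamaIndexStream } from "./llamaindex-stream";

export async function POST(request: NextRequest) {
  try {
    const { messages } = await request.json();
    const lastMessage = messages[messages.length - 1];

    const llm = new OpenAI({ model: "gpt-3.5-turbo" }); // model choice is an assumption
    const chatEngine = await createChatEngine(llm);

    // Send the prior history plus the new message, and stream the reply.
    const response = await chatEngine.chat({
      message: lastMessage.content,
      chatHistory: messages.map((message) => ({
        content: message.content,
        role: message.role as MessageType,
      })),
      stream: true,
    });

    const stream = LlamaIndexStream(response);
    return new StreamingTextResponse(stream);
  } catch (error) {
    return NextResponse.json(
      { error: (error as Error).message },
      { status: 500 },
    );
  }
}
```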