From 660a8622c2fd7edd231e971111526e322df00211 Mon Sep 17 00:00:00 2001
From: Adam Fields
Date: Wed, 21 Feb 2024 12:26:30 -0500
Subject: [PATCH] Improve loading (#6)

* Disable input when no model loaded (115e780)
* Disable Shiki (f803f7f)
* Fix mobile height (075a611)
* Update README (4da1500)
* 0.1.2 (962cb82)
---
 index.html                      | 23 ++++++++++++++----
 package.json                    |  2 +-
 readme.md                       |  5 +---
 src/components/App.tsx          | 28 ++++++++++++++++++----
 src/components/MessageList.tsx  | 41 +++++++++++++++++++++++----------
 src/components/PromptInput.tsx  | 23 ++++++++++++------
 src/components/RuntimeStats.tsx | 10 ++++----
 7 files changed, 94 insertions(+), 38 deletions(-)

diff --git a/index.html b/index.html
index 27cb41e..35435b6 100644
--- a/index.html
+++ b/index.html
@@ -1,5 +1,5 @@
 [hunk content lost in extraction: one line replaced; only the "Chat" title text survives as context]
@@ -10,8 +10,23 @@
 [hunk content lost in extraction: 3 lines removed, 18 lines added]
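The PromptInput.tsx hunks listed in the diffstat are not reproduced above, so here is a rough sketch of how the "Disable input when no model loaded" change could be wired from the jotai atoms that App.tsx already reads; the `../atoms` import path, the hook name, and the derived flag are assumptions for illustration, not code taken from the patch:

```tsx
// Hypothetical sketch: derive a single disabled flag for the prompt input.
// activeModelIdAtom, loadingAtom, and generatingAtom appear in App.tsx;
// the module path and this hook are assumed, not part of the actual diff.
import { useAtomValue } from 'jotai'

import { activeModelIdAtom, generatingAtom, loadingAtom } from '../atoms'

export function usePromptDisabled(): boolean {
  const activeModelId = useAtomValue(activeModelIdAtom)
  const loading = useAtomValue(loadingAtom)
  const generating = useAtomValue(generatingAtom)
  // Disable while no model is loaded, a model is still loading, or a reply is streaming.
  return !activeModelId || loading || generating
}
```

The textarea and send button would then receive something like `disabled={usePromptDisabled()}`, which is what keeps input inert until a model has finished loading.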
diff --git a/package.json b/package.json
index fdf9bff..c3bdd89 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "private": true,
   "name": "chat",
-  "version": "0.1.1",
+  "version": "0.1.2",
   "type": "module",
   "scripts": {
     "start": "vite",
diff --git a/readme.md b/readme.md
index 8654a30..5b13304 100644
--- a/readme.md
+++ b/readme.md
@@ -2,7 +2,7 @@
 
 [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/adamelliotfields/chat?devcontainer_path=.devcontainer/devcontainer.json&machine=basicLinux32gb)
 
-Static chat UI for [Web LLM](https://webllm.mlc.ai) on GitHub Pages. Inspired by [Perplexity Labs](https://labs.perplexity.ai).
+React chat UI for [Web LLM](https://webllm.mlc.ai) on GitHub Pages. Inspired by [Perplexity Labs](https://labs.perplexity.ai).
 
 https://github.com/adamelliotfields/chat/assets/7433025/07565763-606b-4de3-aa2d-8d5a26c83941
 
@@ -129,11 +129,8 @@ See [utils/vram_requirements](https://github.com/mlc-ai/web-llm/tree/main/utils/
 
 - [ ] Dark mode
 - [ ] Settings menu (temperature, system message, etc.)
-- [ ] Adapters for alternative backends (e.g., Ollama)
 - [ ] Inference on web worker
 - [ ] Offline/PWA
 - [ ] Cache management
-- [ ] GPU stats
 - [ ] Image upload for multimodal like [LLaVA](https://llava-vl.github.io)
-- [ ] [StableLM Zephyr 3B](https://huggingface.co/stabilityai/stablelm-zephyr-3b)
 - [ ] Tailwind class sorting by Biome 🤞
diff --git a/src/components/App.tsx b/src/components/App.tsx
index 4b01ee7..14028ab 100644
--- a/src/components/App.tsx
+++ b/src/components/App.tsx
@@ -2,7 +2,7 @@ import { ChatModule } from '@mlc-ai/web-llm'
 import { useAtom, useAtomValue, useSetAtom } from 'jotai'
 import { RESET } from 'jotai/utils'
 import { Square, Trash } from 'lucide-react'
-import { type MouseEvent } from 'react'
+import { type MouseEvent, useEffect } from 'react'
 
 import {
   activeModelIdAtom,
@@ -32,13 +32,28 @@ export default function App({ chat }: AppProps) {
   const [generating, setGenerating] = useAtom(generatingAtom)
   const [loading, setLoading] = useAtom(loadingAtom)
   const [conversation, setConversation] = useAtom(conversationAtom)
-  const setActiveModelId = useSetAtom(activeModelIdAtom)
+  const [activeModelId, setActiveModelId] = useAtom(activeModelIdAtom)
   const setStatsText = useSetAtom(runtimeStatsTextAtom)
   const config = useAtomValue(configAtom)
 
   const trashDisabled = conversation.messages.length < 1
   const stopDisabled = loading || !generating
 
+  // set initial status message
+  useEffect(() => {
+    const content =
+      "### Welcome\n\nThis app runs _small_ LLMs in your browser using your device's GPU. Select a model and press the power button to load it.\n\nErrors? Check [webgpureport.org](https://webgpureport.org) to inspect your system. A VPN can get around some network issues.\n\nRefresh the page to see this message again."
+    setConversation(() => ({
+      messages: [
+        {
+          messageRole: 'status',
+          content
+        }
+      ],
+      stream: null
+    }))
+  }, [setConversation])
+
   const onGenerate: GenerateCallback = (_, content) => {
     setConversation(({ messages }) => ({
       messages,
@@ -185,13 +200,13 @@ export default function App({ chat }: AppProps) {
 [remaining JSX lines in this hunk were stripped during extraction]
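The welcome message above tells the user to "Select a model and press the power button to load it." The power-button handler itself is outside the hunks shown here; a minimal sketch of what that load flow might look like around `ChatModule.reload`, with the function name, setter parameters, and error handling assumed (only `chat.reload` comes from the web-llm API that App.tsx imports):

```tsx
// Hypothetical sketch: load the selected model, tracking state in the same
// atoms App.tsx uses. The function name and setter signatures are assumed.
import type { ChatModule } from '@mlc-ai/web-llm'

async function loadModel(
  chat: ChatModule,
  modelId: string,
  setLoading: (value: boolean) => void,
  setActiveModelId: (value: string | null) => void
): Promise<void> {
  setLoading(true)
  try {
    // Fetches (or reuses cached) weights and prepares the WebGPU runtime.
    await chat.reload(modelId)
    setActiveModelId(modelId)
  } finally {
    setLoading(false)
  }
}
```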