From cf7988ad156755217da064604e2eea82c4dcf0ed Mon Sep 17 00:00:00 2001
From: deftio
Date: Sat, 27 Jul 2024 02:52:34 -0700
Subject: [PATCH] updated examples

---
 dev/todo.md                      |   9 ++-
 examples/index.html              |   4 +
 examples/ollama_adapters.js      |  17 ++---
 examples/ollama_with_memory.html | 122 +++++++++++++++++++++++++++++++
 src/quikchat.js                  |   4 +-
 5 files changed, 144 insertions(+), 12 deletions(-)
 create mode 100644 examples/ollama_with_memory.html

diff --git a/dev/todo.md b/dev/todo.md
index 3cffdce..14f9498 100644
--- a/dev/todo.md
+++ b/dev/todo.md
@@ -1,10 +1,17 @@
 # QuikChat JS TODO list
 
-* make robust the add/remove/update message (harden id scheme for messages)
+* (OK) make robust the add/remove/update message (harden id scheme for messages)
 * (OK) add center div styling (addMessage(content, user, center))
 * show/remove timestamps
+* move callback from {meta} to 2nd param of constructor
 * add support for right to left languages by making css with [send] on left
 * provide minified css
 * add functions for light, dark, debug styles to be built-in
+* rename project..
+* example ChatGPT
+* example Mistral
+* fix alternate light and dark to use css nth-child (etc)
+
+
diff --git a/examples/index.html b/examples/index.html
index de631b7..1a50fc7 100644
--- a/examples/index.html
+++ b/examples/index.html
@@ -59,4 +59,8 @@
     <div>
         <h3>Example 4: Simple Ollama</h3>
         <p>This example shows how to use quikchat with a local LLM using Ollama</p>
         <a href="ollama.html">View Example Ollama</a>
+
+        <h3>Example 5: LLM with Conversational Memory</h3>
+        <p>This example demonstrates how to use quikchat with a local LLM using Ollama, where quikchat provides the chat history and Ollama provides the LLM model. This allows the chat to "remember" what is being discussed.</p>
+        <a href="ollama_with_memory.html">View Example Ollama with Memory</a>
\ No newline at end of file
diff --git a/examples/ollama_adapters.js b/examples/ollama_adapters.js
index e62e4b3..492a788 100644
--- a/examples/ollama_adapters.js
+++ b/examples/ollama_adapters.js
@@ -4,7 +4,7 @@ contains some small wrappers for calling ollama
 
 // this calls the Ollama Completion API without token by token streaming
 function getOllamaCompletionCallback (chatInstance,userInput) {
-    let x= chatInstance.messageAddNew(userInput, "user", "right");
+    chatInstance.messageAddNew(userInput, "user", "right"); // echoes the user input to the chat
     return fetch('http://localhost:11434/api/generate', {
         method: 'POST',
         headers: {
@@ -18,9 +18,7 @@
     })
     .then(response => response.json())
     .then(data => {
-        // console.log(data.response);
-        chatInstance.messageAddNew (data.response.trim(), "Bot", 'left'); // Use the chat instance to display the bot's response
-
+        chatInstance.messageAddNew (data.response.trim(), "Bot", 'left'); // display the bot's response
     })
     .catch(error => console.error('Error:', error));
 }
@@ -30,9 +28,8 @@
 
 // this calls the Ollama Streaming API with token streaming
 function getOllamaStreamingCallback (chatInstance,userInput) {
     var fetchedData = [];
-    let start = true;
-    chatInstance.messageAddNew(userInput, "user", "right");
+    chatInstance.messageAddNew(userInput, "user", "right"); // echoes the user input to the chat
     return fetch('http://localhost:11434/api/generate', {
         method: 'POST',
         headers: {
@@ -67,11 +64,11 @@
                 const json = JSON.parse(lines[i]);
                 const content = json.response;
                 if (start) {
-                    id = chatInstance.messageAddNew(content,"bot","left");
+                    id = chatInstance.messageAddNew(content,"bot","left"); // start a new chat message
                     start=false;
                 }
                 else {
-                    chatInstance.messageAppendContent(id,content);
+                    chatInstance.messageAppendContent(id,content); // append new content to the message
                 }
             }
@@ -82,9 +79,11 @@
     })
     .then(() => {
         // At this point, fetchedData contains all the parsed JSON objects
-        //console.log(fetchedData);
+        //console.log(fetchedData); // use this to see the entire response
     })
     .catch(error => {
         console.error('Fetch error:', error);
     });
 }
+
+
diff --git a/examples/ollama_with_memory.html b/examples/ollama_with_memory.html
new file mode 100644
index 0000000..2f80f72
--- /dev/null
+++ b/examples/ollama_with_memory.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html>
+<html>
+
+<head>
+    <meta charset="UTF-8">
+    <title>YackBox Demo with local Ollama</title>
+</head>
+
+<body>
+    <div>
+        <div>
+            Ollama quikchat Demo with Conversational Memory
+        </div>
+        <div>
+            This example demonstrates how to use quikchat with a local LLM using Ollama, where quikchat provides the
+            chat history and Ollama provides the LLM model. This allows the chat to "remember" what is being discussed.
+        </div>
+
+
+
+
+
+
+
+
+    </div>
+
+</body>
+</html>
\ No newline at end of file
diff --git a/src/quikchat.js b/src/quikchat.js
index bb4f7b0..34b2953 100644
--- a/src/quikchat.js
+++ b/src/quikchat.js
@@ -321,8 +321,8 @@ class quikchat {
         return this._theme;
     }
 
-    static getVersion() {
-        return {"version" : "1.0.3"}
+    static version() {
+        return {"version" : "1.0.4", "license" : "BSD-2", "url" : "https://github.com/deftio/quikchat"};
     }
 }
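The src/quikchat.js hunk renames the static method getVersion() to version() and bumps the reported version, so any existing call sites must be updated. A quick check after applying the patch:

```js
// quikchat's static version() (renamed from getVersion() in this patch)
console.log(quikchat.version());
// => { version: "1.0.4", license: "BSD-2", url: "https://github.com/deftio/quikchat" }
```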
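The memory behavior described in ollama_with_memory.html comes down to resending the accumulated conversation on every turn. The sketch below is illustrative only, not the script shipped in that file: it assumes Ollama's /api/chat endpoint (which accepts a full messages array), keeps its own hypothetical messageHistory array rather than relying on any particular quikchat history API, and uses a placeholder model name.

```js
// Illustrative sketch (not the patch's actual script): conversational memory
// by resending the full message history to Ollama's /api/chat endpoint.
const messageHistory = []; // hypothetical store of {role, content} turns

function getOllamaChatWithMemoryCallback(chatInstance, userInput) {
    chatInstance.messageAddNew(userInput, "user", "right"); // echo the user's turn
    messageHistory.push({ role: "user", content: userInput });

    return fetch('http://localhost:11434/api/chat', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            model: 'llama3',          // placeholder: any locally pulled model works
            messages: messageHistory, // the resent history is the "memory"
            stream: false
        })
    })
    .then(response => response.json())
    .then(data => {
        const reply = data.message.content.trim(); // /api/chat returns { message: { role, content }, ... }
        messageHistory.push({ role: "assistant", content: reply });
        chatInstance.messageAddNew(reply, "Bot", "left"); // display the bot's response
    })
    .catch(error => console.error('Error:', error));
}
```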