Skip to content

Commit

Permalink
updated examples
Browse files Browse the repository at this point in the history
  • Loading branch information
deftio committed Jul 27, 2024
1 parent a991724 commit cf7988a
Show file tree
Hide file tree
Showing 5 changed files with 144 additions and 12 deletions.
9 changes: 8 additions & 1 deletion dev/todo.md
Original file line number Diff line number Diff line change
@@ -1,10 +1,17 @@
# QuikChat JS TODO list

* make robust the add/remove/update message (harden id scheme for messages)
* (OK) make robust the add/remove/update message (harden id scheme for messages)
* (OK) add center div styling (addMessage(content, user, center))
* show/remove timestamps
* move callback from {meta} to 2nd param of constructor
* add support for right to left languages by making css with [send] on left
* provide minified css
* add functions for light, dark, debug styles to be built-in
* rename project..
* example ChatGPT
* example Mistral
* fix alternate light and dark to use css nth-child (etc)




4 changes: 4 additions & 0 deletions examples/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -59,4 +59,8 @@ <h2>Example 4: Simple Ollama</h2>
<p>This example shows how to use quikchat with a local LLM using Ollama</p>
<a href="./simple_ollama.html">View Example Ollama</a>
    <h2>Example 5: LLM with Conversational Memory</h2>
    <p>This example demonstrates how to use quikchat with a local LLM using Ollama, where quikchat provides the chat history and Ollama provides the LLM model. This allows the chat to "remember" what is being discussed.</p>
    <a href="./ollama_with_memory.html">View Example Ollama with Memory</a>
</body>
</html>
17 changes: 8 additions & 9 deletions examples/ollama_adapters.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ contains some small wrappers for calling ollama

// this calls the Ollama Completion API without token by token streaming
function getOllamaCompletionCallback (chatInstance,userInput) {
let x= chatInstance.messageAddNew(userInput, "user", "right");
chatInstance.messageAddNew(userInput, "user", "right"); // echos the user input to the chat
return fetch('http://localhost:11434/api/generate', {
method: 'POST',
headers: {
Expand All @@ -18,9 +18,7 @@ function getOllamaCompletionCallback (chatInstance,userInput) {
})
.then(response => response.json())
.then(data => {
// console.log(data.response);
chatInstance.messageAddNew (data.response.trim(), "Bot", 'left'); // Use the chat instance to display the bot's response

chatInstance.messageAddNew (data.response.trim(), "Bot", 'left'); // display the bot's response
})
.catch(error => console.error('Error:', error));
}
Expand All @@ -30,9 +28,8 @@ function getOllamaCompletionCallback (chatInstance,userInput) {
// this calls the Ollama Streaming API with token streaming
function getOllamaStreamingCallback (chatInstance,userInput) {
var fetchedData = [];

let start = true;
chatInstance.messageAddNew(userInput, "user", "right");
chatInstance.messageAddNew(userInput, "user", "right"); // echos the user input to the chat
return fetch('http://localhost:11434/api/generate', {
method: 'POST',
headers: {
Expand Down Expand Up @@ -67,11 +64,11 @@ function getOllamaStreamingCallback (chatInstance,userInput) {
const json = JSON.parse(lines[i]);
const content = json.response;
if (start) {
id = chatInstance.messageAddNew(content,"bot","left");
id = chatInstance.messageAddNew(content,"bot","left"); // start a new chat message
start=false;
}
else {
chatInstance.messageAppendContent(id,content);
chatInstance.messageAppendContent(id,content); // append new content to message
}

}
Expand All @@ -82,9 +79,11 @@ function getOllamaStreamingCallback (chatInstance,userInput) {
})
.then(() => {
// At this point, fetchedData contains all the parsed JSON objects
//console.log(fetchedData);
//console.log(fetchedData); // use this to see the entire response
})
.catch(error => {
console.error('Fetch error:', error);
});
}


122 changes: 122 additions & 0 deletions examples/ollama_with_memory.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
<!DOCTYPE html>
<html lang="en">

<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="icon" href="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
type="image/gif">

<title>YackBox Demo with local Ollama</title>
<!-- Include Bootstrap CSS file -->
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css">
    <!-- Include quikchat CSS file -->
<link rel="stylesheet" href="../../dist/quikchat.css">

    <!-- Include quikchat JavaScript file -->
<script src="../../dist/quikchat.umd.min.js"></script>

<script src="./ollama_adapters.js"></script>
<style>
html,
body {
width: 100%;
height: 100%;
}

.chat-container {
height: 70vh;
width: 100%;
}
</style>

</head>

<body>


<div class="container">
<br>
<h2 class="">Ollama quikchat Demo with Conversational Memory</h2>
        <p>This example demonstrates how to use quikchat with a local LLM using Ollama, where quikchat provides the
            chat history and Ollama provides the LLM model. This allows the chat to "remember" what is being discussed.</p>
<br>
<div class="row">
<div class="col-10">
<div class="chat-container" id="chat-container"></div>
</div>
</div>
</div>

<script>

// set up chat instance
const streamingChat = new quikchat('#chat-container', {
theme: 'quikchat-theme-light',
onSend: getOllamaStreamingCallback,
titleArea: { title: "Memory Chat", "show": true, "align": "left" },
});
//streamingChat.setDefaultUserName("Me", "left"); // this is the user's name
streamingChat.messageAddNew("How can I help? ", "bot", "left", "system");


// this calls the Ollama Streaming API with token streaming
function getOllamaStreamingCallback(chatInstance, userInput) {
var fetchedData = [];
let start = true;
chatInstance.messageAddNew(userInput, "user", "right"); // echos the user input to the chat
return fetch('http://localhost:11434/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
model: "llama3.1",
messages: chatInstance.historyGet(), // passes the chat history to the model
stream: true
})
})
.then(response => {
if (!response.ok) {
throw new Error(`HTTP error! Status: ${response.status}`);
}
return response.body.getReader();
})
.then(reader => {
let partialData = '';
let id;

// Read and process the NDJSON response
return reader.read().then(function processResult(result) {
if (result.done) {
return;
}
let x = new TextDecoder().decode(result.value, { stream: true });

let y = JSON.parse(x);
let content = y.message.content;//.message.content;
if (start) {
id = chatInstance.messageAddNew(content, "bot", "left"); // start a new chat message
start = false;
}
else {
chatInstance.messageAppendContent(id, content); // append new content to message
}

return reader.read().then(processResult);
});
})
.then(() => {
// At this point, fetchedData contains all the parsed JSON objects
//console.log(fetchedData); // use this to see the entire response
})
.catch(error => {
console.error('Fetch error:', error);
});
}


</script>
</body>

</html>
4 changes: 2 additions & 2 deletions src/quikchat.js
Original file line number Diff line number Diff line change
Expand Up @@ -321,8 +321,8 @@ class quikchat {
return this._theme;
}

static getVersion() {
return {"version" : "1.0.3"}
static version() {
return {"version" : "1.0.4", "license" : "BSD-2", "url" :"https://github/deftio/quikchat"};
}
}

Expand Down

0 comments on commit cf7988a

Please sign in to comment.