Skip to content

Commit 69ce5b4

Browse files
committed
Support thread level context (#29)
1 parent a37df65 commit 69ce5b4

File tree

7 files changed

+173
-39
lines changed

7 files changed

+173
-39
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -173,3 +173,4 @@ cython_debug/
173173
sync_db
174174
manage_db
175175
element-keys.txt
176+
context.db

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
11
# Changelog
22

3+
## 1.7.0
4+
- Support thread level context
5+
36
## 1.6.0
47
- Add GPT Vision
58

README.md

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ This is a simple Matrix bot that support using OpenAI API, Langchain to generate
1212
4. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
1313
5. Image Generation with [DALL·E](https://platform.openai.com/docs/api-reference/images/create) or [LocalAI](https://localai.io/features/image-generation/) or [stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API)
1414
6. GPT Vision(openai or [GPT Vision API](https://platform.openai.com/docs/guides/vision) compatible such as [LocalAI](https://localai.io/features/gpt-vision/))
15+
7. Room level and thread level chat context
1516

1617
## Installation and Setup
1718

@@ -21,10 +22,10 @@ For explainations and complete parameter list see: https://github.com/hibobmaste
2122
Create two empty file, for persist database only<br>
2223

2324
```bash
24-
touch sync_db manage_db
25+
touch sync_db context.db manage_db
2526
sudo docker compose up -d
2627
```
27-
manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database<br>
28+
manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database, context.db is for bot chat context<br>
2829
<hr>
2930
Normal Method:<br>
3031
system dependece: <code>libolm-dev</code>
@@ -115,12 +116,16 @@ LangChain(flowise) admin: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki
115116
![demo2](https://i.imgur.com/BKZktWd.jpg)
116117
https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/ <br>
117118

119+
## Thread level Context
120+
Mention the bot with a prompt, and it will reply in a thread.
121+
122+
To keep context, just send a prompt in the thread directly without mentioning it.
123+
124+
![thread level context 1](https://i.imgur.com/4vLvNCt.jpeg)
125+
![thread level context 2](https://i.imgur.com/1eb1Lmd.jpeg)
126+
118127

119128
## Thanks
120129
1. [matrix-nio](https://github.com/poljar/matrix-nio)
121130
2. [acheong08](https://github.com/acheong08)
122131
3. [8go](https://github.com/8go/)
123-
124-
<a href="https://jb.gg/OpenSourceSupport" target="_blank">
125-
<img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_beam.png" alt="JetBrains Logo (Main) logo." width="200" height="200">
126-
</a>

compose.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,9 @@ services:
1212
# use env file or config.json
1313
# - ./config.json:/app/config.json
1414
# use touch to create empty db file, for persist database only
15-
# manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database
15+
# manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database, context.db is for bot chat context
1616
- ./sync_db:/app/sync_db
17+
- ./context.db:/app/context.db
1718
# - ./manage_db:/app/manage_db
1819
# import_keys path
1920
# - ./element-keys.txt:/app/element-keys.txt

src/bot.py

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,8 @@ def __init__(
227227
self.new_prog = re.compile(r"\s*!new\s+(.+)$")
228228

229229
async def close(self, task: asyncio.Task) -> None:
230+
self.chatbot.cursor.close()
231+
self.chatbot.conn.close()
230232
await self.httpx_client.aclose()
231233
if self.lc_admin is not None:
232234
self.lc_manager.c.close()
@@ -251,6 +253,9 @@ async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> No
251253
# sender_id
252254
sender_id = event.sender
253255

256+
# event source
257+
event_source = event.source
258+
254259
# user_message
255260
raw_user_message = event.body
256261

@@ -265,6 +270,48 @@ async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> No
265270
# remove newline character from event.body
266271
content_body = re.sub("\r\n|\r|\n", " ", raw_user_message)
267272

273+
# @bot and reply in thread
274+
if "m.mentions" in event_source["content"]:
275+
if "user_ids" in event_source["content"]["m.mentions"]:
276+
# @bot
277+
if (
278+
self.user_id
279+
in event_source["content"]["m.mentions"]["user_ids"]
280+
):
281+
try:
282+
asyncio.create_task(
283+
self.thread_chat(
284+
room_id,
285+
reply_to_event_id,
286+
sender_id=sender_id,
287+
thread_root_id=reply_to_event_id,
288+
prompt=content_body,
289+
)
290+
)
291+
except Exception as e:
292+
logger.error(e, exc_info=True)
293+
294+
# thread conversation
295+
if "m.relates_to" in event_source["content"]:
296+
if "rel_type" in event_source["content"]["m.relates_to"]:
297+
thread_root_id = event_source["content"]["m.relates_to"]["event_id"]
298+
# thread is created by @bot
299+
if thread_root_id in self.chatbot.conversation:
300+
try:
301+
asyncio.create_task(
302+
self.thread_chat(
303+
room_id,
304+
reply_to_event_id,
305+
sender_id=sender_id,
306+
thread_root_id=thread_root_id,
307+
prompt=content_body,
308+
)
309+
)
310+
except Exception as e:
311+
logger.error(e, exc_info=True)
312+
313+
# common command
314+
268315
# !gpt command
269316
if (
270317
self.openai_api_key is not None
@@ -1300,6 +1347,37 @@ async def to_device_callback(self, event: KeyVerificationEvent) -> None:
13001347
estr = traceback.format_exc()
13011348
logger.info(estr)
13021349

1350+
# thread chat
1351+
async def thread_chat(
1352+
self, room_id, reply_to_event_id, thread_root_id, prompt, sender_id
1353+
):
1354+
try:
1355+
await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
1356+
content = await self.chatbot.ask_async_v2(
1357+
prompt=prompt,
1358+
convo_id=thread_root_id,
1359+
)
1360+
await send_room_message(
1361+
self.client,
1362+
room_id,
1363+
reply_message=content,
1364+
reply_to_event_id=reply_to_event_id,
1365+
sender_id=sender_id,
1366+
reply_in_thread=True,
1367+
thread_root_id=thread_root_id,
1368+
)
1369+
except Exception as e:
1370+
logger.error(e, exc_info=True)
1371+
await send_room_message(
1372+
self.client,
1373+
room_id,
1374+
reply_message=GENERAL_ERROR_MESSAGE,
1375+
sender_id=sender_id,
1376+
reply_to_event_id=reply_to_event_id,
1377+
reply_in_thread=True,
1378+
thread_root_id=thread_root_id,
1379+
)
1380+
13031381
# !chat command
13041382
async def chat(self, room_id, reply_to_event_id, prompt, sender_id, user_message):
13051383
try:

src/gptbot.py

Lines changed: 51 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -2,23 +2,15 @@
22
Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
33
A simple wrapper for the official ChatGPT API
44
"""
5+
import sqlite3
56
import json
67
from typing import AsyncGenerator
78
from tenacity import retry, wait_random_exponential, stop_after_attempt
89
import httpx
910
import tiktoken
1011

1112

12-
ENGINES = [
13-
"gpt-3.5-turbo",
14-
"gpt-3.5-turbo-16k",
15-
"gpt-3.5-turbo-0613",
16-
"gpt-3.5-turbo-16k-0613",
17-
"gpt-4",
18-
"gpt-4-32k",
19-
"gpt-4-0613",
20-
"gpt-4-32k-0613",
21-
]
13+
ENGINES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k", "gpt-4-turbo"]
2214

2315

2416
class Chatbot:
@@ -41,6 +33,7 @@ def __init__(
4133
reply_count: int = 1,
4234
truncate_limit: int = None,
4335
system_prompt: str = None,
36+
db_path: str = "context.db",
4437
) -> None:
4538
"""
4639
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
@@ -53,23 +46,24 @@ def __init__(
5346
or "You are ChatGPT, \
5447
a large language model trained by OpenAI. Respond conversationally"
5548
)
49+
# https://platform.openai.com/docs/models
5650
self.max_tokens: int = max_tokens or (
57-
31000
51+
127000
52+
if "gpt-4-turbo" in engine
53+
else 31000
5854
if "gpt-4-32k" in engine
5955
else 7000
6056
if "gpt-4" in engine
61-
else 15000
62-
if "gpt-3.5-turbo-16k" in engine
63-
else 4000
57+
else 16000
6458
)
6559
self.truncate_limit: int = truncate_limit or (
66-
30500
60+
126500
61+
if "gpt-4-turbo" in engine
62+
else 30500
6763
if "gpt-4-32k" in engine
6864
else 6500
6965
if "gpt-4" in engine
70-
else 14500
71-
if "gpt-3.5-turbo-16k" in engine
72-
else 3500
66+
else 15500
7367
)
7468
self.temperature: float = temperature
7569
self.top_p: float = top_p
@@ -80,17 +74,49 @@ def __init__(
8074

8175
self.aclient = aclient
8276

83-
self.conversation: dict[str, list[dict]] = {
77+
self.db_path = db_path
78+
79+
self.conn = sqlite3.connect(self.db_path)
80+
self.cursor = self.conn.cursor()
81+
82+
self._create_tables()
83+
84+
self.conversation = self._load_conversation()
85+
86+
if self.get_token_count("default") > self.max_tokens:
87+
raise Exception("System prompt is too long")
88+
89+
def _create_tables(self) -> None:
90+
self.conn.execute(
91+
"""
92+
CREATE TABLE IF NOT EXISTS conversations(
93+
id INTEGER PRIMARY KEY AUTOINCREMENT,
94+
convo_id TEXT UNIQUE,
95+
messages TEXT
96+
)
97+
"""
98+
)
99+
100+
def _load_conversation(self) -> dict[str, list[dict]]:
101+
conversations: dict[str, list[dict]] = {
84102
"default": [
85103
{
86104
"role": "system",
87-
"content": system_prompt,
105+
"content": self.system_prompt,
88106
},
89107
],
90108
}
109+
self.cursor.execute("SELECT convo_id, messages FROM conversations")
110+
for convo_id, messages in self.cursor.fetchall():
111+
conversations[convo_id] = json.loads(messages)
112+
return conversations
91113

92-
if self.get_token_count("default") > self.max_tokens:
93-
raise Exception("System prompt is too long")
114+
def _save_conversation(self, convo_id) -> None:
115+
self.conn.execute(
116+
"INSERT OR REPLACE INTO conversations (convo_id, messages) VALUES (?, ?)",
117+
(convo_id, json.dumps(self.conversation[convo_id])),
118+
)
119+
self.conn.commit()
94120

95121
def add_to_conversation(
96122
self,
@@ -102,6 +128,7 @@ def add_to_conversation(
102128
Add a message to the conversation
103129
"""
104130
self.conversation[convo_id].append({"role": role, "content": message})
131+
self._save_conversation(convo_id)
105132

106133
def __truncate_conversation(self, convo_id: str = "default") -> None:
107134
"""
@@ -116,6 +143,7 @@ def __truncate_conversation(self, convo_id: str = "default") -> None:
116143
self.conversation[convo_id].pop(1)
117144
else:
118145
break
146+
self._save_conversation(convo_id)
119147

120148
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
121149
def get_token_count(self, convo_id: str = "default") -> int:
@@ -305,6 +333,7 @@ def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
305333
self.conversation[convo_id] = [
306334
{"role": "system", "content": system_prompt or self.system_prompt},
307335
]
336+
self._save_conversation(convo_id)
308337

309338
@retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
310339
async def oneTimeAsk(

src/send_message.py

Lines changed: 27 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@ async def send_room_message(
1212
sender_id: str = "",
1313
user_message: str = "",
1414
reply_to_event_id: str = "",
15+
reply_in_thread: bool = False,
16+
thread_root_id: str = "",
1517
) -> None:
1618
if reply_to_event_id == "":
1719
content = {
@@ -23,6 +25,23 @@ async def send_room_message(
2325
extensions=["nl2br", "tables", "fenced_code"],
2426
),
2527
}
28+
elif reply_in_thread and thread_root_id:
29+
content = {
30+
"msgtype": "m.text",
31+
"body": reply_message,
32+
"format": "org.matrix.custom.html",
33+
"formatted_body": markdown.markdown(
34+
reply_message,
35+
extensions=["nl2br", "tables", "fenced_code"],
36+
),
37+
"m.relates_to": {
38+
"m.in_reply_to": {"event_id": reply_to_event_id},
39+
"rel_type": "m.thread",
40+
"event_id": thread_root_id,
41+
"is_falling_back": True,
42+
},
43+
}
44+
2645
else:
2746
body = "> <" + sender_id + "> " + user_message + "\n\n" + reply_message
2847
format = r"org.matrix.custom.html"
@@ -51,13 +70,11 @@ async def send_room_message(
5170
"formatted_body": formatted_body,
5271
"m.relates_to": {"m.in_reply_to": {"event_id": reply_to_event_id}},
5372
}
54-
try:
55-
await client.room_send(
56-
room_id,
57-
message_type="m.room.message",
58-
content=content,
59-
ignore_unverified_devices=True,
60-
)
61-
await client.room_typing(room_id, typing_state=False)
62-
except Exception as e:
63-
logger.error(e)
73+
74+
await client.room_send(
75+
room_id,
76+
message_type="m.room.message",
77+
content=content,
78+
ignore_unverified_devices=True,
79+
)
80+
await client.room_typing(room_id, typing_state=False)

0 commit comments

Comments
 (0)