Commit 6904497

Fixing cross-references
1 parent ee20d38 commit 6904497

File tree: 8 files changed, +35 -31 lines changed

src/main/askai/core/askai.py
Lines changed: 4 additions & 3 deletions

@@ -155,6 +155,7 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]:
         output: str | None = None
         processor: AIProcessor = self.mode.processor
         assert isinstance(processor, AIProcessor)
+        shared.context.push("HISTORY", question)

         try:
             if command := re.search(RE_ASKAI_CMD, question):
@@ -166,12 +167,12 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]:
                 log.debug('Response not found for "%s" in cache. Querying from %s.', question, self.engine.nickname())
                 events.reply.emit(reply=AIReply.detailed(msg.wait()))
                 if output := processor.process(question, context=read_stdin(), query_prompt=self._query_prompt):
-                    events.reply.emit(reply=AIReply.info(output or msg.no_output("processor")))
+                    events.reply.emit(reply=AIReply.info(output))
+                    shared.context.push("HISTORY", output, "assistant")
             else:
                 log.debug("Reply found for '%s' in cache.", question)
-                events.reply.emit(reply=AIReply.info(output))
-                shared.context.push("HISTORY", question)
                 shared.context.push("HISTORY", output, "assistant")
+                events.reply.emit(reply=AIReply.info(output))
         except (NotImplementedError, ImpossibleQuery) as err:
             events.reply.emit(reply=AIReply.error(err))
         except (MaxInteractionsReached, InaccurateResponse) as err:

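Taken together, these hunks move the history bookkeeping to a single place: the question is pushed to the HISTORY context once, at the top of ask_and_reply, and the assistant's output is pushed where it is produced, so the cached and non-cached paths no longer differ. Below is a minimal sketch of the push/flat contract these calls assume; the ToyContext class is hypothetical, and only the call shapes (push(key, content, role) and flat(key)) come from the diff itself:

from collections import defaultdict

class ToyContext:
    """Hypothetical stand-in for AskAI's shared.context: ordered (role, content) pairs per key."""

    def __init__(self) -> None:
        self._store: dict[str, list[tuple[str, str]]] = defaultdict(list)

    def push(self, key: str, content: str, role: str = "human") -> None:
        # Mirrors shared.context.push("HISTORY", question) and
        # shared.context.push("HISTORY", output, "assistant") in the diff.
        self._store[key].append((role, content))

    def flat(self, key: str) -> str:
        # Flattened transcript, analogous to shared.context.flat("HISTORY").
        return "\n".join(f"{role}: {content}" for role, content in self._store[key])

context = ToyContext()
context.push("HISTORY", "list my mp3 files")                           # question, pushed once up front
context.push("HISTORY", "1. song-a.mp3\n2. song-b.mp3", "assistant")   # assistant reply
print(context.flat("HISTORY"))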
src/main/askai/core/features/processors/task_splitter.py
Lines changed: 20 additions & 18 deletions

@@ -19,6 +19,8 @@
 from types import SimpleNamespace
 from typing import Any, Optional, Type, TypeAlias

+from langchain_core.messages import AIMessage
+
 from askai.core.askai_configs import configs
 from askai.core.askai_events import events
 from askai.core.askai_messages import msg
@@ -67,20 +69,20 @@ class TaskSplitter(metaclass=Singleton):

     @staticmethod
     def wrap_answer(
-        query: str,
+        question: str,
         answer: str,
         model_result: ModelResult = ModelResult.default(),
         acc_response: AccResponse | None = None,
     ) -> str:
         """Provide a final answer to the user by wrapping the AI response with additional context.
-        :param query: The user's question.
+        :param question: The user's question.
         :param answer: The AI's response to the question.
         :param model_result: The result from the selected routing model (default is ModelResult.default()).
         :param acc_response: The final accuracy response, if available.
         :return: A formatted string containing the final answer.
         """
         output: str = answer
-        args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": answer, "question": query}
+        args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": answer, "question": question}
         prompt_args: list[str] = [k for k in args.keys()]
         model: ResponseModel = (
             ResponseModel.REFINER
@@ -99,16 +101,20 @@ def wrap_answer(
             case ResponseModel.REFINER, _:
                 if acc_response and acc_response.reasoning:
                     ctx: str = str(shared.context.flat("HISTORY"))
-                    args = {"improvements": acc_response.details, "context": ctx, "response": answer, "question": query}
+                    args = {
+                        "improvements": acc_response.details,
+                        "context": ctx,
+                        "response": answer,
+                        "question": question,
+                    }
                     prompt_args = [k for k in args.keys()]
                     events.reply.emit(reply=AIReply.debug(msg.refine_answer(answer)))
                     output = final_answer("taius-refiner", prompt_args, **args)
             case _:
                 pass  # Default is to leave the last AI response as is

-        shared.context.push("HISTORY", query)
-        shared.context.push("HISTORY", output, "assistant")
-        shared.memory.save_context({"input": query}, {"output": output})
+        # Save the conversation to use with the task agent executor.
+        shared.memory.save_context({"input": question}, {"output": output})

         return output

@@ -165,25 +171,23 @@ def _splitter_wrapper_(retry_count: int) -> Optional[str]:
            runnable = RunnableWithMessageHistory(
                runnable, shared.context.flat, input_messages_key="input", history_messages_key="chat_history"
            )
-
+           response: AIMessage
            if response := runnable.invoke({"input": question}, config={"configurable": {"session_id": "HISTORY"}}):
-               log.info("Router::[RESPONSE] Received from AI: \n%s.", str(response.content))
-               plan = ActionPlan.create(question, response, model)
+               answer: str = str(response.content)
+               log.info("Router::[RESPONSE] Received from AI: \n%s.", answer)
+               plan = ActionPlan.create(question, answer, model)
                if task_list := plan.tasks:
                    events.reply.emit(reply=AIReply.debug(msg.action_plan(str(plan))))
                    if plan.speak:
                        events.reply.emit(reply=AIReply.info(plan.speak))
                else:
                    # Most of the times, indicates the LLM responded directly.
-                   acc_response: AccResponse = assert_accuracy(question, response, AccColor.GOOD)
-                   if output := plan.speak:
-                       shared.context.push("HISTORY", question)
-                       shared.context.push("HISTORY", output, "assistant")
-                   else:
+                   acc_response: AccResponse = assert_accuracy(question, response.content, AccColor.GOOD)
+                   if not (output := plan.speak):
                        output = msg.no_output("Task-Splitter")
                    return self.wrap_answer(question, output, plan.model, acc_response)
            else:
-               return response  # Most of the times, indicates a failure.
+               return msg.no_output("Task-Splitter")  # Most of the times, indicates a failure.

            try:
                agent_output: str | None = self._process_tasks(task_list, retries)
@@ -219,8 +223,6 @@ def _process_tasks(self, task_list: list[SimpleNamespace], retry_count: int) ->
                task: str = f"{action.task} {path_str or ''}"
                if agent_output := agent.invoke(task):
                    resp_history.append(agent_output)
-                   shared.context.push("HISTORY", task)
-                   shared.context.push("HISTORY", agent_output, "assistant")
                    task_list.pop(0)
        except (InterruptionRequest, TerminatingQuery) as err:
            return str(err)

src/main/askai/core/features/router/task_agent.py
Lines changed: 3 additions & 2 deletions

@@ -57,15 +57,16 @@ def invoke(self, task: str) -> str:
         :return: The agent's response as a string.
         """
         events.reply.emit(reply=AIReply.debug(msg.task(task)))
+        shared.context.push("HISTORY", task)
         if (response := self._exec_task(task)) and (output := response["output"]):
             log.info("Router::[RESPONSE] Received from AI: \n%s.", output)
-            shared.context.push("HISTORY", task, "assistant")
             shared.context.push("HISTORY", output, "assistant")
-            shared.memory.save_context({"input": task}, {"output": output})
             assert_accuracy(task, output, AccColor.MODERATE)
         else:
             output = msg.no_output("AI")

+        shared.memory.save_context({"input": task}, {"output": output})
+
         return output

     def _create_lc_agent(self, temperature: Temperature = Temperature.COLDEST) -> Runnable:
src/main/askai/core/model/action_plan.py
Lines changed: 4 additions & 5 deletions

@@ -15,13 +15,12 @@
 import os
 import re
 from dataclasses import dataclass, field
-from os.path import expandvars
 from types import SimpleNamespace

+from hspylib.core.preconditions import check_state
+
 from askai.core.model.model_result import ModelResult
 from askai.core.support.llm_parser import parse_field, parse_list, parse_word
-from hspylib.core.preconditions import check_state
-from langchain_core.messages import AIMessage


 @dataclass
@@ -38,14 +37,14 @@ class ActionPlan:
     model: ModelResult = field(default_factory=ModelResult.default)

     @staticmethod
-    def create(question: str, message: AIMessage, model: ModelResult) -> "ActionPlan":
+    def create(question: str, message: str, model: ModelResult) -> "ActionPlan":
         """Create an ActionPlan based on the provided question, AI message, and result model.
         :param question: The original question or command that was sent to the AI.
         :param message: The AIMessage object containing the AI's response and metadata.
         :param model: The result model.
         :return: An instance of ActionPlan created from the provided inputs.
         """
-        plan: ActionPlan = ActionPlan._parse_response(question, message.content)
+        plan: ActionPlan = ActionPlan._parse_response(question, message)
         check_state(
             plan is not None and isinstance(plan, ActionPlan), f"Invalid action plan received from LLM: {type(plan)}"
         )

src/main/askai/resources/prompts/task-splitter.txt
Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ Your decisions must always be made independently WITHOUT SEEKING USER ASSISTANCE

 Given a complex user request, break it down into simple actionable tasks first.

-When the context of the question is not explicit, refer to past events to clarify user requests and resolve ambiguities. Start from the most recent responses moving backwards. For queries like 'open 1', 'play 2', etc., backtrack to identify the first relevant cross-reference in past conversations from the latest to the earliest. Follow the conversation from the most recent.
+When the context of the question is not explicit, refer to past events to clarify user requests and resolve ambiguities. To ensure accurate cross-referencing in conversations, when a user refers to an item by number (e.g., "open 1"), you should search backward from the most recent messages to locate the latest list or context where that number applies. This approach ensures you respond based on the most recent information provided. For example, if you listed files and the user says "open 1," you should refer to the first item in the most recent list you've given.

 Today is "{datetime}". Use this information if it is relevant to the response.

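The rewritten instruction effectively describes an algorithm: walk the conversation from newest to oldest and resolve a bare item number against the most recent numbered list. A rough illustration of that backward search follows; the function name, regex, and sample history are assumptions for demonstration, not part of the commit:

import re

def resolve_numbered_reference(history: list[str], number: int) -> str | None:
    """Scan history from newest to oldest for the latest numbered list and
    return the item matching `number` (e.g. "open 1" -> item 1 of that list)."""
    for message in reversed(history):
        items = {
            int(m.group(1)): m.group(2).strip()
            for m in re.finditer(r"^(\d+)[.)-]\s*(.+)$", message, re.MULTILINE)
        }
        if number in items:
            return items[number]
    return None

history = [
    "Here are your playlists:\n1. rock\n2. jazz",
    "Here are your files:\n1. readme.txt\n2. notes.md",
]
assert resolve_numbered_reference(history, 1) == "readme.txt"  # most recent list wins
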
src/main/askai/resources/rag/accuracy.csv
Lines changed: 1 addition & 0 deletions

@@ -22,3 +22,4 @@ Display the file name of the gabiroba song;The Gabiroba song file name is 'have-
 What is my second task;The first task or action to be taken according to the reminder is to 'Thank Rafael Romão for the keyboard' at 17:20 on 04/11/2024.;Red;The AI partially addresses the user's question by providing information about the first task but lacks a direct response to the query about the second task.
 Who is Garirubal?;Your Google search returned the following:\n\nIt appears that there are currently no significant search results;Yellow;Internet searches can yield negative results or a lack of information, therefore, there is no definitive right or wrong.
 Bye bye;The user requested to end the session.;White;The user requested to terminate the session, so, white is the right classification.
+Show the file readme.txt;To proceed, I will open this file for you. Please hold on a moment.;Red;The user requested to open the file readme.txt, but the AI inaccurately responded that it would open it in a moment. This message is unhelpful and is therefore classified as Red.

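Judging by the surrounding rows, each entry follows a semicolon-separated schema of question;answer;color;reasoning, apparently serving as retrieval examples for the accuracy check. A quick sketch of reading one row under that assumed schema (not AskAI's actual loader):

import csv
import io

# One of the existing rows, verbatim from the file above.
sample = (
    "Bye bye;The user requested to end the session.;White;"
    "The user requested to terminate the session, so, white is the right classification."
)
for question, answer, color, reasoning in csv.reader(io.StringIO(sample), delimiter=";"):
    print(f"[{color}] {question!r} -> {reasoning[:40]}...")
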
src/main/requirements.txt
Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 ###### AUTO-GENERATED Requirements file for: AskAI ######

 hspylib>=1.12.49
-hspylib-clitt>=0.9.136
+hspylib-clitt>=0.9.137
 hspylib-setman>=0.10.39
 retry2>=0.9.5
 pause>=0.3

src/test/core/model/test_action_plan.py
Lines changed: 1 addition & 1 deletion

@@ -59,7 +59,7 @@ def test_should_extract_and_parse_llm_responses(self):

         for question, response, expected in test_cases:
             with self.subTest(response=response):
-                result = ActionPlan.create(question, AIMessage(response), ModelResult.default())
+                result = ActionPlan.create(question, response, ModelResult.default())
                 self.assertEqual(result, expected)