Fixing cross-references
yorevs committed Oct 1, 2024
1 parent ee20d38 commit 6904497
Showing 8 changed files with 35 additions and 31 deletions.
7 changes: 4 additions & 3 deletions src/main/askai/core/askai.py
@@ -155,6 +155,7 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]:
         output: str | None = None
         processor: AIProcessor = self.mode.processor
         assert isinstance(processor, AIProcessor)
+        shared.context.push("HISTORY", question)
 
         try:
             if command := re.search(RE_ASKAI_CMD, question):
@@ -166,12 +167,12 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]:
                 log.debug('Response not found for "%s" in cache. Querying from %s.', question, self.engine.nickname())
                 events.reply.emit(reply=AIReply.detailed(msg.wait()))
                 if output := processor.process(question, context=read_stdin(), query_prompt=self._query_prompt):
-                    events.reply.emit(reply=AIReply.info(output or msg.no_output("processor")))
+                    events.reply.emit(reply=AIReply.info(output))
                     shared.context.push("HISTORY", output, "assistant")
             else:
                 log.debug("Reply found for '%s' in cache.", question)
-                events.reply.emit(reply=AIReply.info(output))
-                shared.context.push("HISTORY", question)
+                shared.context.push("HISTORY", output, "assistant")
+                events.reply.emit(reply=AIReply.info(output))
         except (NotImplementedError, ImpossibleQuery) as err:
             events.reply.emit(reply=AIReply.error(err))
         except (MaxInteractionsReached, InaccurateResponse) as err:
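Net effect of the two hunks above: the user's question now lands in the HISTORY context exactly once, at the top of ask_and_reply, and the assistant's output is pushed in whichever branch produced it, so cached and freshly processed replies leave the same conversation trail. Below is a minimal sketch of that ordering using a toy context object; only the push(key, text, role) call shape comes from the diff, everything else is an assumption for illustration.

class MiniContext:
    """Toy stand-in for shared.context: ordered (role, text) pairs per key."""

    def __init__(self) -> None:
        self._store: dict[str, list[tuple[str, str]]] = {}

    def push(self, key: str, text: str, role: str = "human") -> None:
        self._store.setdefault(key, []).append((role, text))

    def flat(self, key: str) -> str:
        return "\n".join(f"{role}: {text}" for role, text in self._store.get(key, []))


context = MiniContext()


def ask_and_reply(question: str, cached_reply: str | None = None) -> str:
    context.push("HISTORY", question)  # question recorded exactly once, up front
    output = cached_reply or f"(processed) {question}"  # cache hit or processor call
    context.push("HISTORY", output, "assistant")  # answer recorded where it was produced
    return output


ask_and_reply("list my reminders")
print(context.flat("HISTORY"))  # human question followed by assistant reply

Pushing the question before any branching is what keeps cache hits and processor calls from diverging in history, which the cross-reference lookup in the prompt change below depends on.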
38 changes: 20 additions & 18 deletions src/main/askai/core/features/processors/task_splitter.py
@@ -19,6 +19,8 @@
 from types import SimpleNamespace
 from typing import Any, Optional, Type, TypeAlias
 
+from langchain_core.messages import AIMessage
+
 from askai.core.askai_configs import configs
 from askai.core.askai_events import events
 from askai.core.askai_messages import msg
@@ -67,20 +69,20 @@ class TaskSplitter(metaclass=Singleton):
 
     @staticmethod
     def wrap_answer(
-        query: str,
+        question: str,
         answer: str,
         model_result: ModelResult = ModelResult.default(),
         acc_response: AccResponse | None = None,
     ) -> str:
         """Provide a final answer to the user by wrapping the AI response with additional context.
-        :param query: The user's question.
+        :param question: The user's question.
         :param answer: The AI's response to the question.
         :param model_result: The result from the selected routing model (default is ModelResult.default()).
         :param acc_response: The final accuracy response, if available.
         :return: A formatted string containing the final answer.
         """
         output: str = answer
-        args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": answer, "question": query}
+        args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": answer, "question": question}
         prompt_args: list[str] = [k for k in args.keys()]
         model: ResponseModel = (
             ResponseModel.REFINER
@@ -99,16 +101,20 @@ def wrap_answer(
             case ResponseModel.REFINER, _:
                 if acc_response and acc_response.reasoning:
                     ctx: str = str(shared.context.flat("HISTORY"))
-                    args = {"improvements": acc_response.details, "context": ctx, "response": answer, "question": query}
+                    args = {
+                        "improvements": acc_response.details,
+                        "context": ctx,
+                        "response": answer,
+                        "question": question,
+                    }
                     prompt_args = [k for k in args.keys()]
                     events.reply.emit(reply=AIReply.debug(msg.refine_answer(answer)))
                     output = final_answer("taius-refiner", prompt_args, **args)
             case _:
                 pass  # Default is to leave the last AI response as is
 
-        shared.context.push("HISTORY", query)
         shared.context.push("HISTORY", output, "assistant")
         # Save the conversation to use with the task agent executor.
-        shared.memory.save_context({"input": query}, {"output": output})
+        shared.memory.save_context({"input": question}, {"output": output})
 
         return output

@@ -165,25 +171,23 @@ def _splitter_wrapper_(retry_count: int) -> Optional[str]:
             runnable = RunnableWithMessageHistory(
                 runnable, shared.context.flat, input_messages_key="input", history_messages_key="chat_history"
             )
-
+            response: AIMessage
             if response := runnable.invoke({"input": question}, config={"configurable": {"session_id": "HISTORY"}}):
-                log.info("Router::[RESPONSE] Received from AI: \n%s.", str(response.content))
-                plan = ActionPlan.create(question, response, model)
+                answer: str = str(response.content)
+                log.info("Router::[RESPONSE] Received from AI: \n%s.", answer)
+                plan = ActionPlan.create(question, answer, model)
                 if task_list := plan.tasks:
                     events.reply.emit(reply=AIReply.debug(msg.action_plan(str(plan))))
                     if plan.speak:
                         events.reply.emit(reply=AIReply.info(plan.speak))
                 else:
                     # Most of the times, indicates the LLM responded directly.
-                    acc_response: AccResponse = assert_accuracy(question, response, AccColor.GOOD)
-                    if output := plan.speak:
-                        shared.context.push("HISTORY", question)
-                        shared.context.push("HISTORY", output, "assistant")
-                    else:
+                    acc_response: AccResponse = assert_accuracy(question, response.content, AccColor.GOOD)
+                    if not (output := plan.speak):
                         output = msg.no_output("Task-Splitter")
                     return self.wrap_answer(question, output, plan.model, acc_response)
             else:
-                return response  # Most of the times, indicates a failure.
+                return msg.no_output("Task-Splitter")  # Most of the times, indicates a failure.
 
             try:
                 agent_output: str | None = self._process_tasks(task_list, retries)
@@ -219,8 +223,6 @@ def _process_tasks(self, task_list: list[SimpleNamespace], retry_count: int) ->
                 task: str = f"{action.task} {path_str or ''}"
                 if agent_output := agent.invoke(task):
                     resp_history.append(agent_output)
-                    shared.context.push("HISTORY", task)
-                    shared.context.push("HISTORY", agent_output, "assistant")
                     task_list.pop(0)
             except (InterruptionRequest, TerminatingQuery) as err:
                 return str(err)
5 changes: 3 additions & 2 deletions src/main/askai/core/features/router/task_agent.py
@@ -57,15 +57,16 @@ def invoke(self, task: str) -> str:
         :return: The agent's response as a string.
         """
         events.reply.emit(reply=AIReply.debug(msg.task(task)))
+        shared.context.push("HISTORY", task)
         if (response := self._exec_task(task)) and (output := response["output"]):
             log.info("Router::[RESPONSE] Received from AI: \n%s.", output)
-            shared.context.push("HISTORY", task, "assistant")
             shared.context.push("HISTORY", output, "assistant")
-            shared.memory.save_context({"input": task}, {"output": output})
             assert_accuracy(task, output, AccColor.MODERATE)
         else:
             output = msg.no_output("AI")
 
+        shared.memory.save_context({"input": task}, {"output": output})
+
         return output
 
     def _create_lc_agent(self, temperature: Temperature = Temperature.COLDEST) -> Runnable:
9 changes: 4 additions & 5 deletions src/main/askai/core/model/action_plan.py
@@ -15,13 +15,12 @@
 import os
 import re
 from dataclasses import dataclass, field
-from os.path import expandvars
 from types import SimpleNamespace
 
+from hspylib.core.preconditions import check_state
+
 from askai.core.model.model_result import ModelResult
 from askai.core.support.llm_parser import parse_field, parse_list, parse_word
-from hspylib.core.preconditions import check_state
-from langchain_core.messages import AIMessage
 
 
 @dataclass
@@ -38,14 +37,14 @@ class ActionPlan:
     model: ModelResult = field(default_factory=ModelResult.default)
 
     @staticmethod
-    def create(question: str, message: AIMessage, model: ModelResult) -> "ActionPlan":
+    def create(question: str, message: str, model: ModelResult) -> "ActionPlan":
         """Create an ActionPlan based on the provided question, AI message, and result model.
         :param question: The original question or command that was sent to the AI.
         :param message: The AIMessage object containing the AI's response and metadata.
         :param model: The result model.
         :return: An instance of ActionPlan created from the provided inputs.
         """
-        plan: ActionPlan = ActionPlan._parse_response(question, message.content)
+        plan: ActionPlan = ActionPlan._parse_response(question, message)
         check_state(
             plan is not None and isinstance(plan, ActionPlan), f"Invalid action plan received from LLM: {type(plan)}"
         )
2 changes: 1 addition & 1 deletion src/main/askai/resources/prompts/task-splitter.txt
@@ -6,7 +6,7 @@ Your decisions must always be made independently WITHOUT SEEKING USER ASSISTANCE
 
 Given a complex user request, break it down into simple actionable tasks first.
 
-When the context of the question is not explicit, refer to past events to clarify user requests and resolve ambiguities. Start from the most recent responses moving backwards. For queries like 'open 1', 'play 2', etc., backtrack to identify the first relevant cross-reference in past conversations from the latest to the earliest. Follow the conversation from the most recent.
+When the context of the question is not explicit, refer to past events to clarify user requests and resolve ambiguities. To ensure accurate cross-referencing in conversations, when a user refers to an item by number (e.g., "open 1"), you should search backward from the most recent messages to locate the latest list or context where that number applies. This approach ensures you respond based on the most recent information provided. For example, if you listed files and the user says "open 1," you should refer to the first item in the most recent list you've given.
 
 Today is "{datetime}". Use this information if it is relevant to the response.
 
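The rewritten paragraph above states the backward-search rule in prose. The sketch below illustrates the same rule in Python; it is not part of the commit, and the helper name, history format, and regexes are assumptions made for illustration.

import re


def resolve_numbered_reference(history: list[str], command: str) -> str | None:
    """Resolve e.g. 'open 1' against the most recent numbered list in history.

    Scans the history backwards (newest message first) and returns the item
    whose number matches the one in the command, or None if no list has it.
    """
    if not (match := re.match(r"^\w+\s+(\d+)$", command.strip())):
        return None
    wanted = match.group(1)
    for message in reversed(history):  # newest message first
        # Look for a line that starts with the requested number, e.g. "1. song.mp3"
        for line in message.splitlines():
            if item := re.match(rf"^\s*{wanted}[.):]\s*(.+)$", line):
                return item.group(1).strip()
    return None


# Usage: "open 1" arrives after two different lists were shown; the latest list wins.
history = [
    "1. report.pdf\n2. notes.txt",            # older list
    "1. have-you-ever.mp3\n2. intro.mp3",     # most recent list
]
print(resolve_numbered_reference(history, "open 1"))  # -> have-you-ever.mp3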
1 change: 1 addition & 0 deletions src/main/askai/resources/rag/accuracy.csv
@@ -22,3 +22,4 @@ Display the file name of the gabiroba song;The Gabiroba song file name is 'have-
 What is my second task;The first task or action to be taken according to the reminder is to 'Thank Rafael Romão for the keyboard' at 17:20 on 04/11/2024.;Red;The AI partially addresses the user's question by providing information about the first task but lacks a direct response to the query about the second task.
 Who is Garirubal?;Your Google search returned the following:\n\nIt appears that there are currently no significant search results;Yellow;Internet searches can yield negative results or a lack of information, therefore, there is no definitive right or wrong.
 Bye bye;The user requested to end the session.;White;The user requested to terminate the session, so, white is the right classification.
+Show the file readme.txt;To proceed, I will open this file for you. Please hold on a moment.;Red;The user requested to open the file readme.txt, but the AI inaccurately responded that it would open it in a moment. This message is unhelpful and is therefore classified as Red.
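Each row of this RAG file follows a question;answer;color;reasoning layout, where the color names the accuracy class (Red, Yellow, White, and so on). Below is a minimal loader sketch under that assumption; the actual AskAI reader is not shown in this diff and this helper is hypothetical.

from typing import NamedTuple


class AccuracyExample(NamedTuple):
    question: str
    answer: str
    color: str  # accuracy classification, e.g. Red, Yellow, White
    reasoning: str


def load_accuracy_rag(path: str) -> list[AccuracyExample]:
    """Read semicolon-separated rows; the reasoning field may contain further semicolons."""
    examples: list[AccuracyExample] = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                # Split on the first three semicolons only, so reasoning stays intact.
                question, answer, color, reasoning = line.rstrip("\n").split(";", 3)
                examples.append(AccuracyExample(question, answer, color, reasoning))
    return examples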
2 changes: 1 addition & 1 deletion src/main/requirements.txt
@@ -1,7 +1,7 @@
 ###### AUTO-GENERATED Requirements file for: AskAI ######
 
 hspylib>=1.12.49
-hspylib-clitt>=0.9.136
+hspylib-clitt>=0.9.137
 hspylib-setman>=0.10.39
 retry2>=0.9.5
 pause>=0.3
2 changes: 1 addition & 1 deletion src/test/core/model/test_action_plan.py
@@ -59,7 +59,7 @@ def test_should_extract_and_parse_llm_responses(self):
 
         for question, response, expected in test_cases:
             with self.subTest(response=response):
-                result = ActionPlan.create(question, AIMessage(response), ModelResult.default())
+                result = ActionPlan.create(question, response, ModelResult.default())
                 self.assertEqual(result, expected)
 
 
