diff --git a/src/main/askai/core/askai.py b/src/main/askai/core/askai.py index a7003844..ef8f0993 100644 --- a/src/main/askai/core/askai.py +++ b/src/main/askai/core/askai.py @@ -138,7 +138,6 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]: output: str | None = None processor: AIProcessor = self.mode.processor assert isinstance(processor, AIProcessor) - shared.context.push("HISTORY", question) try: if command := re.search(RE_ASKAI_CMD, question): @@ -151,9 +150,11 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]: events.reply.emit(reply=AIReply.detailed(msg.wait())) if output := processor.process(question, context=read_stdin(), query_prompt=self._query_prompt): events.reply.emit(reply=AIReply.info(output)) + shared.context.push("HISTORY", question) shared.context.push("HISTORY", output, "assistant") else: log.debug("Reply found for '%s' in cache.", question) + shared.context.push("HISTORY", question) shared.context.push("HISTORY", output, "assistant") events.reply.emit(reply=AIReply.info(output)) except (NotImplementedError, ImpossibleQuery) as err: @@ -172,7 +173,7 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]: status = False finally: if output: - shared.context.set("LAST_REPLY", output) + shared.context.set("LAST_REPLY", output, "assistant") return status, output diff --git a/src/main/askai/core/features/processors/task_splitter.py b/src/main/askai/core/features/processors/task_splitter.py index d3f74abf..5a7cf1d8 100644 --- a/src/main/askai/core/features/processors/task_splitter.py +++ b/src/main/askai/core/features/processors/task_splitter.py @@ -175,7 +175,7 @@ def _splitter_wrapper_(retry_count: int) -> Optional[str]: answer: str = str(response.content) log.info("Router::[RESPONSE] Received from AI: \n%s.", answer) plan = ActionPlan.create(question, answer, model) - if task_list := plan.tasks: + if not plan.is_direct and (task_list := plan.tasks): 
events.reply.emit(reply=AIReply.debug(msg.action_plan(str(plan)))) if plan.speak: events.reply.emit(reply=AIReply.info(plan.speak)) diff --git a/src/main/askai/core/features/router/task_agent.py b/src/main/askai/core/features/router/task_agent.py index cc7dd032..b52ce94d 100644 --- a/src/main/askai/core/features/router/task_agent.py +++ b/src/main/askai/core/features/router/task_agent.py @@ -57,16 +57,15 @@ def invoke(self, task: str) -> str: :return: The agent's response as a string. """ events.reply.emit(reply=AIReply.debug(msg.task(task))) - shared.context.push("HISTORY", task) + shared.context.push("HISTORY", task, "assistant") if (response := self._exec_task(task)) and (output := response["output"]): log.info("Router::[RESPONSE] Received from AI: \n%s.", output) shared.context.push("HISTORY", output, "assistant") assert_accuracy(task, output, AccColor.MODERATE) + shared.memory.save_context({"input": task}, {"output": output}) else: output = msg.no_output("AI") - shared.memory.save_context({"input": task}, {"output": output}) - return output def _create_lc_agent(self, temperature: Temperature = Temperature.COLDEST) -> Runnable: diff --git a/src/main/askai/core/model/action_plan.py b/src/main/askai/core/model/action_plan.py index b45ef3df..1b439b8b 100644 --- a/src/main/askai/core/model/action_plan.py +++ b/src/main/askai/core/model/action_plan.py @@ -12,13 +12,15 @@ Copyright (c) 2024, HomeSetup """ -from askai.core.model.model_result import ModelResult -from askai.core.support.llm_parser import parse_field, parse_list, parse_word from dataclasses import dataclass, field -from hspylib.core.preconditions import check_state from types import SimpleNamespace +from typing import Any -import re +from hspylib.core.preconditions import check_state + +from askai.core.model.model_result import ModelResult +from askai.core.support.llm_parser import parse_field, parse_list, parse_word +from askai.exception.exceptions import InaccurateResponse @dataclass @@ -30,6 +32,7 @@ 
class ActionPlan: question: str = None speak: str = None primary_goal: str = None + is_direct: bool = False sub_goals: list[SimpleNamespace] = field(default_factory=list) tasks: list[SimpleNamespace] = field(default_factory=list) model: ModelResult = field(default_factory=ModelResult.default) @@ -65,46 +68,42 @@ def _parse_response(question: str, response: str) -> "ActionPlan": direct: str = parse_word("direct", response) # fmt: off - if primary_goal and tasks: - plan = ActionPlan( - question=question, - speak=speak, - primary_goal=primary_goal, - sub_goals=sub_goals, - tasks=tasks - ) - elif direct and len(direct) > 1: - plan = ActionPlan._direct_answer(question, response, ModelResult.default()) + if (direct and len(direct) > 1) or len(tasks) == 0: + plan = ActionPlan._direct_answer(question, direct, primary_goal, ModelResult.default()) + elif (direct and len(direct) > 1) or len(tasks) == 1: + plan = ActionPlan._direct_task(question, speak, primary_goal, tasks, ModelResult.default()) + elif tasks: + plan = ActionPlan(question, speak, primary_goal, False, sub_goals, tasks) else: - plan = ActionPlan._direct_task(question, response, ModelResult.default()) + raise InaccurateResponse("AI provided an invalid action plan!") # fmt: on return plan @staticmethod - def _direct_answer(question: str, response: str, model: ModelResult) -> "ActionPlan": + def _direct_answer(question: str, answer: str, goal: str, model: ModelResult) -> "ActionPlan": """Create a simple ActionPlan from an AI's direct response in plain text. :param question: The original question that was sent to the AI. - :param response: The AI's direct response in plain text (unformatted JSON). + :param answer: The AI's direct answer to the user. + :param goal: The primary goal extracted from the AI response. :param model: The result model. :return: An instance of ActionPlan created from the direct response.
""" - flags: int = re.IGNORECASE | re.MULTILINE | re.DOTALL - speak: str = re.sub(r"\*\*Direct:\*\*(.+?)", "\1", response, flags) + speak: str = answer.split(',')[0].strip("'\"") - return ActionPlan(question, speak, "N/A", [], [], model) + return ActionPlan(question, speak, goal, True, [], [], model) @staticmethod - def _direct_task(question: str, response: str, model: ModelResult) -> "ActionPlan": + def _direct_task(question: str, speak: str, goal: str, tasks: list[Any], model: ModelResult) -> "ActionPlan": """Create a simple ActionPlan from an AI's direct response in plain text. :param question: The original question that was sent to the AI. - :param response: The AI's direct response in plain text (unformatted JSON). + :param speak: The spoken response generated by the AI. + :param goal: The goal or desired outcome from the task. + :param tasks: A list of tasks related to achieving the goal. :param model: The result model. :return: An instance of ActionPlan created from the direct response. """ - tasks: list[SimpleNamespace] = [SimpleNamespace(id="1", task=response)] - return ActionPlan(question, "", "N/A", [], tasks, model) + return ActionPlan(question, speak, goal, False, [], tasks, model) def __str__(self): sub_goals: str = " ".join(f"{i + 1}. {g}" for i, g in enumerate(self.sub_goals)) if self.sub_goals else "N/A" @@ -121,7 +120,6 @@ def __len__(self): return len(self.tasks) def __eq__(self, other: "ActionPlan") -> bool: - """TODO""" return ( self.question == other.question and self.speak == other.speak