From 64d86dc5f30bc55bf597db3e44c6c0633f80c885 Mon Sep 17 00:00:00 2001
From: Hugo Saporetti Junior
Date: Wed, 23 Oct 2024 16:08:51 -0300
Subject: [PATCH] Replace the actual processor with the pipeline - part 3

---
 dependencies.hspd                             |   2 +-
 src/main/askai/__classpath__.py               |   2 +
 src/main/askai/__main__.py                    |   2 +-
 .../processors/splitter/splitter_actions.py   |   5 +-
 .../processors/splitter/splitter_executor.py  |  16 +-
 .../processors/splitter/splitter_pipeline.py  | 191 +++++++++---------
 .../processors/splitter/splitter_result.py    |  31 +++
 .../processors/splitter/splitter_states.py    |  25 ++-
 .../splitter/splitter_transitions.py          |  19 +-
 src/main/askai/core/router/evaluation.py      |  13 +-
 src/main/askai/core/router/task_agent.py      |  49 ++---
 src/main/askai/resources/rag/accuracy.csv     |   5 +-
 src/main/requirements.txt                     |   3 +-
 13 files changed, 203 insertions(+), 160 deletions(-)
 create mode 100644 src/main/askai/core/processors/splitter/splitter_result.py

diff --git a/dependencies.hspd b/dependencies.hspd
index 3e78317d..12eb6f53 100644
--- a/dependencies.hspd
+++ b/dependencies.hspd
@@ -44,7 +44,7 @@ package: html2text, version: 2024.2.26, mode: ge
 
 /* CLI/TUI */
 package: rich, version: 13.8.1, mode: ge
-package: textual, version: 0.80.1, mode: ge
+package: textual, version: 0.80.1, mode: eq
 
 /* Audio */
 package: soundfile, version: 0.12.1, mode: ge
diff --git a/src/main/askai/__classpath__.py b/src/main/askai/__classpath__.py
index 85c24231..0c858a46 100644
--- a/src/main/askai/__classpath__.py
+++ b/src/main/askai/__classpath__.py
@@ -34,6 +34,8 @@
 
 if not is_a_tty():
     log.getLogger().setLevel(log.ERROR)
+else:
+    log.getLogger().setLevel(log.INFO)
 
 if not os.environ.get("USER_AGENT"):
     # The AskAI User Agent, required by the langchain framework
diff --git a/src/main/askai/__main__.py b/src/main/askai/__main__.py
index a9963902..118c8d0e 100755
--- a/src/main/askai/__main__.py
+++ b/src/main/askai/__main__.py
@@ -152,7 +152,7 @@ def _main(self, *params, **kwargs) -> ExitStatus:
             os.environ["ASKAI_APP"] = RunModes.ASKAI_CMD.value
             return self._execute_command(query_string)
 
-        log.info(
+        log.debug(
             dedent(
                 f"""
         {os.environ.get("ASKAI_APP")} v{self._app_version}
diff --git a/src/main/askai/core/processors/splitter/splitter_actions.py b/src/main/askai/core/processors/splitter/splitter_actions.py
index 2e7a5675..d6abaa96 100644
--- a/src/main/askai/core/processors/splitter/splitter_actions.py
+++ b/src/main/askai/core/processors/splitter/splitter_actions.py
@@ -94,15 +94,14 @@ def refine_answer(question: str, answer: str, acc_response: AccResponse | None =
         return answer
 
     @staticmethod
-    def process_action(action: SimpleNamespace) -> str:
+    def process_action(action: SimpleNamespace) -> Optional[str]:
         """TODO"""
         path_str: str | None = (
             "Path: " + action.path
             if hasattr(action, "path") and action.path.upper() not in ["N/A", "NONE", ""]
             else None
         )
-        task: str = f"{action.task} {path_str or ''}"
-        return agent.invoke(task)
+        return agent.invoke(f"{action.task} {path_str or ''}")
 
     def __init__(self):
         self._rag: RAGProvider = RAGProvider("task-splitter.csv")
diff --git a/src/main/askai/core/processors/splitter/splitter_executor.py b/src/main/askai/core/processors/splitter/splitter_executor.py
index 727d0ed6..5fd905f7 100644
--- a/src/main/askai/core/processors/splitter/splitter_executor.py
+++ b/src/main/askai/core/processors/splitter/splitter_executor.py
@@ -47,11 +47,10 @@ def run(self):
         with self._console.status(msg.wait(), spinner="dots") as spinner:
             while not self.pipeline.state == States.COMPLETE:
                 self.pipeline.track_previous()
-                spinner.update(f"{shared.nickname_spinner}[green]{self.pipeline.state.value}…[/green]")
-                if 0 < configs.max_router_retries < self.pipeline.failures[self.pipeline.state.value]:
+                if 1 < configs.max_router_retries < 1 + self.pipeline.failures[self.pipeline.state.value]:
                     self.display(f"\n[red] Max retries exceeded: {configs.max_router_retries}[/red]\n")
                     break
-                if 0 < configs.max_iteractions < self.pipeline.iteractions:
+                if 1 < configs.max_iteractions < 1 + self.pipeline.iteractions:
                     self.display(f"\n[red] Max iteractions exceeded: {configs.max_iteractions}[/red]\n")
                     break
                 match self.pipeline.state:
@@ -64,18 +63,18 @@ def run(self):
                     case States.TASK_SPLIT:
                         if self.pipeline.st_task_split():
                             if self.pipeline.is_direct():
-                                self.display("[yellow] AI decided to respond directly[/yellow]")
+                                self.display("[yellow]Direct answer provided[/yellow]")
                                 self.pipeline.ev_direct_answer()
                             else:
-                                spinner.update("[green] Executing action plan[/green]")
+                                self.display("[green]Action plan created[/green]")
                                 self.pipeline.ev_plan_created()
                     case States.EXECUTE_TASK:
-                        if self.pipeline.st_execute_next():
+                        if self.pipeline.st_execute_task():
                             self.pipeline.ev_task_executed()
-                    case States.ACCURACY_CHECK:
+                    case States.ACC_CHECK:
                         acc_color: AccColor = self.pipeline.st_accuracy_check()
                         c_name: str = acc_color.color.casefold()
-                        self.display(f"[green] Accuracy check: [{c_name}]{c_name.upper()}[/{c_name}][/green]")
+                        self.display(f"[green]Accuracy check: [{c_name}]{c_name.upper()}[/{c_name}][/green]")
                         if acc_color.passed(AccColor.GOOD):
                             self.pipeline.ev_accuracy_passed()
                         elif acc_color.passed(AccColor.MODERATE):
@@ -98,6 +97,7 @@ def run(self):
                 )
                 self.pipeline.failures[self.pipeline.state.value] += 1 if not execution_status else 0
                 self.display(f"[green]{execution_status_str}[/green]")
+                spinner.update(f"{shared.nickname_spinner}[green]{self.pipeline.state.value}…[/green]")
                 self.pipeline.iteractions += 1
 
         if configs.is_debug:
diff --git a/src/main/askai/core/processors/splitter/splitter_pipeline.py b/src/main/askai/core/processors/splitter/splitter_pipeline.py
index 6ef9e02f..fd0df45c 100644
--- a/src/main/askai/core/processors/splitter/splitter_pipeline.py
+++ b/src/main/askai/core/processors/splitter/splitter_pipeline.py
@@ -13,10 +13,10 @@
    Copyright (c) 2024, HomeSetup
 """
 import logging as log
-import os
 from collections import defaultdict
-from typing import AnyStr, Optional
+from typing import AnyStr
 
+from hspylib.core.preconditions import check_state
 from langchain_core.prompts import PromptTemplate
 from transitions import Machine
 
@@ -27,9 +27,10 @@
 from askai.core.model.action_plan import ActionPlan
 from askai.core.model.model_result import ModelResult
 from askai.core.processors.splitter.splitter_actions import actions
+from askai.core.processors.splitter.splitter_result import SplitterResult, PipelineResponse
 from askai.core.processors.splitter.splitter_states import States
 from askai.core.processors.splitter.splitter_transitions import Transition, TRANSITIONS
-from askai.core.router.evaluation import assert_accuracy, EVALUATION_GUIDE
+from askai.core.router.evaluation import eval_response, EVALUATION_GUIDE
 from askai.core.support.shared_instances import shared
 
 
@@ -40,35 +41,21 @@ class SplitterPipeline:
 
     FAKE_SLEEP: float = 0.3
 
-    def __init__(self, query: AnyStr):
+    def __init__(self, question: AnyStr):
         self._transitions: list[Transition] = [t for t in TRANSITIONS]
         self._machine: Machine = Machine(
-            name="Taius-Coder",
-            model=self,
-            initial=States.STARTUP,
-            states=States,
-            transitions=self._transitions,
+            name="Taius-Coder", model=self,
+            initial=States.STARTUP, states=States, transitions=self._transitions, auto_transitions=False
         )
-        self._previous: States | None = None
-        self._failures: dict[str, int] = defaultdict(int)
+        self._previous: States = States.NOT_STARTED
         self._iteractions: int = 0
-        self._query: str = query
-        self._plan: ActionPlan | None = None
-        self._direct_answer: Optional[str] = None
-        self._model: ModelResult | None = None
-        self._resp_history: list[str] = list()
-        self._last_acc_response: AccResponse | None = None
-        self._last_task: str | None = None
-
-    def _invalidate(self) -> None:
-        """TODO"""
-        self._plan = None
-        self._direct_answer = None
-        self._model = None
-        self._resp_history = list()
-        self._last_acc_response = None
-        self._last_task = None
+        self._failures: dict[str, int] = defaultdict(int)
+        self._result: SplitterResult = SplitterResult(question)
+
+    @property
+    def previous(self) -> States:
+        return self._previous
 
     @property
     def iteractions(self) -> int:
@@ -79,56 +66,64 @@ def iteractions(self, value: int):
         self._iteractions = value
 
     @property
-    def last_acc_response(self) -> AccResponse:
-        return self._last_acc_response
-
-    @last_acc_response.setter
-    def last_acc_response(self, value: AccResponse) -> None:
-        self._last_acc_response = value
+    def failures(self) -> dict[str, int]:
+        return self._failures
 
     @property
-    def last_task(self) -> str:
-        return self._last_task
+    def result(self) -> SplitterResult:
+        return self._result
 
-    @last_task.setter
-    def last_task(self, value: str) -> None:
-        self._last_task = value
+    @property
+    def responses(self) -> list[PipelineResponse]:
+        return self._result.responses
 
     @property
-    def failures(self) -> dict[str, int]:
-        return self._failures
+    def question(self) -> str:
+        return self.result.question
 
     @property
-    def plan(self) -> ActionPlan:
-        return self._plan
+    def last_query(self) -> str:
+        return self.responses[-1].query
+
+    @last_query.setter
+    def last_query(self, value: str) -> None:
+        self.responses[-1].query = value
 
     @property
-    def model(self) -> ModelResult:
-        return self._model
+    def last_answer(self) -> str:
+        return self.responses[-1].answer
+
+    @last_answer.setter
+    def last_answer(self, value: str) -> None:
+        self.responses[-1].answer = value
 
     @property
-    def previous(self) -> States:
-        return self._previous
+    def last_accuracy(self) -> AccResponse:
+        return self.responses[-1].accuracy
+
+    @last_accuracy.setter
+    def last_accuracy(self, value: AccResponse) -> None:
+        self.responses[-1].accuracy = value
 
     @property
-    def query(self) -> str:
-        if self.last_task is not None:
-            question: str = self.last_task
-        else:
-            question: str = self._query
-        return question
+    def plan(self) -> ActionPlan:
+        return self.result.plan
+
+    @plan.setter
+    def plan(self, value: ActionPlan):
+        self.result.plan = value
 
     @property
-    def final_answer(self) -> Optional[str]:
-        if self.is_direct():
-            ai_response: str = self._direct_answer
-        else:
-            ai_response: str = os.linesep.join(self._resp_history)
-        return ai_response
+    def model(self) -> ModelResult:
+        return self.result.model
+
+    @model.setter
+    def model(self, value: ModelResult):
+        self.result.model = value
 
     @property
-    def resp_history(self) -> list[str]:
-        return self._resp_history
+    def final_answer(self) -> str:
+        return self.result.final_response()
 
     def track_previous(self) -> None:
         """TODO"""
@@ -143,67 +138,73 @@ def is_direct(self) -> bool:
         return self.plan.is_direct if self.plan is not None else True
 
     def st_startup(self) -> bool:
+        """Start the Task Splitter pipeline."""
         log.info("Task Splitter pipeline has started!")
-        self._invalidate()
         return True
 
     def st_model_select(self) -> bool:
+        """Select the model used to generate the response."""
        log.info("Selecting response model...")
-        self._model = ModelResult.default()
+        # FIXME: Model select is default for now
+        self.model = ModelResult.default()
         return True
 
     def st_task_split(self) -> bool:
+        """Split the question into an action plan, or a direct answer."""
         log.info("Splitting tasks...")
-        self._plan = actions.split(self.query, self.model)
-        if self._plan.is_direct:
-            self._direct_answer = self._plan.speak or msg.no_output("TaskSplitter")
-        return True
+        if (plan := actions.split(self.question, self.model)) is not None:
+            if plan.is_direct:
+                self.responses.append(PipelineResponse(self.question, plan.speak or msg.no_output("TaskSplitter")))
+            self.plan = plan
+            return True
+        return False
 
-    def st_execute_next(self) -> bool:
+    def st_execute_task(self) -> bool:
+        """Execute the next task from the action plan."""
+        check_state(self.plan.tasks is not None and len(self.plan.tasks) > 0)
         _iter_ = self.plan.tasks.copy().__iter__()
         if action := next(_iter_, None):
+            log.info(f"Executing task '{action}'...")
             if agent_output := actions.process_action(action):
-                self.last_task = self.plan.tasks.pop(0).task if len(self.plan.tasks) > 0 else None
-                return self.last_task is not None
+                self.responses.append(PipelineResponse(action.task, agent_output))
+                return True
         return False
 
     def st_accuracy_check(self) -> AccColor:
+        """Check the accuracy of the last pipeline response."""
+
+        if self.last_query is None or self.last_answer is None:
+            return AccColor.BAD
 
         # FIXME Hardcoded for now
-        pass_threshold: AccColor = AccColor.GOOD
-
-        acc: AccResponse = assert_accuracy(self.query, self.final_answer, pass_threshold)
-
-        if acc.is_interrupt:
-            # AI flags that it can't continue interacting.
-            log.warning(msg.interruption_requested(self.final_answer))
-        elif acc.is_terminate:
-            # AI flags that the user wants to end the session.
-            log.warning(msg.terminate_requested(self.final_answer))
-        elif acc.is_pass(pass_threshold):
-            # AI provided a good answer.
-            log.warning(f"AI provided a final answer: {self.final_answer}")
-            self.resp_history.append(self.final_answer)
-            shared.memory.save_context({"input": self.query}, {"output": self.final_answer})
+        pass_threshold: AccColor = AccColor.MODERATE
+        acc: AccResponse = eval_response(self.last_query, self.last_answer)
+
+        if acc.is_interrupt:  # AI flags that it can't continue interacting.
+            log.warning(msg.interruption_requested(self.last_answer))
+            self.plan.tasks.clear()
+        elif acc.is_terminate:  # AI flags that the user wants to end the session.
+            log.warning(msg.terminate_requested(self.last_answer))
+            self.plan.tasks.clear()
+        elif acc.is_pass(pass_threshold):  # AI provided a good answer.
+            log.info(f"AI provided a good answer: {self.last_answer}")
+            if len(self.plan.tasks) > 0:
+                self.plan.tasks.pop(0)
+            shared.memory.save_context({"input": self.last_query}, {"output": self.last_answer})
         else:
+            if len(self.responses) > 0:
+                self.responses.pop(0)
             acc_template = PromptTemplate(input_variables=["problems"], template=prompt.read_prompt("acc-report"))
-            # Include the guidelines for the first mistake.
-            if not shared.context.get("EVALUATION"):
+            if not shared.context.get("EVALUATION"):  # Include the guidelines for the first mistake.
shared.context.push("EVALUATION", EVALUATION_GUIDE) shared.context.push("EVALUATION", acc_template.format(problems=acc.details)) - self.last_acc_response = acc + self.last_accuracy = acc return acc.acc_color def st_refine_answer(self) -> bool: - if self.is_direct: - ai_response: str = self.final_answer - else: - ai_response: str = os.linesep.join(self._resp_history) - - return actions.refine_answer(self.query, ai_response, self.last_acc_response) + return actions.refine_answer(self.question, self.final_answer, self.last_accuracy) def st_final_answer(self) -> bool: - - return actions.wrap_answer(self.query, self.final_answer, self.model) + return actions.wrap_answer(self.question, self.final_answer, self.model) diff --git a/src/main/askai/core/processors/splitter/splitter_result.py b/src/main/askai/core/processors/splitter/splitter_result.py new file mode 100644 index 00000000..13b79ec3 --- /dev/null +++ b/src/main/askai/core/processors/splitter/splitter_result.py @@ -0,0 +1,31 @@ +import os +from dataclasses import dataclass, field + +from askai.core.enums.acc_color import AccColor +from askai.core.model.acc_response import AccResponse +from askai.core.model.action_plan import ActionPlan +from askai.core.model.model_result import ModelResult + + +@dataclass +class PipelineResponse: + """TODO""" + query: str + answer: str | None = None + accuracy: AccResponse | None = None + + +@dataclass +class SplitterResult: + """TODO""" + question: str + responses: list[PipelineResponse] = field(default_factory=list) + plan: ActionPlan | None = None + model: ModelResult | None = None + + def final_response(self) -> str: + """TODO""" + return os.linesep.join( + list(map(lambda r: r.answer, filter( + lambda acc: acc.accuracy and acc.accuracy.acc_color.passed(AccColor.MODERATE), self.responses))) + ) diff --git a/src/main/askai/core/processors/splitter/splitter_states.py b/src/main/askai/core/processors/splitter/splitter_states.py index 6809943f..57c2551b 100644 --- a/src/main/askai/core/processors/splitter/splitter_states.py +++ b/src/main/askai/core/processors/splitter/splitter_states.py @@ -18,12 +18,21 @@ class States(Enumeration): """Enumeration of possible task splitter states.""" # fmt: off - STARTUP = ' Processing query' - MODEL_SELECT = ' Selecting Model' - TASK_SPLIT = ' Splitting Tasks' - ACCURACY_CHECK = ' Checking Accuracy' - EXECUTE_TASK = ' Executing Task' - REFINE_ANSWER = ' Refining Answer' - WRAP_ANSWER = ' Wrapping final answer' - COMPLETE = 'ﲏ Completed' + NOT_STARTED = 'Not started' + + STARTUP = 'Processing Query' + + MODEL_SELECT = 'Selecting Model' + + TASK_SPLIT = 'Splitting Tasks' + + ACC_CHECK = 'Checking Accuracy' + + EXECUTE_TASK = 'Executing Task' + + REFINE_ANSWER = 'Refining Answer' + + WRAP_ANSWER = 'Wrapping Answer' + + COMPLETE = 'Completed' # fmt: on diff --git a/src/main/askai/core/processors/splitter/splitter_transitions.py b/src/main/askai/core/processors/splitter/splitter_transitions.py index c743ade8..cfb0a6f5 100644 --- a/src/main/askai/core/processors/splitter/splitter_transitions.py +++ b/src/main/askai/core/processors/splitter/splitter_transitions.py @@ -24,19 +24,24 @@ {'trigger': 'ev_pipeline_started', 'source': States.STARTUP, 'dest': States.MODEL_SELECT}, {'trigger': 'ev_model_selected', 'source': States.MODEL_SELECT, 'dest': States.TASK_SPLIT}, - {'trigger': 'ev_direct_answer', 'source': States.TASK_SPLIT, 'dest': States.ACCURACY_CHECK}, + {'trigger': 'ev_direct_answer', 'source': States.TASK_SPLIT, 'dest': States.ACC_CHECK}, {'trigger': 
'ev_plan_created', 'source': States.TASK_SPLIT, 'dest': States.EXECUTE_TASK}, - {'trigger': 'ev_accuracy_check', 'source': States.ACCURACY_CHECK, 'dest': States.EXECUTE_TASK}, + {'trigger': 'ev_accuracy_check', 'source': States.ACC_CHECK, 'dest': States.EXECUTE_TASK}, - {'trigger': 'ev_task_executed', 'source': States.EXECUTE_TASK, 'dest': States.ACCURACY_CHECK}, + {'trigger': 'ev_task_executed', 'source': States.EXECUTE_TASK, 'dest': States.ACC_CHECK}, - {'trigger': 'ev_accuracy_passed', 'source': States.ACCURACY_CHECK, 'dest': States.EXECUTE_TASK, 'conditions': ['has_next']}, - {'trigger': 'ev_accuracy_passed', 'source': States.ACCURACY_CHECK, 'dest': States.WRAP_ANSWER, 'unless': ['has_next']}, - {'trigger': 'ev_accuracy_failed', 'source': States.ACCURACY_CHECK, 'dest': States.EXECUTE_TASK}, - {'trigger': 'ev_refine_required', 'source': States.ACCURACY_CHECK, 'dest': States.REFINE_ANSWER, 'unless': ['has_next']}, + {'trigger': 'ev_accuracy_passed', 'source': States.ACC_CHECK, 'dest': States.EXECUTE_TASK, 'conditions': ['has_next']}, + {'trigger': 'ev_accuracy_passed', 'source': States.ACC_CHECK, 'dest': States.WRAP_ANSWER, 'unless': ['has_next']}, + + {'trigger': 'ev_accuracy_failed', 'source': States.ACC_CHECK, 'dest': States.EXECUTE_TASK}, + {'trigger': 'ev_accuracy_failed', 'source': States.ACC_CHECK, 'dest': States.TASK_SPLIT, 'unless': ['has_next']}, + + {'trigger': 'ev_refine_required', 'source': States.ACC_CHECK, 'dest': States.EXECUTE_TASK, 'conditions': ['has_next']}, + {'trigger': 'ev_refine_required', 'source': States.ACC_CHECK, 'dest': States.REFINE_ANSWER, 'unless': ['has_next']}, {'trigger': 'ev_answer_refined', 'source': States.REFINE_ANSWER, 'dest': States.WRAP_ANSWER}, + {'trigger': 'ev_final_answer', 'source': States.WRAP_ANSWER, 'dest': States.COMPLETE}, {'trigger': 'ev_task_complete', 'source': States.TASK_SPLIT, 'dest': States.COMPLETE}, diff --git a/src/main/askai/core/router/evaluation.py b/src/main/askai/core/router/evaluation.py index 7bc877d4..44c48c60 100644 --- a/src/main/askai/core/router/evaluation.py +++ b/src/main/askai/core/router/evaluation.py @@ -25,11 +25,11 @@ from askai.core.askai_prompt import prompt from askai.core.component.rag_provider import RAGProvider from askai.core.engine.openai.temperature import Temperature -from askai.core.enums.acc_color import AccColor from askai.core.model.acc_response import AccResponse from askai.core.model.ai_reply import AIReply from askai.core.support.langchain_support import lc_llm from askai.core.support.shared_instances import shared +from askai.exception.exceptions import InaccurateResponse # fmt: off EVALUATION_GUIDE: str = dedent(""" @@ -45,18 +45,15 @@ RAG: RAGProvider = RAGProvider("accuracy.csv") -def assert_accuracy(question: str, ai_response: str, pass_threshold: AccColor = AccColor.MODERATE) -> AccResponse: - """Assert that the AI's response to the question meets the required accuracy threshold. +def eval_response(question: str, ai_response: str) -> AccResponse: + """Check whether the AI's response to the question meets the required accuracy. :param question: The user's question. :param ai_response: The AI's response to be analyzed for accuracy. - :param pass_threshold: The accuracy threshold, represented by a color, that must be met or exceeded for the - response to be considered a pass (default is AccResponse.MODERATE). :return: The accuracy classification of the AI's response as an AccResponse enum value. 
""" if ai_response and ai_response not in msg.accurate_responses: eval_template = PromptTemplate( - input_variables=["rag", "input", "response"], template=prompt.read_prompt("evaluation") - ) + input_variables=["rag", "input", "response"], template=prompt.read_prompt("evaluation")) final_prompt = eval_template.format(rag=RAG.get_rag_examples(question), input=question, response=ai_response) log.info("Assert::[QUESTION] '%s' context: '%s'", question, ai_response) llm = lc_llm.create_chat_model(Temperature.COLDEST.temp) @@ -65,6 +62,8 @@ def assert_accuracy(question: str, ai_response: str, pass_threshold: AccColor = if response and (output := response.content): return AccResponse.parse_response(output) + raise InaccurateResponse(f"Accuracy response was null: {ai_response}") + def resolve_x_refs(ref_name: str, context: str | None = None) -> str: """Replace all cross-references with their actual values. diff --git a/src/main/askai/core/router/task_agent.py b/src/main/askai/core/router/task_agent.py index 6a88fa7f..c5c04f8a 100644 --- a/src/main/askai/core/router/task_agent.py +++ b/src/main/askai/core/router/task_agent.py @@ -12,18 +12,10 @@ Copyright (c) 2024, HomeSetup """ -from askai.core.askai_configs import configs -from askai.core.askai_events import events -from askai.core.askai_messages import msg -from askai.core.askai_prompt import prompt -from askai.core.engine.openai.temperature import Temperature -from askai.core.enums.acc_color import AccColor -from askai.core.model.ai_reply import AIReply -from askai.core.router.agent_tools import features -from askai.core.router.evaluation import assert_accuracy -from askai.core.support.langchain_support import lc_llm -from askai.core.support.shared_instances import shared -from askai.exception.exceptions import InaccurateResponse +import logging as log +from typing import AnyStr, Optional + +import openai from hspylib.core.config.path_object import PathObject from hspylib.core.metaclass.singleton import Singleton from langchain.agents import AgentExecutor, create_structured_chat_agent @@ -31,10 +23,13 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.runnables import Runnable from langchain_core.runnables.utils import Output -from openai import APIError -from typing import AnyStr, Optional -import logging as log +from askai.core.askai_configs import configs +from askai.core.askai_prompt import prompt +from askai.core.engine.openai.temperature import Temperature +from askai.core.router.agent_tools import features +from askai.core.support.langchain_support import lc_llm +from askai.core.support.shared_instances import shared class TaskAgent(metaclass=Singleton): @@ -44,9 +39,6 @@ class TaskAgent(metaclass=Singleton): INSTANCE: "TaskAgent" - def __init__(self): - self._lc_agent: Runnable | None = None - @property def agent_template(self) -> ChatPromptTemplate: """Retrieve the Structured Agent Template for use in the chat agent. This template is used to structure the @@ -65,18 +57,17 @@ def agent_template(self) -> ChatPromptTemplate: ] ) - def invoke(self, task: str) -> str: + def invoke(self, task: str) -> Optional[str]: """Invoke the agent to respond to the given query using the specified action plan. :param task: The AI task that outlines the steps to generate the response. :return: The agent's response as a string. 
""" - events.reply.emit(reply=AIReply.debug(msg.task(task))) + output: str | None = None + # events.reply.emit(reply=AIReply.debug(msg.task(task))) shared.context.push("HISTORY", task, "assistant") if (response := self._exec_task(task)) and (output := response["output"]): log.info("Router::[RESPONSE] Received from AI: \n%s.", output) - else: - output = msg.no_output("AI") - shared.context.push("HISTORY", output, "assistant") + shared.context.push("HISTORY", output, "assistant") return output @@ -86,11 +77,12 @@ def _create_lc_agent(self, temperature: Temperature = Temperature.COLDEST) -> Ru Temperature.COLDEST). :return: An instance of a Runnable representing the LangChain agent. """ + tools = features.tools() llm = lc_llm.create_chat_model(temperature.temp) chat_memory: BaseChatMemory = shared.memory lc_agent = create_structured_chat_agent(llm, tools, self.agent_template) - self._lc_agent: Runnable = AgentExecutor( + lc_agent: Runnable = AgentExecutor( agent=lc_agent, tools=tools, max_iterations=configs.max_router_retries, @@ -100,7 +92,7 @@ def _create_lc_agent(self, temperature: Temperature = Temperature.COLDEST) -> Ru verbose=configs.is_debug, ) - return self._lc_agent + return lc_agent def _exec_task(self, task: AnyStr) -> Optional[Output]: """Execute the specified agent task. @@ -108,11 +100,14 @@ def _exec_task(self, task: AnyStr) -> Optional[Output]: :return: An instance of Output containing the result of the task, or None if the task fails or produces no output. """ + output: Output | None = None try: lc_agent: Runnable = self._create_lc_agent() output = lc_agent.invoke({"input": task}) - except APIError as err: - raise InaccurateResponse(str(err)) + if "Agent stopped due to iteration limit or time limit." in str(output): + output = None + except openai.APIError as err: + log.error(str(err)) return output diff --git a/src/main/askai/resources/rag/accuracy.csv b/src/main/askai/resources/rag/accuracy.csv index ba0d69e3..d94bd1ac 100644 --- a/src/main/askai/resources/rag/accuracy.csv +++ b/src/main/askai/resources/rag/accuracy.csv @@ -17,9 +17,10 @@ What is the whether like today?;Invalid or incomplete response;Red;This error me List my downloads;I will list your downloads for you;Red;Acknowledging or mentioning previous responses and indicating or stating the intention of accomplishment are considered unhelpful. Who are you?;Yes he is famous;Red;The AI should assess whether the response is coherent with the question, ensure the answer is unbiased and free from stereotypes, and identify any AI hallucinations by verifying the accuracy of the response, labeling it 'Red' if it does not align with established facts. What is my first task?;You can find out your first task by checking the list of activities above;Red;The AI should refrain from seeking assistance from the user. -List my music and let me know if there is any gabiroba song. If so, show me the file name and open it.;The 'gabiroba' song file was not found in the Music folder.;Black;This is a clear negative response indicating that the file does not exist, and therefore, the AI cannot proceed any further. +List my music and let me know if there is any gabiroba song. If so, show me the file name and open it.;There are no gabiroba songs in the provided music directory.;Black;This is a clear negative response indicating that the file does not exist, and therefore, the AI cannot proceed any further. 
 Display the file name of the gabiroba song;The Gabiroba song file name is 'have-a-nice-day.mp3';Red;This is a clear hallucination because the filename does not conform to the required file pattern.
 What is my second task;The first task or action to be taken according to the reminder is to 'Thank Rafael Romão for the keyboard' at 17:20 on 04/11/2024.;Red;The AI partially addresses the user's question by providing information about the first task but lacks a direct response to the query about the second task.
 Who is Garirubal?;Your Google search returned the following:\n\nIt appears that there are currently no significant search results;Yellow;Internet searches can yield negative results or a lack of information, therefore, there is no definitive right or wrong.
 Bye bye;The user requested to end the session.;White;The user requested to terminate the session, so, white is the right classification.
-Show the file readme.txt;To proceed, I will open this file for you. Please hold on a moment.;The user requested to open the file reminder.txt, but the AI inaccurately responded that it would open it a moment. This message is unhelpful and is therefore classified as Red.
+Show the file readme.txt;To proceed, I will open this file for you. Please hold on a moment.;Red;The user requested to open the file readme.txt, but the AI inaccurately responded that it would open it in a moment. This message is unhelpful and is therefore classified as Red.
+Identify if any file names containing 'gabiroba';No files containing 'gabiroba' were found in the specified directory.;Black;The AI responded negatively, stating that gabiroba song files do not exist and, as a result, cannot proceed with further interactions.
diff --git a/src/main/requirements.txt b/src/main/requirements.txt
index d6966e93..e7de4077 100644
--- a/src/main/requirements.txt
+++ b/src/main/requirements.txt
@@ -9,6 +9,7 @@ tqdm>=4.66.5
 pyperclip>=1.9.0
 python-magic>=0.4.27
 pytz>=2024.1
+transitions>=0.9.2
 langchain>=0.3.0
 langchain-openai>=0.2.0
 langchain-community>=0.3.1
@@ -23,7 +24,7 @@ protobuf>=4.25.4
 aiohttp>=3.10.5
 html2text>=2024.2.26
 rich>=13.8.1
-textual>=0.80.1
+textual==0.80.1
 soundfile>=0.12.1
 PyAudio>=0.2.14
 SpeechRecognition>=3.10.4
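
Note for reviewers: the pipeline introduced by this patch is driven by the `transitions` state machine, newly pinned in requirements.txt above. The following is a minimal, runnable sketch of that pattern — the states, triggers, and `has_next` guard here are simplified stand-ins, not the actual definitions from `splitter_states.py` and `splitter_transitions.py`:

from enum import Enum

from transitions import Machine


class States(Enum):
    STARTUP = "Starting"
    EXECUTE_TASK = "Executing Task"
    ACC_CHECK = "Checking Accuracy"
    COMPLETE = "Completed"


TRANSITIONS = [
    {"trigger": "ev_started", "source": States.STARTUP, "dest": States.EXECUTE_TASK},
    {"trigger": "ev_task_executed", "source": States.EXECUTE_TASK, "dest": States.ACC_CHECK},
    # Guarded pair: 'conditions'/'unless' call the model's 'has_next' to pick the destination.
    {"trigger": "ev_accuracy_passed", "source": States.ACC_CHECK, "dest": States.EXECUTE_TASK, "conditions": ["has_next"]},
    {"trigger": "ev_accuracy_passed", "source": States.ACC_CHECK, "dest": States.COMPLETE, "unless": ["has_next"]},
]


class MiniPipeline:
    """Toy model object: Machine attaches 'state' and the trigger methods to it."""

    def __init__(self, tasks: list[str]) -> None:
        self.tasks = tasks
        # auto_transitions=False suppresses the implicit 'to_<state>' triggers,
        # mirroring the Machine setup in SplitterPipeline.__init__.
        self._machine = Machine(
            model=self, states=States, transitions=TRANSITIONS,
            initial=States.STARTUP, auto_transitions=False,
        )

    def has_next(self) -> bool:
        return len(self.tasks) > 0


if __name__ == "__main__":
    pipe = MiniPipeline(["list files"])
    pipe.ev_started()           # STARTUP -> EXECUTE_TASK
    pipe.ev_task_executed()     # EXECUTE_TASK -> ACC_CHECK
    pipe.tasks.pop()            # no tasks left, so the 'unless' branch fires next
    pipe.ev_accuracy_passed()   # ACC_CHECK -> COMPLETE
    print(pipe.state)           # States.COMPLETE

Each dict in TRANSITIONS becomes a trigger method on the model, and guarded pairs such as `ev_accuracy_passed` choose their destination by evaluating `has_next` — which is how the splitter's executor loop decides between executing the next task and wrapping the final answer.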