Commit
Fix the history issue.
yorevs committed Oct 10, 2024
1 parent 972b096 commit 4dabc61
Showing 9 changed files with 49 additions and 101 deletions.
9 changes: 4 additions & 5 deletions src/main/askai/core/askai.py
@@ -145,17 +145,15 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]:
filter(lambda a: a and a != "None", re.split(r"\s", f"{command.group(1)} {command.group(2)}"))
)
ask_commander(args, standalone_mode=False)
elif not (output := cache.read_reply(question)):
return True, None
shared.context.push("HISTORY", question)
if not (output := cache.read_reply(question)):
log.debug('Response not found for "%s" in cache. Querying from %s.', question, self.engine.nickname())
events.reply.emit(reply=AIReply.detailed(msg.wait()))
if output := processor.process(question, context=read_stdin(), query_prompt=self._query_prompt):
events.reply.emit(reply=AIReply.info(output))
shared.context.push("HISTORY", question)
shared.context.push("HISTORY", output, "assistant")
else:
log.debug("Reply found for '%s' in cache.", question)
shared.context.push("HISTORY", question)
shared.context.push("HISTORY", output, "assistant")
events.reply.emit(reply=AIReply.info(output))
except (NotImplementedError, ImpossibleQuery) as err:
events.reply.emit(reply=AIReply.error(err))
@@ -173,6 +171,7 @@ def ask_and_reply(self, question: str) -> tuple[bool, Optional[str]]:
status = False
finally:
if output:
shared.context.push("HISTORY", output, "assistant")
shared.context.set("LAST_REPLY", output, "assistant")

return status, output
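
Read together, the two hunks above move the history bookkeeping: the question is now pushed to HISTORY once, before the cache lookup, and the assistant reply is pushed once in the finally block instead of separately in the cache-hit and cache-miss branches. A minimal sketch of the resulting pattern (simplified names and a hypothetical query_engine stub, not the actual AskAI API):

    from typing import Optional

    def query_engine(question: str) -> Optional[str]:
        """Hypothetical stand-in for the real LLM call."""
        return f"The answer to: {question}"

    def ask_and_reply(
        question: str, cache: dict[str, str], history: list[tuple[str, str]]
    ) -> tuple[bool, Optional[str]]:
        output: Optional[str] = None
        status = True
        try:
            history.append(("human", question))      # question recorded once, before the cache lookup
            if not (output := cache.get(question)):  # cache miss: ask the engine
                output = query_engine(question)
        except Exception:
            status = False
        finally:
            if output:
                history.append(("assistant", output))  # reply recorded once, whichever branch produced it
        return status, output

    print(ask_and_reply("hello", cache={}, history=[]))
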
2 changes: 0 additions & 2 deletions src/main/askai/core/features/processors/chat.py
@@ -78,8 +78,6 @@ def process(self, question: str, **kwargs) -> Optional[str]:

if output := runnable.invoke({"input": question}, config={"configurable": {"session_id": history_ctx or ""}}):
response = output.content
shared.context.push(history_ctx, question)
shared.context.push(history_ctx, response, "assistant")

return response

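
The two deleted pushes suggest the chat processor's runnable already records each turn through its session history, so pushing again duplicated every exchange. A sketch of that behavior, assuming LangChain's RunnableWithMessageHistory wraps the chain (the session_id config in the surviving invoke call points that way); the echo chain below is a stand-in:

    from langchain_community.chat_message_histories.in_memory import ChatMessageHistory
    from langchain_core.messages import AIMessage
    from langchain_core.runnables import RunnableLambda
    from langchain_core.runnables.history import RunnableWithMessageHistory

    store: dict[str, ChatMessageHistory] = {}

    def get_session_history(session_id: str) -> ChatMessageHistory:
        return store.setdefault(session_id, ChatMessageHistory())

    chain = RunnableLambda(lambda x: AIMessage(content=f"echo: {x['input']}"))  # stand-in chain

    # The wrapper records the human input and the AI output on every invoke,
    # so manual context pushes after invoke() would double-record each turn.
    runnable = RunnableWithMessageHistory(
        chain, get_session_history, input_messages_key="input", history_messages_key="history"
    )
    output = runnable.invoke({"input": "hi"}, config={"configurable": {"session_id": "s1"}})
    print(output.content, len(store["s1"].messages))  # echo: hi 2
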
24 changes: 12 additions & 12 deletions src/main/askai/core/features/processors/task_splitter.py
@@ -176,9 +176,21 @@ def _splitter_wrapper_(retry_count: int) -> Optional[str]:
log.info("Router::[RESPONSE] Received from AI: \n%s.", answer)
plan = ActionPlan.create(question, answer, model)
if not plan.is_direct and (task_list := plan.tasks):
acc_response: str | None = None
events.reply.emit(reply=AIReply.debug(msg.action_plan(str(plan))))
if plan.speak:
events.reply.emit(reply=AIReply.info(plan.speak))
try:
agent_output: str | None = self._process_tasks(task_list, retries)
if len(task_list) > 1:
acc_response: AccResponse = assert_accuracy(question, agent_output, AccColor.MODERATE)
except InterruptionRequest as err:
return str(err)
except self.RETRIABLE_ERRORS:
if retry_count <= 1:
events.reply.emit(reply=AIReply.error(msg.sorry_retry()))
raise
return self.wrap_answer(question, agent_output, plan.model, acc_response)
else:
# Most of the time, this indicates that the LLM responded directly.
acc_response: AccResponse = assert_accuracy(question, response.content, AccColor.GOOD)
@@ -188,18 +200,6 @@ def _splitter_wrapper_(retry_count: int) -> Optional[str]:
else:
return msg.no_output("Task-Splitter")  # Most of the time, this indicates a failure.

try:
agent_output: str | None = self._process_tasks(task_list, retries)
acc_response: AccResponse = assert_accuracy(question, agent_output, AccColor.MODERATE)
except InterruptionRequest as err:
return str(err)
except self.RETRIABLE_ERRORS:
if retry_count <= 1:
events.reply.emit(reply=AIReply.error(msg.sorry_retry()))
raise

return self.wrap_answer(question, agent_output, plan.model, acc_response)

return _splitter_wrapper_(retries)

@retry(exceptions=RETRIABLE_ERRORS, tries=configs.max_router_retries, backoff=1, jitter=1)
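
The try/except that runs the tasks now lives inside the multi-task branch, acc_response starts as None, and the extra accuracy assertion only fires when the plan holds more than one task. A simplified sketch of the reordered control flow, with stand-ins for the real _process_tasks, assert_accuracy, and wrap_answer:

    from types import SimpleNamespace
    from typing import Optional

    class InterruptionRequest(Exception):
        """Stand-in for the real exception raised when the user interrupts."""

    def process_tasks(tasks: list[SimpleNamespace]) -> str:   # stand-in for _process_tasks
        return "; ".join(t.task for t in tasks)

    def assert_accuracy(question: str, output: str) -> str:   # stand-in for the real check
        return "MODERATE"

    def wrap_answer(question: str, output: str, acc: Optional[str]) -> str:
        return output if acc is None else f"{output} [{acc}]"

    def split_and_answer(question: str, plan: SimpleNamespace) -> str:
        acc_response: Optional[str] = None            # may legitimately stay None now
        if not plan.is_direct and (task_list := plan.tasks):
            try:
                agent_output = process_tasks(task_list)
                if len(task_list) > 1:                # single-task plans skip the re-check
                    acc_response = assert_accuracy(question, agent_output)
            except InterruptionRequest as err:        # user asked to stop: surface the message
                return str(err)
            return wrap_answer(question, agent_output, acc_response)
        return "direct-answer path (elided)"

    plan = SimpleNamespace(is_direct=False, tasks=[SimpleNamespace(task="ls"), SimpleNamespace(task="pwd")])
    print(split_and_answer("inspect the cwd", plan))  # ls; pwd [MODERATE]
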
3 changes: 1 addition & 2 deletions src/main/askai/core/features/router/task_agent.py
@@ -60,11 +60,11 @@ def invoke(self, task: str) -> str:
shared.context.push("HISTORY", task, "assistant")
if (response := self._exec_task(task)) and (output := response["output"]):
log.info("Router::[RESPONSE] Received from AI: \n%s.", output)
shared.context.push("HISTORY", output, "assistant")
assert_accuracy(task, output, AccColor.MODERATE)
shared.memory.save_context({"input": task}, {"output": output})
else:
output = msg.no_output("AI")
shared.context.push("HISTORY", output, "assistant")

return output

@@ -96,7 +96,6 @@ def _exec_task(self, task: AnyStr) -> Optional[Output]:
:return: An instance of Output containing the result of the task, or None if the task fails or produces
no output.
"""
output: str | None = None
try:
lc_agent: Runnable = self._create_lc_agent()
output = lc_agent.invoke({"input": task})
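
The push of the agent's reply moves below the if/else, so both the real output and the no_output fallback land in HISTORY exactly once. A minimal sketch of that unified path (exec_task is a hypothetical stand-in for the LangChain agent invocation):

    from typing import Optional

    def exec_task(task: str) -> Optional[dict]:
        """Hypothetical stand-in for the LangChain agent invocation."""
        return {"output": f"done: {task}"} if task else None

    def invoke(task: str, history: list[tuple[str, str]]) -> str:
        response = exec_task(task)
        output = (response or {}).get("output") or "no output"  # fallback when the agent yields nothing
        history.append(("assistant", output))                   # pushed once, success and fallback alike
        return output

    hist: list[tuple[str, str]] = []
    print(invoke("ls", hist), "|", invoke("", hist), "|", hist)
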
9 changes: 5 additions & 4 deletions src/main/askai/core/model/acc_response.py
@@ -12,13 +12,13 @@
Copyright (c) 2024, HomeSetup
"""
from askai.core.enums.acc_color import AccColor, AccuracyColors
from askai.core.support.llm_parser import parse_field
from dataclasses import dataclass
from pathlib import Path

from hspylib.core.tools.text_tools import ensure_endswith
from os.path import expandvars

import os
from askai.core.enums.acc_color import AccColor, AccuracyColors
from askai.core.support.llm_parser import parse_field


@dataclass(frozen=True)
@@ -38,6 +38,7 @@ def parse_response(cls, response: str) -> "AccResponse":
:param response: The LLM response.
:return: An instance of AccResponse created from the parsed response.
"""
Path(Path.home() / 'acc-resp.txt').write_text(response)

acc_color: AccColor = AccColor.of_color(parse_field("@color", response))
accuracy: float = float(parse_field("@accuracy", response).strip("%"))
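
Besides regrouping the imports, the hunk adds an unconditional dump of the raw accuracy response to acc-resp.txt in the user's home, which looks like a debugging aid rather than a feature. A sketch of the surrounding parse step, assuming the tagged layout that the parse_field("@...") calls imply; both the layout and the parse_field stub are assumptions, not the real implementation:

    from pathlib import Path

    # Assumed tagged layout, implied by the parse_field("@...") calls.
    response = "@color: Green\n@accuracy: 92%\n@reasoning: Addresses every sub-goal.\n"

    def parse_field(tag: str, text: str) -> str:
        """Minimal stand-in: return whatever follows 'tag:' on its line."""
        for line in text.splitlines():
            if line.startswith(tag):
                return line.split(":", 1)[1].strip()
        return ""

    Path(Path.home() / "acc-resp.txt").write_text(response)   # the debug dump this commit adds
    accuracy = float(parse_field("@accuracy", response).strip("%"))
    print(accuracy)  # 92.0
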
22 changes: 15 additions & 7 deletions src/main/askai/core/model/action_plan.py
@@ -13,8 +13,8 @@
Copyright (c) 2024, HomeSetup
"""
from dataclasses import dataclass, field
from pathlib import Path
from types import SimpleNamespace
from typing import Any

from hspylib.core.preconditions import check_state

@@ -60,6 +60,7 @@ def _parse_response(question: str, response: str) -> "ActionPlan":
:param response: The router's response.
:return: An instance of ActionPlan created from the parsed response.
"""
Path(Path.home() / 'splitter-resp.txt').write_text(response)

speak: str = parse_field("@speak", response)
primary_goal: str = parse_field("@primary_goal", response)
@@ -68,12 +69,14 @@ def _parse_response(question: str, response: str) -> "ActionPlan":
direct: str = parse_word("direct", response)

# fmt: off
if (direct and len(direct) > 1) or len(tasks) == 0:
if direct and len(direct) > 1:
plan = ActionPlan._direct_answer(question, direct, primary_goal, ModelResult.default())
elif (direct and len(direct) > 1) or len(tasks) == 1:
plan = ActionPlan._direct_task(question, speak, primary_goal, tasks, ModelResult.default())
plan = ActionPlan._direct_task(question, speak, primary_goal, tasks[0], ModelResult.default())
elif tasks:
plan = ActionPlan(question, speak, primary_goal, False, sub_goals, tasks)
elif not speak and not primary_goal:
plan = ActionPlan._direct_task(question, "", "", response, ModelResult.default())
else:
raise InaccurateResponse("AI provided an invalid action plan!")
# fmt: on
@@ -88,22 +91,27 @@ def _direct_answer(question: str, answer: str, goal: str, model: ModelResult) -> "ActionPlan":
:param model: The result model.
:return: An instance of ActionPlan created from the direct response.
"""
speak: str = answer.split(',')[0].strip("'\"")
if answer:
speak: str = answer.split(',')[0].strip("'\"")
else:
speak: str = answer

return ActionPlan(question, speak, goal, True, [], [], model)

@staticmethod
def _direct_task(question: str, speak: str, goal: str, tasks: list[Any], model: ModelResult) -> "ActionPlan":
def _direct_task(
question: str, speak: str, goal: str, task: str | SimpleNamespace, model: ModelResult) -> "ActionPlan":
"""Create a simple ActionPlan from an AI's direct response in plain text.
:param question: The original question that was sent to the AI.
:param speak: The spoken response generated by the AI.
:param goal: The goal or desired outcome from the task.
:param tasks: A list of tasks related to achieving the goal.
:param task: The task related to achieving the goal.
:param model: The result model.
:return: An instance of ActionPlan created from the direct response.
"""
task_list: list[SimpleNamespace] = [SimpleNamespace(id=1, task=str(task)) if isinstance(task, str) else task]

return ActionPlan(question, speak, goal, False, [], tasks, model)
return ActionPlan(question, speak, goal, False, [], task_list, model)

def __str__(self):
sub_goals: str = " ".join(f"{i + 1}. {g}" for i, g in enumerate(self.sub_goals)) if self.sub_goals else "N/A"
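
_direct_task now takes a single task, str or SimpleNamespace, and normalizes it into a one-element list, while the branch ladder gains a fallback that treats the bare response as the task when neither speak nor primary_goal was parsed. Note that the `direct and len(direct) > 1` test inside the elif can never be true once the first branch has run, so that elif effectively reduces to `len(tasks) == 1`. A runnable sketch of the normalization, simplified to return just the task list:

    from types import SimpleNamespace

    def direct_task(task: str | SimpleNamespace) -> list[SimpleNamespace]:
        """Sketch: a lone task, plain string or namespace, becomes a one-item task list."""
        return [SimpleNamespace(id=1, task=str(task)) if isinstance(task, str) else task]

    print(direct_task("ls -la"))                          # [namespace(id=1, task='ls -la')]
    print(direct_task(SimpleNamespace(id=7, task="ls")))  # [namespace(id=7, task='ls')]
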
16 changes: 10 additions & 6 deletions src/main/askai/core/support/chat_context.py
@@ -12,16 +12,19 @@
Copyright (c) 2024, HomeSetup
"""
from askai.core.component.cache_service import cache
from askai.exception.exceptions import TokenLengthExceeded

import os
import re
from collections import defaultdict, deque, namedtuple
from functools import partial, reduce
from typing import Any, AnyStr, Literal, Optional, TypeAlias, get_args

from hspylib.core.preconditions import check_argument
from langchain_community.chat_message_histories.in_memory import ChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from typing import Any, AnyStr, Literal, Optional, TypeAlias

import os
import re
from askai.core.component.cache_service import cache
from askai.exception.exceptions import TokenLengthExceeded

ChatRoles: TypeAlias = Literal["system", "human", "assistant"]

@@ -90,9 +93,10 @@ def push(self, key: str, content: Any, role: ChatRoles = "human") -> ContextRaw:
:param role: The role associated with the message (default is "human").
:return: The updated chat context.
"""
check_argument(role in get_args(ChatRoles), f"Invalid ChatRole: '{role}'")
if (token_length := (self.length(key)) + len(content)) > self._token_limit:
raise TokenLengthExceeded(f"Required token length={token_length} limit={self._token_limit}")
if (entry := ContextEntry(role, content)) not in (ctx := self._store[key]):
if (entry := ContextEntry(role, content.strip())) not in (ctx := self._store[key]):
ctx.append(entry)

return self.get(key)
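
push() now rejects unknown roles up front by unpacking the ChatRoles Literal with get_args, and strips the content before the dedup check. A self-contained sketch of the role validation:

    from typing import Literal, TypeAlias, get_args

    ChatRoles: TypeAlias = Literal["system", "human", "assistant"]

    def validate_role(role: str) -> None:
        # get_args unpacks the Literal into its allowed values at runtime
        if role not in get_args(ChatRoles):
            raise ValueError(f"Invalid ChatRole: '{role}'")

    validate_role("assistant")  # passes silently
    try:
        validate_role("robot")
    except ValueError as err:
        print(err)  # Invalid ChatRole: 'robot'
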
18 changes: 2 additions & 16 deletions src/main/askai/resources/prompts/refine-response.txt
@@ -19,33 +19,19 @@ Act as a text editor and formatter. Refine the AI response to ensure they are cl
- Highlight these details using appropriate Markdown formatting (e.g., `code` for file paths and names).
- The user's name is "{user}". Address him by his name in responses.

4. **Use Provided Tips:**
- If available, integrate the provided tips to enhance the final user response:

---
{improvements}
---

5. **Leave it Untouched**:
4. **Leave it Untouched**:
- If no improvements are possible, return the response as is without any extraneous explanation or comments.

6. **Watermark**:
- Add your watermark at the end of the response (do not include the triple quotes):

"""
---
> Improved by the Refiner Model
"""
{improvements}


Human Question: "{question}"


AI Response:

"""
{context}
"""


Begin refining the response!
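
The template drops the separate tips and watermark sections, folds the {improvements} placeholder in near the end, and renumbers the remaining rule. A sketch of how the trimmed template might be rendered with LangChain's PromptTemplate; the rendering code is illustrative, and only the placeholder names come from the file:

    from langchain_core.prompts import PromptTemplate

    template = PromptTemplate.from_template(
        "The user's name is \"{user}\".\n\n{improvements}\n\n"
        "Human Question: \"{question}\"\n\nAI Response:\n{context}"
    )
    print(template.format(user="Hugo",
                          improvements="- Prefer short sentences.",
                          question="What time is it?",
                          context="It is noon."))
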
47 changes: 0 additions & 47 deletions src/main/requirements.txt

This file was deleted.
