Add chat processor
yorevs committed Sep 27, 2024
1 parent 43710f7 commit e868085
Showing 7 changed files with 110 additions and 35 deletions.
2 changes: 1 addition & 1 deletion src/main/askai/core/askai.py
@@ -93,7 +93,7 @@ def __init__(
         configs.model = model_name

         self._session_id = now("%Y%m%d")[:8]
-        self._engine: AIEngine = shared.create_engine(engine_name, model_name, RouterMode.RAG)
+        self._engine: AIEngine = shared.create_engine(engine_name, model_name, RouterMode.CHAT)
         self._context: ChatContext = shared.create_context(self._engine.ai_token_limit())
         self._mode: RouterMode = shared.mode
         self._console_path = Path(f"{CACHE_DIR}/askai-{self.session_id}.md")
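A small sketch of what the new default implies at startup (not in the diff; the engine and model names below are placeholders, not values mandated by this commit):

from askai.core.enums.router_mode import RouterMode
from askai.core.support.shared_instances import shared

# Engines are now created in CHAT mode by default; "openai"/"gpt-3.5-turbo"
# are placeholder arguments for illustration only.
engine = shared.create_engine("openai", "gpt-3.5-turbo", RouterMode.CHAT)
# RouterMode.default() is unchanged: TASK_SPLIT when interactive, QSTRING otherwise.
assert RouterMode.default() in (RouterMode.TASK_SPLIT, RouterMode.QSTRING)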
9 changes: 2 additions & 7 deletions src/main/askai/core/enums/__init__.py
@@ -5,10 +5,5 @@
 # Package: main.askai.core.enums
 """Package initialization."""

-__all__ = [
-    'acc_response',
-    'router_mode',
-    'routing_model',
-    'verbosity'
-]
-__version__ = '1.0.13'
+__all__ = ["acc_response", "router_mode", "response_model", "verbosity"]
+__version__ = "1.0.13"
src/main/askai/core/enums/{routing_model.py → response_model.py}
@@ -4,7 +4,7 @@
 """
 @project: HsPyLib-AskAI
 @package: askai.core.enums.routing_model
-@file: routing_model.py
+@file: response_model.py
 @created: Tue, 11 Jun 2024
 @author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
 @site: https://github.com/yorevs/askai
@@ -19,7 +19,7 @@
 import os


-class RoutingModel(Enumeration):
+class ResponseModel(Enumeration):
     """Enumeration representing the model used to provide the final answer to the user.
     This class defines the different models that can be used in the routing process to generate and deliver the
     final response.
@@ -93,11 +93,11 @@ class RoutingModel(Enumeration):
     # fmt: on

     @classmethod
-    def of_model(cls, model_id: str) -> "RoutingModel":
-        """Return the RoutingModel instance that matches the given model ID.
+    def of_model(cls, model_id: str) -> "ResponseModel":
+        """Return the ResponseModel instance that matches the given model ID.
         :param model_id: The ID of the model to retrieve.
-        :return: The RoutingModel instance corresponding to the specified model ID.
-        :raises ValueError: If no matching RoutingModel is found.
+        :return: The ResponseModel instance corresponding to the specified model ID.
+        :raises ValueError: If no matching ResponseModel is found.
         """
         for v in cls.values():
             if v[0] == model_id:
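For reference, a minimal sketch of the renamed lookup (not part of the commit; the model ID below is a placeholder, since the enum's value tuples are collapsed in this view):

from askai.core.enums.response_model import ResponseModel

# Placeholder ID: of_model() compares the given string against the first
# element of each member's value tuple and raises ValueError on no match.
model = ResponseModel.of_model("chat-master")
print(model)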
23 changes: 13 additions & 10 deletions src/main/askai/core/enums/router_mode.py
@@ -18,6 +18,7 @@
 from askai.core.features.processors.qstring import qstring
 from askai.core.features.processors.rag import rag
 from askai.core.features.processors.task_splitter import splitter
+from askai.core.features.processors.chat import chat
 from hspylib.core.enums.enumeration import Enumeration
 from typing import Optional

@@ -27,17 +28,19 @@ class RouterMode(Enumeration):
     defines the different modes that the router can operate in, each affecting how answers are generated and delivered.
     """

-    # fmt: on
+    # fmt: off

-    TASK_SPLIT = "Task Splitter", "", splitter
+    TASK_SPLIT = "Task Splitter", "", splitter

-    QNA = "Questions and Answers", "", qna
+    QNA = "Questions & Answers", "", qna

-    QSTRING = "Non-Interactive", "", qstring
+    QSTRING = "Non-Interactive", "", qstring

-    RAG = "Retrieval-Augmented Generation", "ﮐ", rag
+    RAG = "Retrieval-Augmented-Generation", "ﮐ", rag

-    # fmt: off
+    CHAT = "Taius Chat", "", chat
+
+    # fmt: on

     @classmethod
     def modes(cls) -> list[str]:
@@ -47,25 +50,25 @@ def modes(cls) -> list[str]:
         return RouterMode.names()

     @staticmethod
-    def default() -> 'RouterMode':
+    def default() -> "RouterMode":
         """Return the default routing mode.
         :return: The default RouterMode instance.
         """
         return RouterMode.TASK_SPLIT if configs.is_interactive else RouterMode.QSTRING

     @classmethod
-    def of_name(cls, name: str) -> 'RouterMode':
+    def of_name(cls, name: str) -> "RouterMode":
         """Retrieve the RouterMode instance corresponding to the given name.
         :param name: The name of the router mode to retrieve.
         :return: The RouterMode instance that matches the given name.
         :raises ValueError: If no matching RouterMode is found.
         """
-        return cls[name] if name.casefold() != 'default' else cls.default()
+        return cls[name] if name.casefold() != "default" else cls.default()

     def __str__(self):
         return f"{self.icon} {self.name}"

-    def __eq__(self, other: 'RouterMode') -> bool:
+    def __eq__(self, other: "RouterMode") -> bool:
         return self.name == other.name

     @property
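A short sketch of how the new CHAT member resolves through the helpers above (not in the diff; it assumes hspylib's Enumeration keeps Enum's name-based item lookup, which of_name relies on):

from askai.core.enums.router_mode import RouterMode

mode = RouterMode.of_name("CHAT")         # exact member-name lookup via cls[name]
assert mode == RouterMode.CHAT            # __eq__ compares member names
fallback = RouterMode.of_name("default")  # any casing of 'default' delegates to default()
# default() yields TASK_SPLIT in interactive sessions, QSTRING otherwise.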
77 changes: 77 additions & 0 deletions src/main/askai/core/features/processors/chat.py
@@ -0,0 +1,77 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
@project: HsPyLib-AskAI
@package: askai.core.features.processors
@file: chat.py
@created: Mon, 23 Sep 2024
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
@site: https://github.com/yorevs/askai
@license: MIT - Please refer to <https://opensource.org/licenses/MIT>
Copyright 2024, HSPyLib team
"""
from typing import Optional, Any

from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperature import Temperature
from askai.core.support.langchain_support import lc_llm
from askai.core.support.shared_instances import shared
from hspylib.core.config.path_object import PathObject
from hspylib.core.metaclass.singleton import Singleton
from hspylib.core.tools.dict_tools import get_or_default_by_key
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain_core.runnables import RunnableWithMessageHistory


class ChatProcessor(metaclass=Singleton):
    """Processor that holds a free-form conversation with the LLM, keeping the dialog in the chat history."""

    INSTANCE: "ChatProcessor"

    def template(self, prompt_str: str, *inputs: str, **kwargs) -> ChatPromptTemplate:
        """Retrieve the processor Template."""

        template = PromptTemplate(input_variables=list(inputs), template=prompt_str)

        # fmt: off
        return ChatPromptTemplate.from_messages([
            ("system", template.format(**kwargs)),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ])
        # fmt: on

    def process(self, question: str, **kwargs) -> Optional[str]:
        """Process the user question to retrieve the final response.
        :param question: The user question to process.
        :return: The final response after processing the question.
        """

        response = None
        prompt_file: str | None = get_or_default_by_key(kwargs, "prompt_file", None)
        history_ctx: Any | None = get_or_default_by_key(kwargs, "history_ctx", "HISTORY")
        ctx: str = get_or_default_by_key(kwargs, "context", "")
        inputs: list[str] = get_or_default_by_key(kwargs, "inputs", [])
        args: dict[str, Any] = get_or_default_by_key(kwargs, "args", {})
        inputs = inputs or ["user", "idiom", "context", "question"]
        args = args or {"user": prompt.user.title(), "idiom": shared.idiom, "context": ctx, "question": question}
        prompt_path: PathObject = PathObject.of(prompt_file or prompt.append_path("taius/taius-jarvis"))
        prompt_str: str = prompt.read_prompt(prompt_path.filename, prompt_path.abs_dir)

        # Wire the prompt template to the chat model and replay the chat history.
        template = self.template(prompt_str, *inputs, **args)
        runnable = template | lc_llm.create_chat_model(Temperature.COLDEST.temp)
        runnable = RunnableWithMessageHistory(
            runnable, shared.context.flat, input_messages_key="input", history_messages_key="chat_history"
        )

        if output := runnable.invoke({"input": question}, config={"configurable": {"session_id": history_ctx or ""}}):
            response = output.content
            # Persist both turns so follow-up questions keep the conversation context.
            shared.context.push(history_ctx, question)
            shared.context.push(history_ctx, response, "assistant")

        return response


assert (chat := ChatProcessor().INSTANCE) is not None
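A hedged usage sketch of the new processor (not part of the commit): every keyword argument is optional, and the values below simply restate the defaults process() falls back to, the "taius-jarvis" prompt and the "HISTORY" history key:

from askai.core.features.processors.chat import chat

reply = chat.process(
    "What did we talk about earlier?",
    history_ctx="HISTORY",  # session key for the chat history
    context="",             # extra system context, empty by default
)
if reply:
    print(reply)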
18 changes: 9 additions & 9 deletions src/main/askai/core/features/processors/task_splitter.py
@@ -26,7 +26,7 @@
 from askai.core.component.geo_location import geo_location
 from askai.core.engine.openai.temperature import Temperature
 from askai.core.enums.acc_color import AccColor
-from askai.core.enums.routing_model import RoutingModel
+from askai.core.enums.response_model import ResponseModel
 from askai.core.features.router.agent_tools import features
 from askai.core.features.router.evaluation import assert_accuracy
 from askai.core.features.router.task_agent import agent
@@ -80,23 +80,23 @@ def wrap_answer(
         :return: A formatted string containing the final answer.
         """
         output: str = answer
-        args = {"user": shared.username, "idiom": shared.idiom, "context": answer, "question": query}
+        args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": answer, "question": query}
         prompt_args: list[str] = [k for k in args.keys()]
-        model: RoutingModel = (
-            RoutingModel.REFINER
+        model: ResponseModel = (
+            ResponseModel.REFINER
             if acc_response and (acc_response.acc_color > AccColor.GOOD)
-            else RoutingModel.of_model(model_result.mid)
+            else ResponseModel.of_model(model_result.mid)
         )
         events.reply.emit(reply=AIReply.full(msg.model_select(model)))

         match model, configs.is_speak:
-            case RoutingModel.TERMINAL_COMMAND, True:
+            case ResponseModel.TERMINAL_COMMAND, True:
                 output = final_answer("taius-stt", prompt_args, **args)
-            case RoutingModel.ASSISTIVE_TECH_HELPER, _:
+            case ResponseModel.ASSISTIVE_TECH_HELPER, _:
                 output = final_answer("taius-stt", prompt_args, **args)
-            case RoutingModel.CHAT_MASTER, _:
+            case ResponseModel.CHAT_MASTER, _:
                 output = final_answer("taius-jarvis", prompt_args, **args)
-            case RoutingModel.REFINER, _:
+            case ResponseModel.REFINER, _:
                 if acc_response and acc_response.reasoning:
                     ctx: str = str(shared.context.flat("HISTORY"))
                     args = {"improvements": acc_response.details, "context": ctx, "response": answer, "question": query}
4 changes: 2 additions & 2 deletions src/main/askai/core/features/router/model_selector.py
@@ -15,7 +15,7 @@
 from askai.core.askai_prompt import prompt
 from askai.core.component.geo_location import geo_location
 from askai.core.engine.openai.temperature import Temperature
-from askai.core.enums.routing_model import RoutingModel
+from askai.core.enums.response_model import ResponseModel
 from askai.core.model.model_result import ModelResult
 from askai.core.support.langchain_support import lc_llm
 from hspylib.core.metaclass.singleton import Singleton
@@ -45,7 +45,7 @@ def select_model(self, question: str) -> ModelResult:
         :return: An instance of ModelResult representing the selected model.
         """
         final_prompt: str = self.model_template.format(
-            datetime=geo_location.datetime, models=RoutingModel.enlist(), question=question
+            datetime=geo_location.datetime, models=ResponseModel.enlist(), question=question
         )
         llm: BaseChatModel = lc_llm.create_chat_model(Temperature.DATA_ANALYSIS.temp)
         if response := llm.invoke(final_prompt):
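For completeness, a sketch of driving the selector (assuming model_selector exposes a module-level singleton the same way chat.py does above; the name `selector` is an assumption, not shown in this diff):

# Hypothetical singleton access, mirroring the chat.py pattern:
# assert (selector := ModelSelector().INSTANCE) is not None
result = selector.select_model("Open my settings file and summarize it")
print(result.mid)  # the model ID later resolved via ResponseModel.of_model()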
