Skip to content

Commit

Permalink
New proxy structure - 3
Browse files Browse the repository at this point in the history
  • Loading branch information
yorevs committed Mar 26, 2024
1 parent c7f2719 commit 8657ccd
Show file tree
Hide file tree
Showing 15 changed files with 159 additions and 59 deletions.
2 changes: 1 addition & 1 deletion gradle
Submodule gradle updated 1 file
+5 −5 dependencies.gradle
16 changes: 12 additions & 4 deletions src/main/askai/core/askai.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
from askai.core.component.recorder import recorder
from askai.core.engine.ai_engine import AIEngine
from askai.core.model.chat_context import ChatContext
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.processor.processor_factory import ProcessorFactory
from askai.core.processor.processor_proxy import proxy
from askai.core.support.object_mapper import object_mapper
Expand Down Expand Up @@ -208,7 +208,7 @@ def _ask_and_reply(self, question: str) -> bool:
status = True
return status

def _process_response(self, proxy_response: QueryResponse) -> bool:
def _process_response(self, proxy_response: ProcessorResponse) -> bool:
"""Process a query response using a processor that supports the query type.
:param proxy_response: The processor proxy response.
"""
Expand All @@ -220,6 +220,14 @@ def _process_response(self, proxy_response: QueryResponse) -> bool:
elif proxy_response.terminating:
log.info("User wants to terminate the conversation.")
return False
elif proxy_response.require_internet:
log.info("Internet is required to fulfill the request.")
processor = ProcessorFactory.get_by_name('InternetProcessor')
processor.bind(ProcessorFactory.get_by_name('GenericProcessor'))
elif proxy_response.require_summarization:
log.info("Summarization is required to fulfill the request.")
processor = ProcessorFactory.get_by_name('SummaryProcessor')
processor.bind(ProcessorFactory.get_by_name('GenericProcessor'))
# Query processors
if processor or (query_type := proxy_response.query_type or 'General'):
if not processor and not (processor := ProcessorFactory.find_processor(query_type)):
Expand All @@ -229,8 +237,8 @@ def _process_response(self, proxy_response: QueryResponse) -> bool:
log.info("%s::Processing response for '%s'", processor, proxy_response.question)
status, output = processor.process(proxy_response)
if status and output and processor.next_in_chain():
mapped_response = object_mapper.of_json(output, QueryResponse)
if isinstance(mapped_response, QueryResponse):
mapped_response = object_mapper.of_json(output, ProcessorResponse)
if isinstance(mapped_response, ProcessorResponse):
self._process_response(mapped_response)
else:
self.reply(str(mapped_response))
Expand Down
12 changes: 6 additions & 6 deletions src/main/askai/core/model/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,12 @@
"""Package initialization."""

__all__ = [
'ai_model',
'ai_reply',
'chat_context',
'query_response',
'search_result',
'summary_result',
'ai_model',
'ai_reply',
'chat_context',
'processor_response',
'search_result',
'summary_result',
'terminal_command'
]
__version__ = '1.0.1'
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""
@project: HsPyLib-AskAI
@package: askai.core.model
@file: query_response.py
@file: processor_response.py
@created: Fri, 23 Feb 2024
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
@site: https://github.com/yorevs/hspylib
Expand All @@ -18,14 +18,16 @@


@dataclass
class QueryResponse:
"""Keep track of the first-query responses."""
class ProcessorResponse:
"""Keep track of the processor responses."""

query_type: str = ""
question: str = ""
response: str = ""
terminating: bool = False
intelligible: bool = False
require_internet: bool = False
require_summarization: bool = False
commands: List[TerminalCommand] = field(default_factory=list)

def __str__(self):
Expand Down
17 changes: 17 additions & 0 deletions src/main/askai/core/model/query_types.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from hspylib.core.enums.enumeration import Enumeration


class QueryTypes(Enumeration):
    """Enumerate the query types supported by the AI processors.

    Each value is the string reported by the matching processor's ``q_type()``
    method (analysis, command, generic, internet, output and summary), and is
    used to route a proxy response to the processor that supports it.
    """

    ANALYSIS_QUERY = 'AnalysisQuery'

    COMMAND_QUERY = 'CommandQuery'

    GENERIC_QUERY = 'GenericQuery'

    INTERNET_QUERY = 'InternetQuery'

    OUTPUT_QUERY = 'OutputQuery'

    SUMMARY_QUERY = 'SummaryQuery'
11 changes: 8 additions & 3 deletions src/main/askai/core/processor/instances/analysis_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,18 +22,23 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.model.query_types import QueryTypes
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared


class AnalysisProcessor:
"""Process analysis prompts."""

@staticmethod
def q_type() -> str:
return QueryTypes.ANALYSIS_QUERY.value

def __init__(self):
self._template_file: str = "analysis-prompt"
self._next_in_chain: AIProcessor | None = None
self._supports: List[str] = ["Data analysis", "Informational"]
self._supports: List[str] = [self.q_type()]

def supports(self, query_type: str) -> bool:
return query_type in self._supports
Expand All @@ -48,7 +53,7 @@ def bind(self, next_in_chain: AIProcessor):
def template(self) -> str:
return prompt.read_prompt(self._template_file)

def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=[], template=self.template())
final_prompt: str = msg.translate(template.format())
Expand Down
25 changes: 17 additions & 8 deletions src/main/askai/core/processor/instances/command_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,10 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.model.query_types import QueryTypes
from askai.core.model.terminal_command import TerminalCommand
from askai.core.processor.instances.output_processor import OutputProcessor
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared
from askai.core.support.utilities import extract_command, extract_path

Expand All @@ -39,33 +40,41 @@ class CommandProcessor:
"""Process command request prompts."""

@staticmethod
def _wrap_output(query_response: QueryResponse, cmd_line: str, cmd_out: str) -> str:
def _wrap_output(query_response: ProcessorResponse, cmd_line: str, cmd_out: str) -> str:
"""Wrap the output into a new string to be forwarded to the next processor.
:param query_response: The query response provided by the AI.
:param cmd_line: The command line that was executed by this processor.
"""
query_response.query_type = 'Command Output'
query_response.query_type = QueryTypes.OUTPUT_QUERY.value
query_response.require_summarization = False
query_response.require_internet = False
query_response.commands.append(TerminalCommand(cmd_line, cmd_out, prompt.os_type, prompt.shell))

return str(query_response)

@staticmethod
def q_type() -> str:
return QueryTypes.COMMAND_QUERY.value

def __init__(self):
self._template_file: str = "command-prompt"
self._next_in_chain: str = OutputProcessor.__name__
self._supports: List[str] = ["Command execution"]
self._next_in_chain: AIProcessor | None = None
self._supports: List[str] = [self.q_type()]

def supports(self, query_type: str) -> bool:
return query_type in self._supports

def next_in_chain(self) -> Optional[str]:
return self._next_in_chain

def bind(self, next_in_chain: AIProcessor):
    """Bind the given processor as the next one in the processing chain.
    :param next_in_chain: The processor to run after this one completes.
    """
    # Fix: the body was `pass`, silently discarding the binding so
    # next_in_chain() always returned None. Every sibling processor
    # (Generic/Internet/Output/Summary) stores the reference — do the same.
    self._next_in_chain = next_in_chain

@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)

def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["os_type", "shell"], template=self.template())
final_prompt: str = template.format(os_type=prompt.os_type, shell=prompt.shell)
Expand All @@ -89,7 +98,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:

return status, output

def _process_command(self, query_response: QueryResponse, cmd_line: str) -> Tuple[bool, Optional[str]]:
def _process_command(self, query_response: ProcessorResponse, cmd_line: str) -> Tuple[bool, Optional[str]]:
"""Process a terminal command.
:param query_response: The response for the query asked by the user.
:param cmd_line: The command line to execute.
Expand Down
14 changes: 11 additions & 3 deletions src/main/askai/core/processor/instances/generic_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,30 +23,38 @@
from askai.core.component.cache_service import cache
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.model.query_types import QueryTypes
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared


class GenericProcessor:
"""Process generic prompts."""

@staticmethod
def q_type() -> str:
return QueryTypes.GENERIC_QUERY.value

def __init__(self):
self._template_file: str = "generic-prompt"
self._next_in_chain: AIProcessor | None = None
self._supports: List[str] = ['AI Database', 'General']
self._supports: List[str] = [self.q_type()]

def supports(self, query_type: str) -> bool:
return query_type in self._supports

def next_in_chain(self) -> Optional[str]:
return self._next_in_chain

def bind(self, next_in_chain: AIProcessor):
self._next_in_chain = next_in_chain

@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)

def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["user"], template=self.template())
final_prompt: str = msg.translate(template.format(user=prompt.user))
Expand Down
14 changes: 11 additions & 3 deletions src/main/askai/core/processor/instances/internet_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@
from askai.core.component.internet_service import internet
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.model.query_types import QueryTypes
from askai.core.model.search_result import SearchResult
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.object_mapper import object_mapper
Expand All @@ -37,22 +38,29 @@ class InternetProcessor:

DATE_FMT: str = "%a %d %b %-H:%M %Y" # E.g:. Fri 22 Mar 19:47 2024

@staticmethod
def q_type() -> str:
return QueryTypes.INTERNET_QUERY.value

def __init__(self):
self._template_file: str = "internet-prompt"
self._next_in_chain: AIProcessor | None = None
self._supports: List[str] = ['Internet research']
self._supports: List[str] = [self.q_type()]

def supports(self, query_type: str) -> bool:
return query_type in self._supports

def next_in_chain(self) -> Optional[str]:
return self._next_in_chain

def bind(self, next_in_chain: AIProcessor):
self._next_in_chain = next_in_chain

@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)

def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["cur_date"], template=self.template())
final_prompt: str = msg.translate(template.format(cur_date=now(self.DATE_FMT)))
Expand Down
14 changes: 11 additions & 3 deletions src/main/askai/core/processor/instances/output_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,30 +22,38 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.model.query_types import QueryTypes
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared


class OutputProcessor(AIProcessor):
"""Process command output prompts."""

@staticmethod
def q_type() -> str:
return QueryTypes.OUTPUT_QUERY.value

def __init__(self):
self._template_file: str = "output-prompt"
self._next_in_chain: AIProcessor | None = None
self._supports: List[str] = ['Command Output']
self._supports: List[str] = [self.q_type()]

def supports(self, query_type: str) -> bool:
return query_type in self._supports

def next_in_chain(self) -> Optional[str]:
return self._next_in_chain

def bind(self, next_in_chain: AIProcessor):
self._next_in_chain = next_in_chain

@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)

def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
commands = "; ".join([c.cmd_line for c in query_response.commands])
template = PromptTemplate(input_variables=["command_line", "shell"], template=self.template())
Expand Down
14 changes: 11 additions & 3 deletions src/main/askai/core/processor/instances/summary_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@
from askai.core.component.summarizer import summarizer
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
from askai.core.model.query_response import QueryResponse
from askai.core.model.processor_response import ProcessorResponse
from askai.core.model.query_types import QueryTypes
from askai.core.model.summary_result import SummaryResult
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.object_mapper import object_mapper
Expand All @@ -47,22 +48,29 @@ def _ask_and_reply(question: str) -> Optional[str]:
output = os.linesep.join([r.answer for r in results]).strip()
return output

@staticmethod
def q_type() -> str:
return QueryTypes.SUMMARY_QUERY.value

def __init__(self):
self._template_file: str = "summary-prompt"
self._next_in_chain: AIProcessor | None = None
self._supports: List[str] = ['Summarization']
self._supports: List[str] = [self.q_type()]

def supports(self, query_type: str) -> bool:
return query_type in self._supports

def next_in_chain(self) -> Optional[str]:
return self._next_in_chain

def bind(self, next_in_chain: AIProcessor):
self._next_in_chain = next_in_chain

@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)

def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["os_type"], template=self.template())
final_prompt: str = msg.translate(template.format(os_type=prompt.os_type))
Expand Down
Loading

0 comments on commit 8657ccd

Please sign in to comment.