diff --git a/gradle b/gradle
index ad74e974..6e42086e 160000
--- a/gradle
+++ b/gradle
@@ -1 +1 @@
-Subproject commit ad74e9744f763b0cc07bf81956857a172b456cb4
+Subproject commit 6e42086e7aed4a39743362bf3a6c623b1af90409
diff --git a/src/main/askai/core/askai.py b/src/main/askai/core/askai.py
index eb716465..c7b09d05 100644
--- a/src/main/askai/core/askai.py
+++ b/src/main/askai/core/askai.py
@@ -37,7 +37,7 @@
from askai.core.component.recorder import recorder
from askai.core.engine.ai_engine import AIEngine
from askai.core.model.chat_context import ChatContext
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
from askai.core.processor.processor_factory import ProcessorFactory
from askai.core.processor.processor_proxy import proxy
from askai.core.support.object_mapper import object_mapper
@@ -208,7 +208,7 @@ def _ask_and_reply(self, question: str) -> bool:
status = True
return status
- def _process_response(self, proxy_response: QueryResponse) -> bool:
+ def _process_response(self, proxy_response: ProcessorResponse) -> bool:
"""Process a query response using a processor that supports the query type.
:param proxy_response: The processor proxy response.
"""
@@ -220,6 +220,14 @@ def _process_response(self, proxy_response: QueryResponse) -> bool:
elif proxy_response.terminating:
log.info("User wants to terminate the conversation.")
return False
+ elif proxy_response.require_internet:
+ log.info("Internet is required to fulfill the request.")
+ processor = ProcessorFactory.get_by_name('InternetProcessor')
+ processor.bind(ProcessorFactory.get_by_name('GenericProcessor'))
+ elif proxy_response.require_summarization:
+ log.info("Summarization is required to fulfill the request.")
+ processor = ProcessorFactory.get_by_name('SummaryProcessor')
+ processor.bind(ProcessorFactory.get_by_name('GenericProcessor'))
# Query processors
if processor or (query_type := proxy_response.query_type or 'General'):
if not processor and not (processor := ProcessorFactory.find_processor(query_type)):
@@ -229,8 +237,8 @@ def _process_response(self, proxy_response: QueryResponse) -> bool:
log.info("%s::Processing response for '%s'", processor, proxy_response.question)
status, output = processor.process(proxy_response)
if status and output and processor.next_in_chain():
- mapped_response = object_mapper.of_json(output, QueryResponse)
- if isinstance(mapped_response, QueryResponse):
+ mapped_response = object_mapper.of_json(output, ProcessorResponse)
+ if isinstance(mapped_response, ProcessorResponse):
self._process_response(mapped_response)
else:
self.reply(str(mapped_response))
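For reference, the flag-based dispatch introduced above is a small chain-of-responsibility setup: a specialized processor is bound in front of the generic one so its output gets post-processed. A minimal sketch, assuming the factory lookups shown in this hunk (the `dispatch` helper itself is hypothetical):

    from typing import Optional

    from askai.core.model.processor_response import ProcessorResponse
    from askai.core.processor.processor_base import AIProcessor
    from askai.core.processor.processor_factory import ProcessorFactory

    def dispatch(response: ProcessorResponse) -> Optional[AIProcessor]:
        """Hypothetical helper mirroring the branches above."""
        processor: Optional[AIProcessor] = None
        if response.require_internet:
            processor = ProcessorFactory.get_by_name('InternetProcessor')
            processor.bind(ProcessorFactory.get_by_name('GenericProcessor'))
        elif response.require_summarization:
            processor = ProcessorFactory.get_by_name('SummaryProcessor')
            processor.bind(ProcessorFactory.get_by_name('GenericProcessor'))
        # Otherwise fall back to whichever processor supports the query type.
        return processor or ProcessorFactory.find_processor(response.query_type)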
diff --git a/src/main/askai/core/model/__init__.py b/src/main/askai/core/model/__init__.py
index d8f07ff9..df6e34a5 100644
--- a/src/main/askai/core/model/__init__.py
+++ b/src/main/askai/core/model/__init__.py
@@ -6,12 +6,12 @@
"""Package initialization."""
__all__ = [
- 'ai_model',
- 'ai_reply',
- 'chat_context',
- 'query_response',
- 'search_result',
- 'summary_result',
+ 'ai_model',
+ 'ai_reply',
+ 'chat_context',
+ 'processor_response',
+ 'search_result',
+ 'summary_result',
'terminal_command'
]
__version__ = '1.0.1'
diff --git a/src/main/askai/core/model/query_response.py b/src/main/askai/core/model/processor_response.py
similarity index 80%
rename from src/main/askai/core/model/query_response.py
rename to src/main/askai/core/model/processor_response.py
index 3f3586c4..03c59afe 100644
--- a/src/main/askai/core/model/query_response.py
+++ b/src/main/askai/core/model/processor_response.py
@@ -1,7 +1,7 @@
"""
@project: HsPyLib-AskAI
@package: askai.core.model
- @file: query_response.py
+ @file: processor_response.py
@created: Fri, 23 Feb 2024
@author: Hugo Saporetti Junior
@site: https://github.com/yorevs/hspylib
@@ -18,14 +18,16 @@
@dataclass
-class QueryResponse:
- """Keep track of the first-query responses."""
+class ProcessorResponse:
+ """Keep track of the processor responses."""
query_type: str = ""
question: str = ""
response: str = ""
terminating: bool = False
intelligible: bool = False
+ require_internet: bool = False
+ require_summarization: bool = False
commands: List[TerminalCommand] = field(default_factory=list)
def __str__(self):
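The renamed dataclass mirrors the JSON contract of the proxy prompt field-for-field, which is what lets `object_mapper.of_json` hydrate it directly from the model reply. A minimal sketch with illustrative values:

    from askai.core.model.processor_response import ProcessorResponse

    resp = ProcessorResponse(
        query_type='InternetQuery',
        question='What is the weather like today?',
        intelligible=True,
        terminating=False,
        require_internet=True,        # new flag: route via InternetProcessor
        require_summarization=False,  # new flag: route via SummaryProcessor
    )
    assert resp.require_internet and not resp.commands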
diff --git a/src/main/askai/core/model/query_types.py b/src/main/askai/core/model/query_types.py
new file mode 100644
index 00000000..ed17fe00
--- /dev/null
+++ b/src/main/askai/core/model/query_types.py
@@ -0,0 +1,17 @@
+from hspylib.core.enums.enumeration import Enumeration
+
+
+class QueryTypes(Enumeration):
+ """TODO"""
+
+ ANALYSIS_QUERY = 'AnalysisQuery'
+
+ COMMAND_QUERY = 'CommandQuery'
+
+ GENERIC_QUERY = 'GenericQuery'
+
+ INTERNET_QUERY = 'InternetQuery'
+
+ OUTPUT_QUERY = 'OutputQuery'
+
+ SUMMARY_QUERY = 'SummaryQuery'
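Processors now compare incoming `query_type` strings against these enum values instead of the free-form labels used before. A quick sketch, assuming hspylib's `Enumeration` exposes the declared string through `.value`:

    from askai.core.model.query_types import QueryTypes

    assert QueryTypes.SUMMARY_QUERY.value == 'SummaryQuery'
    supported = [QueryTypes.SUMMARY_QUERY.value]
    print('SummaryQuery' in supported)  # True -> SummaryProcessor picks it up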
diff --git a/src/main/askai/core/processor/instances/analysis_processor.py b/src/main/askai/core/processor/instances/analysis_processor.py
index 11c6e87b..e8aa25e5 100644
--- a/src/main/askai/core/processor/instances/analysis_processor.py
+++ b/src/main/askai/core/processor/instances/analysis_processor.py
@@ -22,7 +22,8 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
+from askai.core.model.query_types import QueryTypes
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared
@@ -30,10 +31,14 @@
class AnalysisProcessor:
"""Process analysis prompts."""
+ @staticmethod
+ def q_type() -> str:
+ return QueryTypes.ANALYSIS_QUERY.value
+
def __init__(self):
self._template_file: str = "analysis-prompt"
self._next_in_chain: AIProcessor | None = None
- self._supports: List[str] = ["Data analysis", "Informational"]
+ self._supports: List[str] = [self.q_type()]
def supports(self, query_type: str) -> bool:
return query_type in self._supports
@@ -48,7 +53,7 @@ def bind(self, next_in_chain: AIProcessor):
def template(self) -> str:
return prompt.read_prompt(self._template_file)
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=[], template=self.template())
final_prompt: str = msg.translate(template.format())
diff --git a/src/main/askai/core/processor/instances/command_processor.py b/src/main/askai/core/processor/instances/command_processor.py
index df5762f1..54220444 100644
--- a/src/main/askai/core/processor/instances/command_processor.py
+++ b/src/main/askai/core/processor/instances/command_processor.py
@@ -28,9 +28,10 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
+from askai.core.model.query_types import QueryTypes
from askai.core.model.terminal_command import TerminalCommand
-from askai.core.processor.instances.output_processor import OutputProcessor
+from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared
from askai.core.support.utilities import extract_command, extract_path
@@ -39,21 +40,26 @@ class CommandProcessor:
"""Process command request prompts."""
@staticmethod
- def _wrap_output(query_response: QueryResponse, cmd_line: str, cmd_out: str) -> str:
+ def _wrap_output(query_response: ProcessorResponse, cmd_line: str, cmd_out: str) -> str:
"""Wrap the output into a new string to be forwarded to the next processor.
:param query_response: The query response provided by the AI.
:param cmd_line: The command line that was executed by this processor.
"""
- query_response.query_type = 'Command Output'
+ query_response.query_type = QueryTypes.OUTPUT_QUERY.value
query_response.require_summarization = False
query_response.require_internet = False
query_response.commands.append(TerminalCommand(cmd_line, cmd_out, prompt.os_type, prompt.shell))
+
return str(query_response)
+ @staticmethod
+ def q_type() -> str:
+ return QueryTypes.COMMAND_QUERY.value
+
def __init__(self):
self._template_file: str = "command-prompt"
- self._next_in_chain: str = OutputProcessor.__name__
- self._supports: List[str] = ["Command execution"]
+ self._next_in_chain: AIProcessor | None = None
+ self._supports: List[str] = [self.q_type()]
def supports(self, query_type: str) -> bool:
return query_type in self._supports
@@ -61,11 +67,14 @@ def supports(self, query_type: str) -> bool:
def next_in_chain(self) -> Optional[str]:
return self._next_in_chain
+ def bind(self, next_in_chain: AIProcessor):
+ pass
+
@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["os_type", "shell"], template=self.template())
final_prompt: str = template.format(os_type=prompt.os_type, shell=prompt.shell)
@@ -89,7 +98,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
return status, output
- def _process_command(self, query_response: QueryResponse, cmd_line: str) -> Tuple[bool, Optional[str]]:
+ def _process_command(self, query_response: ProcessorResponse, cmd_line: str) -> Tuple[bool, Optional[str]]:
"""Process a terminal command.
:param query_response: The response for the query asked by the user.
:param cmd_line: The command line to execute.
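The command-to-output handoff now travels through the retagged response rather than a hardwired class name: `_wrap_output` stamps the response as an `OutputQuery` and serializes it, so the dispatcher can re-map the string and route it to `OutputProcessor`. A sketch with made-up command data:

    from askai.core.model.processor_response import ProcessorResponse
    from askai.core.model.query_types import QueryTypes
    from askai.core.model.terminal_command import TerminalCommand

    wrapped = ProcessorResponse(query_type=QueryTypes.OUTPUT_QUERY.value,
                                question='list my downloads')
    wrapped.commands.append(
        TerminalCommand('ls ~/Downloads', 'file-a  file-b', 'macos', 'bash'))
    forwarded = str(wrapped)  # re-mapped to ProcessorResponse downstream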
diff --git a/src/main/askai/core/processor/instances/generic_processor.py b/src/main/askai/core/processor/instances/generic_processor.py
index e788441d..9447bf9b 100644
--- a/src/main/askai/core/processor/instances/generic_processor.py
+++ b/src/main/askai/core/processor/instances/generic_processor.py
@@ -23,7 +23,8 @@
from askai.core.component.cache_service import cache
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
+from askai.core.model.query_types import QueryTypes
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared
@@ -31,10 +32,14 @@
class GenericProcessor:
"""Process generic prompts."""
+ @staticmethod
+ def q_type() -> str:
+ return QueryTypes.GENERIC_QUERY.value
+
def __init__(self):
self._template_file: str = "generic-prompt"
self._next_in_chain: AIProcessor | None = None
- self._supports: List[str] = ['AI Database', 'General']
+ self._supports: List[str] = [self.q_type()]
def supports(self, query_type: str) -> bool:
return query_type in self._supports
@@ -42,11 +47,14 @@ def supports(self, query_type: str) -> bool:
def next_in_chain(self) -> Optional[str]:
return self._next_in_chain
+ def bind(self, next_in_chain: AIProcessor):
+ self._next_in_chain = next_in_chain
+
@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["user"], template=self.template())
final_prompt: str = msg.translate(template.format(user=prompt.user))
diff --git a/src/main/askai/core/processor/instances/internet_processor.py b/src/main/askai/core/processor/instances/internet_processor.py
index 5a3f9027..3ee523ba 100644
--- a/src/main/askai/core/processor/instances/internet_processor.py
+++ b/src/main/askai/core/processor/instances/internet_processor.py
@@ -25,7 +25,8 @@
from askai.core.component.internet_service import internet
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
+from askai.core.model.query_types import QueryTypes
from askai.core.model.search_result import SearchResult
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.object_mapper import object_mapper
@@ -37,10 +38,14 @@ class InternetProcessor:
DATE_FMT: str = "%a %d %b %-H:%M %Y" # E.g.: Fri 22 Mar 19:47 2024
+ @staticmethod
+ def q_type() -> str:
+ return QueryTypes.INTERNET_QUERY.value
+
def __init__(self):
self._template_file: str = "internet-prompt"
self._next_in_chain: AIProcessor | None = None
- self._supports: List[str] = ['Internet research']
+ self._supports: List[str] = [self.q_type()]
def supports(self, query_type: str) -> bool:
return query_type in self._supports
@@ -48,11 +53,14 @@ def supports(self, query_type: str) -> bool:
def next_in_chain(self) -> Optional[str]:
return self._next_in_chain
+ def bind(self, next_in_chain: AIProcessor):
+ self._next_in_chain = next_in_chain
+
@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["cur_date"], template=self.template())
final_prompt: str = msg.translate(template.format(cur_date=now(self.DATE_FMT)))
diff --git a/src/main/askai/core/processor/instances/output_processor.py b/src/main/askai/core/processor/instances/output_processor.py
index ee37e9e8..6d396a08 100644
--- a/src/main/askai/core/processor/instances/output_processor.py
+++ b/src/main/askai/core/processor/instances/output_processor.py
@@ -22,7 +22,8 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
+from askai.core.model.query_types import QueryTypes
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.shared_instances import shared
@@ -30,10 +31,14 @@
class OutputProcessor(AIProcessor):
"""Process command output prompts."""
+ @staticmethod
+ def q_type() -> str:
+ return QueryTypes.OUTPUT_QUERY.value
+
def __init__(self):
self._template_file: str = "output-prompt"
self._next_in_chain: AIProcessor | None = None
- self._supports: List[str] = ['Command Output']
+ self._supports: List[str] = [self.q_type()]
def supports(self, query_type: str) -> bool:
return query_type in self._supports
@@ -41,11 +46,14 @@ def supports(self, query_type: str) -> bool:
def next_in_chain(self) -> Optional[str]:
return self._next_in_chain
+ def bind(self, next_in_chain: AIProcessor):
+ self._next_in_chain = next_in_chain
+
@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
commands = "; ".join([c.cmd_line for c in query_response.commands])
template = PromptTemplate(input_variables=["command_line", "shell"], template=self.template())
diff --git a/src/main/askai/core/processor/instances/summary_processor.py b/src/main/askai/core/processor/instances/summary_processor.py
index 8de6fb72..bc58f680 100644
--- a/src/main/askai/core/processor/instances/summary_processor.py
+++ b/src/main/askai/core/processor/instances/summary_processor.py
@@ -25,7 +25,8 @@
from askai.core.component.summarizer import summarizer
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
+from askai.core.model.query_types import QueryTypes
from askai.core.model.summary_result import SummaryResult
from askai.core.processor.processor_base import AIProcessor
from askai.core.support.object_mapper import object_mapper
@@ -47,10 +48,14 @@ def _ask_and_reply(question: str) -> Optional[str]:
output = os.linesep.join([r.answer for r in results]).strip()
return output
+ @staticmethod
+ def q_type() -> str:
+ return QueryTypes.SUMMARY_QUERY.value
+
def __init__(self):
self._template_file: str = "summary-prompt"
self._next_in_chain: AIProcessor | None = None
- self._supports: List[str] = ['Summarization']
+ self._supports: List[str] = [self.q_type()]
def supports(self, query_type: str) -> bool:
return query_type in self._supports
@@ -58,11 +63,14 @@ def supports(self, query_type: str) -> bool:
def next_in_chain(self) -> Optional[str]:
return self._next_in_chain
+ def bind(self, next_in_chain: AIProcessor):
+ self._next_in_chain = next_in_chain
+
@lru_cache
def template(self) -> str:
return prompt.read_prompt(self._template_file)
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
status = False
template = PromptTemplate(input_variables=["os_type"], template=self.template())
final_prompt: str = msg.translate(template.format(os_type=prompt.os_type))
diff --git a/src/main/askai/core/processor/processor_base.py b/src/main/askai/core/processor/processor_base.py
index 630c6eac..0fc3fb5f 100644
--- a/src/main/askai/core/processor/processor_base.py
+++ b/src/main/askai/core/processor/processor_base.py
@@ -14,7 +14,7 @@
"""
from typing import Optional, Tuple, Protocol
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
class AIProcessor(Protocol):
@@ -34,7 +34,7 @@ def bind(self, next_in_chain: 'AIProcessor') -> None:
"""Bind a processor to be the next in chain."""
...
- def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
+ def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
"""Process the query response."""
...
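Since `AIProcessor` is a `Protocol`, conformance is structural: any class providing these methods type-checks as a processor without inheriting from it, which is exactly how the processor instances in this change are written. A minimal, hypothetical conforming class:

    from typing import Optional, Tuple

    from askai.core.model.processor_response import ProcessorResponse
    from askai.core.processor.processor_base import AIProcessor

    class EchoProcessor:
        """Hypothetical processor: echoes the question back unchanged."""

        def __init__(self):
            self._next_in_chain: Optional[AIProcessor] = None

        def supports(self, query_type: str) -> bool:
            return query_type == 'EchoQuery'

        def next_in_chain(self) -> Optional[AIProcessor]:
            return self._next_in_chain

        def bind(self, next_in_chain: AIProcessor) -> None:
            self._next_in_chain = next_in_chain

        def process(self, query_response: ProcessorResponse) -> Tuple[bool, Optional[str]]:
            return True, query_response.question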
diff --git a/src/main/askai/core/processor/processor_factory.py b/src/main/askai/core/processor/processor_factory.py
index 6caed3ff..ee94b77a 100644
--- a/src/main/askai/core/processor/processor_factory.py
+++ b/src/main/askai/core/processor/processor_factory.py
@@ -58,3 +58,34 @@ def get_by_name(cls, name: str) -> Optional[AIProcessor]:
return next(
(p for p in cls._PROCESSORS.values() if type(p).__name__ == name), None
)
+
+ @classmethod
+ @lru_cache
+ def analysis(cls) -> AIProcessor:
+ return cls._PROCESSORS['AnalysisProcessor']
+
+ @classmethod
+ @lru_cache
+ def command(cls) -> AIProcessor:
+ return cls._PROCESSORS['CommandProcessor']
+
+ @classmethod
+ @lru_cache
+ def generic(cls) -> AIProcessor:
+ return cls._PROCESSORS['GenericProcessor']
+
+ @classmethod
+ @lru_cache
+ def internet(cls) -> AIProcessor:
+ return cls._PROCESSORS['InternetProcessor']
+
+ @classmethod
+ @lru_cache
+ def output(cls) -> AIProcessor:
+ return cls._PROCESSORS['OutputProcessor']
+
+ @classmethod
+ @lru_cache
+ def summary(cls) -> AIProcessor:
+ return cls._PROCESSORS['SummaryProcessor']
+
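These accessors give call sites a typed, memoized alternative to string lookups; `@lru_cache` on the no-argument classmethods caches each dictionary hit after the first call. A usage sketch:

    from askai.core.processor.processor_factory import ProcessorFactory

    summary = ProcessorFactory.summary()
    summary.bind(ProcessorFactory.generic())      # chain: summary -> generic
    assert ProcessorFactory.summary() is summary  # memoized, same instance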
diff --git a/src/main/askai/core/processor/processor_proxy.py b/src/main/askai/core/processor/processor_proxy.py
index bbb0836e..394b7e50 100644
--- a/src/main/askai/core/processor/processor_proxy.py
+++ b/src/main/askai/core/processor/processor_proxy.py
@@ -24,7 +24,7 @@
from askai.core.askai_prompt import prompt
from askai.core.engine.openai.temperatures import Temperatures
from askai.core.model.chat_context import ContextRaw
-from askai.core.model.query_response import QueryResponse
+from askai.core.model.processor_response import ProcessorResponse
from askai.core.support.object_mapper import object_mapper
from askai.core.support.shared_instances import shared
@@ -41,7 +41,7 @@ def __init__(self):
def template(self) -> str:
return prompt.read_prompt("proxy-prompt")
- def process(self, question: str) -> Tuple[bool, QueryResponse]:
+ def process(self, question: str) -> Tuple[bool, ProcessorResponse]:
"""Return the setup prompt.
:param question: The question to the AI engine.
"""
@@ -50,19 +50,19 @@ def process(self, question: str) -> Tuple[bool, QueryResponse]:
final_prompt = msg.translate(template.format())
shared.context.set("SETUP", final_prompt)
shared.context.set("QUESTION", f"\n\nQuestion: {question}\n\nHelpful Answer:")
- context: ContextRaw = shared.context.join("SETUP", "CONTEXT", "QUESTION")
+ context: ContextRaw = shared.context.join("CONTEXT", "SETUP", "QUESTION")
log.info("Ask::[QUESTION] '%s' context=%s", question, context)
if (response := shared.engine.ask(context, *Temperatures.ZERO.value)) and response.is_success:
log.info("Ask::[PROXY] Received from AI: %s.", str(response))
- output = object_mapper.of_json(response.message, QueryResponse)
- if not isinstance(output, QueryResponse):
+ output = object_mapper.of_json(response.message, ProcessorResponse)
+ if not isinstance(output, ProcessorResponse):
log.error(msg.invalid_response(output))
output = response.message
else:
status = True
else:
- output = QueryResponse(question=question, terminating=True, response=response.message)
+ output = ProcessorResponse(question=question, terminating=True, response=response.message)
return status, output
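End to end, the proxy either yields a hydrated `ProcessorResponse` (status True) or surfaces the raw model message when the JSON mapping fails. A usage sketch against the module-level `proxy` instance:

    from askai.core.model.processor_response import ProcessorResponse
    from askai.core.processor.processor_proxy import proxy

    status, output = proxy.process('summarize my documents folder')
    if status and isinstance(output, ProcessorResponse):
        print(output.query_type, output.require_summarization)
    else:
        print('Proxy could not categorize the question:', output)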
diff --git a/src/main/askai/resources/assets/prompts/proxy-prompt.txt b/src/main/askai/resources/assets/prompts/proxy-prompt.txt
index 69b871ac..035feb74 100644
--- a/src/main/askai/resources/assets/prompts/proxy-prompt.txt
+++ b/src/main/askai/resources/assets/prompts/proxy-prompt.txt
@@ -1,11 +1,9 @@
As 'Taius', the AI query proxy, your task is to analyze and categorize the types of queries presented to you. Discern the diverse query types and identify their specific processing requirements. You MUST return a "JSON string" containing the designated fields and nothing more. Queries must fall into one of the following categories:
-- "AI Database".
-- "Internet research".
-- "Summarization".
-- "Data analysis".
-- "Command execution".
-- "Informational".
+- "InternetQuery"
+- "SummarizationQuery"
+- "AnalysisQuery"
+- "CommandQuery"
Before responding to the user, you must follow the step-by-step instructions provided below in sequential order:
@@ -13,16 +11,14 @@ Before responding to the user, you must follow the step-by-step instructions pro
2. Determine if the query suggests the user intends to end the conversation.
-3. Determine if the prompt or chat history includes any "Command output". If it does, ensure that the response is consistently based on that content.
+3. Determine if the query requires internet access or a Google search to complete your reply.
-4. Summarization prompts will always need a command, but you will select "Summarization" every time that happens.
+4. Determine if the query requires summarization of files and folders to complete your reply. Such queries consistently begin with "summarize" or a synonymous term.
-5. If you don't know the answer, just say that you don't have an answer, don't try to make up an answer.
+5. If you don't have an answer so far, or haven't decided yet, select "AnalysisQuery".
-6. The final response is a formatted JSON with no additional description or context.
+6. The final response is a formatted JSON with no additional description or context.
-7. The final response 'JSON' must contain the boolean fields: 'intelligible', and 'terminating'.
+7. The final response 'JSON' must contain the boolean fields: 'intelligible', 'terminating', 'require_summarization', and 'require_internet'.
-8. The final response 'JSON' must contain the string fields: 'query_type', and 'question'.
-
-Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+8. The final response 'JSON' must contain the string fields: 'query_type' and 'question'.
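For illustration, a model reply satisfying steps 7 and 8 could look like the following (hypothetical values):

    {
      "query_type": "InternetQuery",
      "question": "What is the weather like today?",
      "intelligible": true,
      "terminating": false,
      "require_internet": true,
      "require_summarization": false
    }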