Skip to content

Commit

Permalink
Added summarization QnA
Browse files Browse the repository at this point in the history
  • Loading branch information
Hugo Saporetti Junior committed Mar 21, 2024
1 parent 05cab6b commit 2ef436f
Show file tree
Hide file tree
Showing 10 changed files with 166 additions and 129 deletions.
4 changes: 4 additions & 0 deletions docs/devel/askai-questions.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
Questions:

1. summarize my markdown files in my HomeSetup docs folder.
2.
97 changes: 36 additions & 61 deletions src/main/askai/core/askai.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,23 +20,20 @@

import nltk
import pause
from clitt.core.term.cursor import Cursor
from clitt.core.term.screen import Screen
from clitt.core.term.terminal import Terminal
from clitt.core.tui.line_input.line_input import line_input
from clitt.core.term.cursor import cursor
from clitt.core.term.screen import screen
from clitt.core.term.terminal import terminal
from hspylib.core.enums.charset import Charset
from hspylib.core.tools.commons import sysout
from hspylib.modules.application.exit_status import ExitStatus
from hspylib.modules.cli.keyboard import Keyboard
from hspylib.modules.eventbus.event import Event

from askai.__classpath__ import classpath
from askai.core.askai_configs import configs
from askai.core.askai_events import ASKAI_BUS_NAME, AskAiEvents, REPLY_EVENT
from askai.core.askai_messages import msg
from askai.core.askai_prompt import prompt
from askai.core.component.audio_player import AudioPlayer
from askai.core.component.cache_service import cache
from askai.core.component.audio_player import player
from askai.core.component.cache_service import cache, CACHE_DIR
from askai.core.component.recorder import recorder
from askai.core.engine.ai_engine import AIEngine
from askai.core.model.chat_context import ChatContext
Expand Down Expand Up @@ -99,14 +96,6 @@ def engine(self) -> AIEngine:
def context(self) -> ChatContext:
return self._context

@property
def nickname(self) -> str:
return f" {self.engine.nickname()}"

@property
def username(self) -> str:
return f" {prompt.user.title()}"

@property
def cache_enabled(self) -> bool:
return configs.is_cache
Expand All @@ -128,7 +117,7 @@ def is_processing(self, processing: bool) -> None:
if processing:
self.reply(msg.wait())
elif not processing and self._processing is not None and processing != self._processing:
Terminal.INSTANCE.cursor.erase_line()
terminal.cursor.erase_line()
self._processing = processing

def run(self) -> None:
Expand All @@ -137,41 +126,44 @@ def run(self) -> None:
self._startup()
self._prompt()
elif self.query_string:
display_text(f"{self.username}: {self.query_string}%EOL%")
display_text(f"{shared.username}: {self.query_string}%EOL%")
self._ask_and_reply(self.query_string)

def reply(self, message: str) -> None:
"""Reply to the user with the AI response.
:param message: The message to reply to the user.
"""
if self.is_speak:
self.engine.text_to_speech(f"{self.nickname}: ", message)
self.engine.text_to_speech(f"{shared.nickname}: ", message)
else:
display_text(f"{self.nickname}: %GREEN%{message}%NC%")
display_text(f"{shared.nickname}: %GREEN%{message}%NC%")

def reply_error(self, message: str) -> None:
"""Reply API or system errors.
:param message: The error message to be displayed.
"""
log.error(message)
display_text(f"{self.nickname}: Error: {message or 'Aborted!'} %NC%")
if self.is_speak:
self.engine.text_to_speech(f"{shared.nickname}: ", message)
else:
display_text(f"{shared.nickname}: Error: {message or 'Aborted!'} %NC%")

def _cb_reply_event(self, ev: Event) -> None:
"""Callback to handle reply events."""
if ev.args.erase_last:
Cursor.INSTANCE.erase_line()
cursor.erase_line()
self.reply(ev.args.message)

def _splash(self) -> None:
"""Display the AskAI splash screen."""
splash_interval = 1000
while not self._ready:
if not self._processing:
Screen.INSTANCE.clear()
screen.clear()
sysout(f"%GREEN%{self.SPLASH}%NC%")
pause.milliseconds(splash_interval)
pause.milliseconds(splash_interval * 2)
Screen.INSTANCE.clear()
screen.clear()

def _startup(self) -> None:
"""Initialize the application."""
Expand All @@ -180,68 +172,50 @@ def _startup(self) -> None:
if configs.is_speak:
recorder.setup()
configs.is_speak = recorder.input_device is not None
if configs.is_speak:
AudioPlayer.INSTANCE.start_delay()
nltk.download("averaged_perceptron_tagger", quiet=True)
nltk.download("averaged_perceptron_tagger", quiet=True, download_dir=CACHE_DIR)
cache.set_cache_enable(self.cache_enabled)
cache.read_query_history()
askai_bus = AskAiEvents.get_bus(ASKAI_BUS_NAME)
askai_bus.subscribe(REPLY_EVENT, self._cb_reply_event)
if configs.is_speak:
player.start_delay()
self._ready = True
log.info("AskAI is ready !")
splash_thread.join()
display_text(self)
log.info("AskAI is ready to use!")
self.reply(msg.welcome(os.getenv("USER", "you")))

def _prompt(self) -> None:
"""Prompt for user interaction."""
while query := self._input(f"{self.username}: "):
while query := shared.input_text(f"{shared.username}: "):
if not self._ask_and_reply(query):
query = None
break
if not query:
self.reply(msg.goodbye())
display_text("")

def _input(self, __prompt: str) -> Optional[str]:
"""Prompt for user input.
:param __prompt: The prompt to display to the user.
"""
ret = None
while ret is None:
ret = line_input(__prompt)
if ret == Keyboard.VK_CTRL_L: # Use speech as input method.
Terminal.INSTANCE.cursor.erase_line()
spoken_text = self.engine.speech_to_text()
if spoken_text:
display_text(f"{self.username}: {spoken_text}")
ret = spoken_text
elif not isinstance(ret, str):
display_text(f"{self.username}: %YELLOW%Speech-To-Text is disabled!%NC%", erase_last=True)
ret = None

return ret if not ret or isinstance(ret, str) else ret.val

def _ask_and_reply(self, question: str) -> bool:
"""Ask the question and provide the reply.
:param question: The question to ask to the AI engine.
"""
if not (reply := cache.read_reply(question)):
log.debug('Response not found for "%s" in cache. Querying from %s.', question, self.engine.nickname())
status, response = proxy.process(question)
if status:
status = self._process_response(response)
else:
self.reply_error(response)
if status and response:
return self._process_response(response)
self.reply_error(response)
else:
log.debug('Reply found for "%s" in cache.', question)
log.debug("Reply found for '%s' in cache.", question)
self.reply(reply)
status = True
return status

def _process_response(self, proxy_response: QueryResponse) -> bool:
"""Process a query response using a processor that supports the query type."""
status, output, q_type, processor = False, None, None, None
"""Process a query response using a processor that supports the query type.
:param proxy_response: The processor proxy response.
"""
status, output, query_type, processor = False, None, None, None
# Intrinsic features
if not proxy_response.intelligible:
self.reply_error(msg.intelligible(proxy_response.question))
Expand All @@ -258,23 +232,24 @@ def _process_response(self, proxy_response: QueryResponse) -> bool:
processor = AIProcessor.get_by_name(SummaryProcessor.__name__)
processor.bind(AIProcessor.get_by_name(GenericProcessor.__name__))
# Query processors
if processor or (q_type := proxy_response.query_type):
if not processor and not (processor := AIProcessor.get_by_query_type(q_type)):
if processor or (query_type := proxy_response.query_type):
if not processor and not (processor := AIProcessor.get_by_query_type(query_type)):
log.error(f"Unable to find a proper processor: {str(proxy_response)}")
self.reply_error(msg.no_processor(q_type))
self.reply_error(msg.no_processor(query_type))
return False
log.info("%s::Processing response for '%s'", processor, proxy_response.question)
status, output = processor.process(proxy_response)
if status and processor.next_in_chain():
if status and output and processor.next_in_chain():
mapped_response = object_mapper.of_json(output, QueryResponse)
if isinstance(mapped_response, QueryResponse):
self._process_response(mapped_response)
else:
self.reply(str(mapped_response))
elif status:
self.reply(str(output))
if output:
self.reply(output)
else:
self.reply_error(str(output))
self.reply_error(output)
else:
self.reply_error(msg.invalid_response(proxy_response))

Expand Down
34 changes: 23 additions & 11 deletions src/main/askai/core/askai_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,23 +27,23 @@ def welcome(self, username: str) -> str:

@lru_cache
def wait(self) -> str:
return self.translate(f"I'm thinking, please wait…")
return self.translate("I'm thinking, please wait…")

@lru_cache
def listening(self) -> str:
return self.translate(f"I'm listening…")
def welcome_back(self) -> str:
return self.translate("Is there anything else I can help you with?")

@lru_cache
def noise_levels(self) -> str:
return self.translate(f"Adjusting noise levels…")
def listening(self) -> str:
return self.translate("I'm listening…")

@lru_cache
def transcribing(self) -> str:
return self.translate(f"I'm processing your voice, please wait…")
return self.translate("I'm processing your voice, please wait…")

@lru_cache
def goodbye(self) -> str:
return self.translate(f"Goodbye, have a nice day ! ")
return self.translate("Goodbye, have a nice day ! ")

@lru_cache
def executing(self, cmd_line: str) -> str:
Expand All @@ -63,25 +63,37 @@ def analysis_output(self) -> str:

@lru_cache
def searching(self) -> str:
return self.translate(f"Researching on Google…")
return self.translate("Researching on Google…")

@lru_cache
def summarizing(self, path: str) -> str:
    """Return the localized notice shown while documents are being summarized.
    :param path: The folder whose documents are being summarized.
    """
    notice = f"Summarizing documents from '{path}'. This can take a moment, please wait…"
    return self.translate(notice)

@lru_cache
def enter_qna(self) -> str:
    """Return the localized message announcing entry into summarization Q&A mode."""
    message = "You entered the Summarization Questions and Answers"
    return self.translate(message)

@lru_cache
def leave_qna(self) -> str:
    """Return the localized message announcing exit from summarization Q&A mode."""
    message = "You left the Summarization Questions and Answers"
    return self.translate(message)

@lru_cache
def qna_welcome(self) -> str:
    """Return the localized greeting shown when summarization Q&A mode starts."""
    message = "Question me about the summarized content"
    return self.translate(message)

# Warnings and alerts

@lru_cache
def cmd_no_output(self) -> str:
return self.translate(f"The command didn't return an output !")
return self.translate("The command didn't return an output !")

@lru_cache
def search_empty(self) -> str:
return self.translate(f"The google research didn't return an output !")
return self.translate("The google research didn't return an output !")

@lru_cache
def access_grant(self) -> str:
return self.translate(f"'AskAI' requires access to your files, folders and apps. Continue (yes/[no])?")
return self.translate("'AskAI' requires access to your files, folders and apps. Continue (yes/[no])?")

@lru_cache
def not_a_command(self, shell: str, content: str) -> str:
Expand Down
2 changes: 1 addition & 1 deletion src/main/askai/core/component/audio_player.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,4 +94,4 @@ def play_sfx(self, filename: str, file_ext: Literal[".mp3", ".wav", ".m4a"] = ".
return self.play_audio_file(filename)


assert AudioPlayer().INSTANCE is not None
assert (player := AudioPlayer().INSTANCE) is not None
12 changes: 8 additions & 4 deletions src/main/askai/core/component/recorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,8 +82,10 @@ def input_device(self) -> Optional[Tuple[int, str]]:
return self._input_device if self._input_device else None

def listen(
self, recognition_api: RecognitionApi = RecognitionApi.OPEN_AI, language: Language = Language.EN_US
) -> Tuple[Path, str]:
self,
recognition_api: RecognitionApi = RecognitionApi.OPEN_AI,
language: Language = Language.EN_US
) -> Tuple[Path, Optional[str]]:
"""Listen to the microphone, save the AudioData as a wav file and then, transcribe the speech.
:param recognition_api: the API to be used to recognize the speech.
:param language: the spoken language.
Expand Down Expand Up @@ -125,7 +127,7 @@ def _detect_noise(self, interval: float = 0.8) -> None:
"""
with Microphone() as source:
try:
log.debug(msg.noise_levels())
log.debug('Adjusting noise levels…')
self._rec.adjust_for_ambient_noise(source, duration=interval)
except UnknownValueError as err:
raise IntelligibleAudioError(f"Unable to detect noise => {str(err)}") from err
Expand Down Expand Up @@ -166,7 +168,9 @@ def _select_device(self) -> Optional[int]:
return None

def _test_device(self, idx: int) -> bool:
"""TODO"""
"""Test whether the input device specified by index can be used as an STT input.
:param idx: The index of the device to be tested.
"""
log.debug(f"Testing input device at index: %d", idx)
try:
with Microphone(device_index=idx) as source:
Expand Down
Loading

0 comments on commit 2ef436f

Please sign in to comment.