From 4ee5f2684be419d7e53e73d0b4678d2f37f9a3e7 Mon Sep 17 00:00:00 2001 From: Hugo Saporetti Junior Date: Thu, 31 Oct 2024 16:46:35 -0300 Subject: [PATCH] Add conversation starters, input history now, comes from file --- docs/devel/misc/askai-questions.txt | 19 --- src/main/askai/core/askai_messages.py | 8 +- src/main/askai/core/askai_settings.py | 3 + .../askai/core/component/cache_service.py | 59 +++++---- .../processors/splitter/splitter_actions.py | 8 +- .../processors/splitter/splitter_executor.py | 118 +++++++++--------- src/main/askai/core/router/tools/terminal.py | 2 +- src/main/askai/language/ai_translator.py | 4 +- .../language/translators/argos_translator.py | 2 +- .../language/translators/deepl_translator.py | 2 +- .../language/translators/marian_translator.py | 2 +- .../askai/resources/conversation-starters.txt | 27 ++++ .../resources/prompts/taius/taius-refiner.txt | 6 +- .../resources/prompts/taius/taius-tts.txt | 27 ++-- 14 files changed, 152 insertions(+), 135 deletions(-) delete mode 100644 docs/devel/misc/askai-questions.txt create mode 100644 src/main/askai/resources/conversation-starters.txt diff --git a/docs/devel/misc/askai-questions.txt b/docs/devel/misc/askai-questions.txt deleted file mode 100644 index 0bb306e9..00000000 --- a/docs/devel/misc/askai-questions.txt +++ /dev/null @@ -1,19 +0,0 @@ -Questions: - -- Summarize my markdown files at my HomeSetup docs folder. -- What are the current weather conditions in San Francisco, U.S today? -- When is the upcoming Los Angeles Lakers match? -- Who currently holds the office of President of the United States? -- List my downloads using stt. -- Who is Hugo Saporetti junior? -- List my music and let me know if there is any ac/dc song. If so, show me the file name and open it. -- Open the first reminder file you find at my downloads. -- Create a small Python program to allow me to calculate the speed given the time and distance, save it as 'dist.py'. 
-- List my downloads and let me know if there is any reminder. -- List my downloads and let me know if there is any image. -- Open the first reminder you find at my downloads and tell me what I must do first. -- Describe the first image you find at my downloads. -- find . -mxdepth 1 -type f -nme *.png -- I have downloaded a QR logo, open it to me. -- Tell me who you see using the webcam. Respond like talking to an audience. -- List my music and let me know if there is any gabiroba song. If so, show me the file name and open it. diff --git a/src/main/askai/core/askai_messages.py b/src/main/askai/core/askai_messages.py index 240d54e2..b2930bef 100644 --- a/src/main/askai/core/askai_messages.py +++ b/src/main/askai/core/askai_messages.py @@ -33,8 +33,8 @@ class AskAiMessages(metaclass=Singleton): @staticmethod @lru_cache - def get_translator(from_lang: Language, to_lang: Language) -> AITranslator: - return AskAiMessages.TRANSLATOR(from_lang, to_lang) + def get_translator(from_lang: Language) -> AITranslator: + return AskAiMessages.TRANSLATOR(from_lang, configs.language) def __init__(self): # fmt: off @@ -49,7 +49,7 @@ def accurate_responses(self) -> list[str]: @property def translator(self) -> AITranslator: - return AskAiMessages.get_translator(Language.EN_US, configs.language) + return AskAiMessages.get_translator(Language.EN_US) @lru_cache(maxsize=256) def translate(self, text: AnyStr) -> str: @@ -58,7 +58,7 @@ def translate(self, text: AnyStr) -> str: :return: The translated text. """ # Avoid translating debug messages. 
- if re.match(r"^~~\[DEBUG]~~.*", str(text), flags=re.IGNORECASE | re.MULTILINE): + if re.match(r"^~~\[DEBUG]~~", str(text), flags=re.IGNORECASE | re.DOTALL | re.MULTILINE): return text return self.translator.translate(str(text)) diff --git a/src/main/askai/core/askai_settings.py b/src/main/askai/core/askai_settings.py index 7c6b993d..437bc19a 100644 --- a/src/main/askai/core/askai_settings.py +++ b/src/main/askai/core/askai_settings.py @@ -37,6 +37,9 @@ # Make sure the AskAI directory is exported. os.environ["ASKAI_DIR"] = str(ASKAI_DIR) +# AskAi conversation starters +CONVERSATION_STARTERS: Path = Path(classpath.resource_path / "conversation-starters.txt") + class AskAiSettings(metaclass=Singleton): """The AskAI 'SetMan' Settings.""" diff --git a/src/main/askai/core/component/cache_service.py b/src/main/askai/core/component/cache_service.py index 8ab47e78..708e3c3c 100644 --- a/src/main/askai/core/component/cache_service.py +++ b/src/main/askai/core/component/cache_service.py @@ -12,18 +12,22 @@ Copyright (c) 2024, HomeSetup """ -from askai.core.askai_configs import configs -from askai.core.askai_settings import ASKAI_DIR -from clitt.core.tui.line_input.keyboard_input import KeyboardInput +import os +import re from collections import namedtuple +from pathlib import Path +from shutil import copyfile +from typing import Optional + +from clitt.core.tui.line_input.keyboard_input import KeyboardInput +from hspylib.core.enums.charset import Charset from hspylib.core.metaclass.singleton import Singleton from hspylib.core.tools.commons import file_is_not_empty -from hspylib.core.tools.text_tools import hash_text +from hspylib.core.tools.text_tools import hash_text, ensure_endswith from hspylib.modules.cache.ttl_cache import TTLCache -from pathlib import Path -from typing import Optional -import re +from askai.core.askai_configs import configs +from askai.core.askai_settings import ASKAI_DIR, CONVERSATION_STARTERS # AskAI cache root directory. 
CACHE_DIR: Path = Path(f"{ASKAI_DIR}/cache") @@ -73,6 +77,9 @@ if not PERSIST_DIR.exists(): PERSIST_DIR.mkdir(parents=True, exist_ok=True) +ASKAI_INPUT_HISTORY_FILE: Path = Path(CACHE_DIR / "askai-input-history.txt") +if not file_is_not_empty(str(ASKAI_INPUT_HISTORY_FILE)): + copyfile(str(CONVERSATION_STARTERS), str(ASKAI_INPUT_HISTORY_FILE)) CacheEntry = namedtuple("CacheEntry", ["key", "expires"]) @@ -87,21 +94,12 @@ class is designed to store and retrieve cached data efficiently, reducing the ne ASKAI_CACHE_KEYS: str = "askai-cache-keys" - ASKAI_INPUT_CACHE_KEY: str = "askai-input-history" - ASKAI_CONTEXT_KEY: str = "askai-context-key" _TTL_CACHE: TTLCache[str] = TTLCache(ttl_minutes=configs.ttl) - def __init__(self): - keys: str | None = self._TTL_CACHE.read(self.ASKAI_CACHE_KEYS) - self._cache_keys: set[str] = set(map(str.strip, keys.split(",") if keys else {})) - - @property - def keys(self) -> set[str]: - return self._cache_keys - - def audio_file_path(self, text: str, voice: str = "onyx", audio_format: str = "mp3") -> tuple[str, bool]: + @staticmethod + def audio_file_path(text: str, voice: str = "onyx", audio_format: str = "mp3") -> tuple[str, bool]: """Retrieve the hashed audio file path and determine whether the file already exists. :param text: The text that the audio represents. :param voice: The AI voice used for speech synthesis (default is "onyx"). @@ -112,6 +110,14 @@ def audio_file_path(self, text: str, voice: str = "onyx", audio_format: str = "m audio_file_path = f"{str(AUDIO_DIR)}/askai-{hash_text(key)}.{audio_format}" return audio_file_path, file_is_not_empty(audio_file_path) + def __init__(self): + keys: str | None = self._TTL_CACHE.read(self.ASKAI_CACHE_KEYS) + self._cache_keys: set[str] = set(map(str.strip, keys.split(",") if keys else {})) + + @property + def keys(self) -> set[str]: + return self._cache_keys + def save_reply(self, text: str, reply: str) -> Optional[str]: """Save an AI reply into the TTL (Time-To-Live) cache. 
:param text: The text to be cached. @@ -156,25 +162,26 @@ def clear_replies(self) -> list[str]: return list(map(self.del_reply, sorted(self.keys))) def read_input_history(self) -> list[str]: - """Retrieve line input queries from the TTL (Time-To-Live) cache. + """Retrieve line input queries from the history file. :return: A list of input queries stored in the cache. """ - hist_str: str = self._TTL_CACHE.read(self.ASKAI_INPUT_CACHE_KEY) - return hist_str.split(",") if hist_str else [] + return ASKAI_INPUT_HISTORY_FILE.read_text().split(os.linesep) - def save_input_history(self, history: list[str] = None) -> str: - """Save input queries into the TTL (Time-To-Live) cache. + def save_input_history(self, history: list[str] = None) -> None: + """Save input queries into the history file. :param history: A list of input queries to be saved. If None, the current input history will be saved. - :return: The temporary file name of the saved entry. """ - return self._TTL_CACHE.save(self.ASKAI_INPUT_CACHE_KEY, ",".join(history or KeyboardInput.history())) + if history := (history or KeyboardInput.history()): + with open(str(ASKAI_INPUT_HISTORY_FILE), 'w', encoding=Charset.UTF_8.val) as f_hist: + list(map(lambda h: f_hist.write(ensure_endswith(os.linesep, h)), + filter(lambda h: not h.startswith('/'), history))) def load_input_history(self, predefined: list[str] = None) -> list[str]: """Load input queries from the TTL (Time-To-Live) cache extending it with a predefined input history. :param predefined: A list of predefined input queries to be appended to the final list. :return: A list of input queries loaded from the cache. 
""" - history = self.read_input_history() + history = self.read_input_history() or list() if predefined: history.extend(list(filter(lambda c: c not in history, predefined))) return history diff --git a/src/main/askai/core/processors/splitter/splitter_actions.py b/src/main/askai/core/processors/splitter/splitter_actions.py index 8e47fae4..8e30949d 100644 --- a/src/main/askai/core/processors/splitter/splitter_actions.py +++ b/src/main/askai/core/processors/splitter/splitter_actions.py @@ -39,6 +39,8 @@ import logging as log +from askai.core.support.text_formatter import text_formatter + class SplitterActions(metaclass=Singleton): """Class that provides the splitter some actionable items.""" @@ -54,7 +56,8 @@ def wrap_answer(question: str, answer: str, model_result: ModelResult = ModelRes :return: An optional formatted string containing the wrapped answer. """ output: str = answer - args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": answer, "question": question} + ctx: str = text_formatter.strip_format(answer) + args = {"user": prompt.user.title(), "idiom": shared.idiom, "context": ctx, "question": question} prompt_args: list[str] = [k for k in args.keys()] model: ResponseModel = ResponseModel.of_model(model_result.mid) events.reply.emit(reply=AIReply.full(msg.model_select(model))) @@ -83,9 +86,10 @@ def refine_answer(question: str, answer: str, acc_response: AccResponse | None = :param acc_response: The final accuracy response, if available. 
""" if acc_response and acc_response.reasoning: - ctx: str = str(shared.context.flat("HISTORY")) + ctx: str = text_formatter.strip_format(str(shared.context.flat("HISTORY"))) args = { "locale": configs.language.locale, + "user": prompt.user.title(), "improvements": acc_response.details, "context": ctx, "response": answer, diff --git a/src/main/askai/core/processors/splitter/splitter_executor.py b/src/main/askai/core/processors/splitter/splitter_executor.py index ace20528..0e982a87 100644 --- a/src/main/askai/core/processors/splitter/splitter_executor.py +++ b/src/main/askai/core/processors/splitter/splitter_executor.py @@ -30,6 +30,7 @@ from askai.core.processors.splitter.splitter_pipeline import SplitterPipeline from askai.core.processors.splitter.splitter_states import States from askai.core.support.text_formatter import text_formatter as tf, text_formatter +from askai.exception.exceptions import InaccurateResponse class SplitterExecutor(Thread): @@ -64,67 +65,70 @@ def interrupt(self, ev: Event) -> None: def run(self) -> None: """Execute the splitter pipeline.""" - with Live(Spinner("dots", f"[green]{self.pipeline.state}…[/green]", style="green"), console=tf.console) as live: - while not (self._interrupted or self.pipeline.state == States.COMPLETE): - self.pipeline.track_previous() - if 1 < configs.max_router_retries < 1 + self.pipeline.failures[self.pipeline.state.value]: - self.display( - f"\n[red] Max retries exceeded: {configs.max_agent_retries}[/red]\n", True) - break - if 1 < configs.max_iteractions < 1 + self.pipeline.iteractions: - self.display( - f"\n[red] Max iteractions exceeded: {configs.max_iteractions}[/red]\n", True) - break - match self.pipeline.state: - case States.STARTUP: - if self.pipeline.st_startup(): - self.pipeline.ev_pipeline_started() - case States.MODEL_SELECT: - if self.pipeline.st_model_select(): - self.pipeline.ev_model_selected() - case States.TASK_SPLIT: - if self.pipeline.st_task_split(): - if self.pipeline.is_direct(): - 
self.display("[yellow]√ Direct answer provided[/yellow]") - self.pipeline.ev_direct_answer() - else: - self.display(f"[green]√ Action plan created[/green]") - self.pipeline.ev_plan_created() - case States.EXECUTE_TASK: - if self.pipeline.st_execute_task(): - self.pipeline.ev_task_executed() - case States.ACC_CHECK: - acc_color: AccColor = self.pipeline.st_accuracy_check() - c_name: str = acc_color.color.casefold() + try: + with Live(Spinner("dots", f"[green]{self.pipeline.state}…[/green]", style="green"), console=tf.console) as live: + while not (self._interrupted or self.pipeline.state == States.COMPLETE): + self.pipeline.track_previous() + if 1 < configs.max_router_retries < 1 + self.pipeline.failures[self.pipeline.state.value]: self.display( - f"[green]√ Accuracy check: [{c_name}]{c_name.upper()}[/{c_name}][/green]") - if acc_color.passed(AccColor.GOOD): - self.pipeline.ev_accuracy_passed() - elif acc_color.passed(AccColor.MODERATE): - self.pipeline.ev_refine_required() - else: - self.pipeline.ev_accuracy_failed() - case States.REFINE_ANSWER: - if self.pipeline.st_refine_answer(): - self.pipeline.ev_answer_refined() - case States.WRAP_ANSWER: - if self.pipeline.st_final_answer(): - self.pipeline.ev_final_answer() - case _: + f"\n[red] Max retries exceeded: {configs.max_agent_retries}[/red]\n", True) + break + if 1 < configs.max_iteractions < 1 + self.pipeline.iteractions: self.display( - f"[red] Error: Machine halted before complete!({self.pipeline.state})[/red]", True) + f"\n[red] Max iteractions exceeded: {configs.max_iteractions}[/red]\n", True) break + match self.pipeline.state: + case States.STARTUP: + if self.pipeline.st_startup(): + self.pipeline.ev_pipeline_started() + case States.MODEL_SELECT: + if self.pipeline.st_model_select(): + self.pipeline.ev_model_selected() + case States.TASK_SPLIT: + if self.pipeline.st_task_split(): + if self.pipeline.is_direct(): + self.display("[yellow]√ Direct answer provided[/yellow]") + 
self.pipeline.ev_direct_answer() + else: + self.display(f"[green]√ Action plan created[/green]") + self.pipeline.ev_plan_created() + case States.EXECUTE_TASK: + if self.pipeline.st_execute_task(): + self.pipeline.ev_task_executed() + case States.ACC_CHECK: + acc_color: AccColor = self.pipeline.st_accuracy_check() + c_name: str = acc_color.color.casefold() + self.display( + f"[green]√ Accuracy check: [{c_name}]{c_name.upper()}[/{c_name}][/green]") + if acc_color.passed(AccColor.GOOD): + self.pipeline.ev_accuracy_passed() + elif acc_color.passed(AccColor.MODERATE): + self.pipeline.ev_refine_required() + else: + self.pipeline.ev_accuracy_failed() + case States.REFINE_ANSWER: + if self.pipeline.st_refine_answer(): + self.pipeline.ev_answer_refined() + case States.WRAP_ANSWER: + if self.pipeline.st_final_answer(): + self.pipeline.ev_final_answer() + case _: + self.display( + f"[red] Error: Machine halted before complete!({self.pipeline.state})[/red]", True) + break - execution_status: bool = self.pipeline.previous != self.pipeline.state - execution_status_str: str = ( - f"{'[green]√[/green]' if execution_status else '[red]X[/red]'}" - f" {str(self.pipeline.previous)}" - ) - self.pipeline.failures[self.pipeline.state.value] += 1 if not execution_status else 0 - self.display(f"[green]{execution_status_str}[/green]") - live.update(Spinner("dots", f"[green]{self.pipeline.state}…[/green]", style="green")) - self.pipeline.iteractions += 1 - cursor.erase_line() + execution_status: bool = self.pipeline.previous != self.pipeline.state + execution_status_str: str = ( + f"{'[green]√[/green]' if execution_status else '[red]X[/red]'}" + f" {str(self.pipeline.previous)}" + ) + self.pipeline.failures[self.pipeline.state.value] += 1 if not execution_status else 0 + self.display(f"[green]{execution_status_str}[/green]") + live.update(Spinner("dots", f"[green]{self.pipeline.state}…[/green]", style="green")) + self.pipeline.iteractions += 1 + cursor.erase_line() + except 
InaccurateResponse: + live.update(Spinner("dots", f"[red]AI failed to respond. Retrying…[/red]", style="green")) if configs.is_debug: final_state: States = self.pipeline.state diff --git a/src/main/askai/core/router/tools/terminal.py b/src/main/askai/core/router/tools/terminal.py index d23b02bb..bc0d87ff 100644 --- a/src/main/askai/core/router/tools/terminal.py +++ b/src/main/askai/core/router/tools/terminal.py @@ -46,7 +46,7 @@ def _build_filters_() -> str: if path_obj.exists and path_obj.is_dir: cmd_line: str = ( f'find {folder} -maxdepth 1 -type f {_build_filters_() if filters else ""} ' - f"-exec ls -lLht {{}} + 2>/dev/null | sort -k9,9" + f"! -name '.*' -exec ls -oLhtu {{}} + 2>/dev/null | sort -k9,9" ) status, output = execute_bash(cmd_line) if status: diff --git a/src/main/askai/language/ai_translator.py b/src/main/askai/language/ai_translator.py index f114aa5e..f68892e2 100644 --- a/src/main/askai/language/ai_translator.py +++ b/src/main/askai/language/ai_translator.py @@ -49,7 +49,7 @@ def translate(self, text: AnyStr) -> str: try: # Perform batch translation - translated_texts: list[str] = list(map(self.translate_text, texts_to_translate)) + translated_texts: list[str] = list(map(self._translate_text, texts_to_translate)) except Exception as err: events.reply.emit(reply=AIReply.debug(f"Error during batch translation: {err}")) return text @@ -63,7 +63,7 @@ def translate(self, text: AnyStr) -> str: return translated_text - def translate_text(self, text: AnyStr, **kwargs) -> str: + def _translate_text(self, text: AnyStr, **kwargs) -> str: """Translate text from the source language to the target language. :param text: Text to translate. :return: The translated text. 
diff --git a/src/main/askai/language/translators/argos_translator.py b/src/main/askai/language/translators/argos_translator.py index 440b1a8d..d66e6808 100644 --- a/src/main/askai/language/translators/argos_translator.py +++ b/src/main/askai/language/translators/argos_translator.py @@ -66,7 +66,7 @@ def __init__(self, from_idiom: Language, to_idiom: Language): self._argos_model = argos_model @lru_cache - def translate_text(self, text: AnyStr, **kwargs) -> str: + def _translate_text(self, text: AnyStr, **kwargs) -> str: """Translate text from the source language to the target language. :param text: Text to translate. :return: The translated text. diff --git a/src/main/askai/language/translators/deepl_translator.py b/src/main/askai/language/translators/deepl_translator.py index 340be1f1..5d580b10 100644 --- a/src/main/askai/language/translators/deepl_translator.py +++ b/src/main/askai/language/translators/deepl_translator.py @@ -33,7 +33,7 @@ def __init__(self, source_lang: Language, target_lang: Language): self._translator: Translator | None = None @lru_cache - def translate_text(self, text: AnyStr, **kwargs) -> str: + def _translate_text(self, text: AnyStr, **kwargs) -> str: """Translate text from the source language to the target language. :param text: Text to translate. :return: The translated text. diff --git a/src/main/askai/language/translators/marian_translator.py b/src/main/askai/language/translators/marian_translator.py index ccdc13f0..7343c89c 100644 --- a/src/main/askai/language/translators/marian_translator.py +++ b/src/main/askai/language/translators/marian_translator.py @@ -23,7 +23,7 @@ def __init__(self, from_idiom: Language, to_idiom: Language): self._tokenizer = MarianTokenizer.from_pretrained(self.MODEL_NAME) @lru_cache - def translate_text(self, text: AnyStr, **kwargs) -> str: + def _translate_text(self, text: AnyStr, **kwargs) -> str: """Translate text from the source language to the target language. :param text: Text to translate. 
:return: The translated text. diff --git a/src/main/askai/resources/conversation-starters.txt b/src/main/askai/resources/conversation-starters.txt new file mode 100644 index 00000000..f0594a3d --- /dev/null +++ b/src/main/askai/resources/conversation-starters.txt @@ -0,0 +1,27 @@ + +What is the size of the Moon? +Who is the current Prime Minister of the United Kingdom? +Explain the theory of relativity in simple terms. +What are the latest news headlines today? +Summarize the markdown files in my HomeSetup docs folder. +List all the files in my Downloads folder. +Show me all the image files in my Pictures directory. +Open the first PDF document in my Documents folder. +List my music library and check if there's any AC/DC song. If found, show the file name and play it. +Play the latest movie in my Videos folder. +Show me my favorite photos from last year. +Open the first reminder file in my Downloads and tell me what I need to do first. +List my current reminders and mark the completed ones. +Create a small Python program to calculate speed given time and distance, and save it as 'dist.py'. +Check for updates in my Git repositories. +What are the current weather conditions in San Francisco, CA today? +When is the next Los Angeles Lakers match? +Do I have any events scheduled for this weekend? +Tell me who is currently logged into my computer. +Describe the first image in my Downloads folder. +I have downloaded a QR logo, open it for me. +Tell me who you see using the webcam. Respond as if addressing an audience. +Change my desktop wallpaper to the first image in my Pictures folder. +Adjust my system volume to 50%. +Open my calendar and show today's schedule. +Search my Documents folder for any budget spreadsheets. 
diff --git a/src/main/askai/resources/prompts/taius/taius-refiner.txt b/src/main/askai/resources/prompts/taius/taius-refiner.txt index 7765fec7..87a572bd 100644 --- a/src/main/askai/resources/prompts/taius/taius-refiner.txt +++ b/src/main/askai/resources/prompts/taius/taius-refiner.txt @@ -3,7 +3,7 @@ You are 'Taius', the AskAI helpful and kind assistant. 'Taius' stands for *'T.A. Act as a text editor and formatter. Refine the AI response to ensure they are clear, localized for "{locale}", and adherent to formatting and detail requirements. Perform necessary conversions, such as from miles to kilometers, Currency, or *Fahrenheit* to *Celsius*, using current conversion rates where applicable. - - Translate any non-"{idiom}" text into "{idiom}", considering regional linguistic variations. + - Translate any non-"{locale}" text into "{locale}", considering regional linguistic variations. - Correct any semantic or syntax errors, and enhance the writing to align with regional expressions, style and commonly used words. @@ -16,9 +16,9 @@ Perform necessary conversions, such as from miles to kilometers, Currency, or *F - Do not omit any relevant information. 2. **Localization Adjustments:** - - Adapt text to use regional expressions, units of measurement, and currency specific to the "{idiom}" locale. + - Adapt text to use regional expressions, units of measurement, and currency specific to the "{locale}" locale. - Perform necessary conversions, such as from miles to kilometers or USD to BRL, Fahrenheit to Celsius, using current conversion rates where applicable. - - Translate any non-"{idiom}" text into "{idiom}", considering regional linguistic variations. + - Translate any non-"{locale}" text into "{locale}", considering regional linguistic variations. 3. 
 **Detail Inclusion:** - Ensure that important details such as file names, folder paths, sizes, line numbers, and other pertinent specifics that could affect the user's understanding or implementation of the response are not omitted. diff --git a/src/main/askai/resources/prompts/taius/taius-tts.txt b/src/main/askai/resources/prompts/taius/taius-tts.txt index e5b84703..1c7176e8 100644 --- a/src/main/askai/resources/prompts/taius/taius-tts.txt +++ b/src/main/askai/resources/prompts/taius/taius-tts.txt @@ -4,31 +4,20 @@ Act as a means of digital inclusion for visually impaired individuals, specifica Before responding to the user, it is imperative that you follow the step-by-step instructions provided below in sequential order: -- Craft your reply solely based on the information given in the provided text output. +1. Craft your reply solely based on the information given in the provided context. -- Create a summarized and accessible version of the content while ensuring important details are included. +2. Create a summarized and accessible version of the content while ensuring important details are included. -- When displaying a list, limit it to five items. At the end, include a summary that states the total number of items and how many are not shown. +3. Remove any duplicate information from the final response. -If you have an answer, format like (attention to the new lines): +4. Do not display the parts of the context or question, and do not add extraneous explanations. -"""Here is an accessible version: +5. When listing items, limit to five entries and prefer rendering numbered rather than bulleted. For example, when displaying a file listing: "Total files: XX, Omitted: YY". Summarize with the total number of items and state any omissions. Include a markdown line '---' preceded by a new line, just before the summary. - +6. When the provided output enumerates files or folders, specify whether each item is a file or folder, its size and modification date. 
---- -Total files: XX, Omitted: YY +7. Begin your response by informing the user, with a few words, about the content you are about to provide and indicate that it is an accessible version of the original material. -Tip: -""" - -- When listing is required, prefer rendering numbered than bulleted lists. - -- When the provided output enumerates files or folders, specify whether each item is a file or folder, a its size and modification date. - -- If you are unable to improve the response, return the original text. - -Use the following context to create your answer, according to the question at the end. Context: @@ -37,3 +26,5 @@ Context: ``` Human Question: "{question}" + +Begin refining the response!