
Improving internet service and bugfixes
Hugo Saporetti Junior committed Mar 14, 2024

1 parent caf325e commit 50ec9ff
Showing 11 changed files with 72 additions and 21 deletions.
5 changes: 3 additions & 2 deletions src/main/askai/core/component/internet_service.py
@@ -68,9 +68,10 @@ def search_google(self, query: str, *sites: str) -> Optional[str]:
         AskAiEvents.ASKAI_BUS.events.reply.emit(message=msg.searching())
         if len(sites) > 0:
             log.info("Searching GOOGLE for '%s' url: '%s'", query, str(sites))
-            search_results: str = ''
+            search_results: List[Document] = []
             for url in sites:
-                search_results += str(self._tool.run(f"{query} site: {url}"))
+                content = str(self._tool.run(f"{query} site: {url}"))
+                search_results.append(Document(content))
             prompt = ChatPromptTemplate.from_messages([("system", "{query}\n\n{context}")])
             chain = create_stuff_documents_chain(lc_llm.create_chat_model(), prompt)
             return chain.invoke({"query": query, "context": search_results})
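
The rewritten search_google collects each site's result as a LangChain Document and folds them all into a single prompt via a stuff-documents chain. Below is a minimal, self-contained sketch of that pattern; ChatOpenAI stands in for the project's lc_llm.create_chat_model() helper (which is not shown in this diff), and an OPENAI_API_KEY is assumed to be set in the environment.

```python
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI  # stand-in for lc_llm.create_chat_model()

# Each raw search result becomes a Document, mirroring the loop above.
docs = [Document("First search result"), Document("Second search result")]

# The prompt must expose a {context} placeholder; the chain "stuffs" every
# Document into it and sends one combined prompt to the model.
prompt = ChatPromptTemplate.from_messages([("system", "{query}\n\n{context}")])
chain = create_stuff_documents_chain(ChatOpenAI(), prompt)

answer = chain.invoke({"query": "What did the search find?", "context": docs})
print(answer)
```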
45 changes: 45 additions & 0 deletions src/main/askai/core/engine/openai/temperatures.py
@@ -0,0 +1,45 @@
from hspylib.core.enums.enumeration import Enumeration


class Temperatures(Enumeration):
    """Provide some recommended temperature x top_p combinations for ChatGPT prompts.
    Ref.: https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api/172683
    - Lower temperature (e.g., 0.1 - 0.4): Produces more focused, conservative, and consistent responses.
      This is useful when the marketer needs factual information, precise answers, or messaging that adheres closely
      to a specific format or brand guideline.
    - Moderate temperature (e.g., 0.5 - 0.7): Strikes a balance between creativity and consistency.
      This setting can be useful for general content generation, where a blend of accuracy and inventiveness
      is desired.
    - Higher temperature (e.g., 0.8 - 1.0): Generates more creative, diverse, and unexpected outputs.
      Marketers may prefer this setting when brainstorming innovative campaign ideas, crafting engaging social media
      content, or seeking fresh perspectives on a topic.
    """

    # fmt: off

    # Generates code that adheres to established patterns and conventions. Output is more deterministic and focused.
    # Useful for generating syntactically correct code.
    CODE_GENERATION = 0.2, 0.1

    # Generates creative and diverse text for storytelling. Output is more exploratory and less constrained by patterns.
    CREATIVE_WRITING = 0.7, 0.8

    # Generates conversational responses that balance coherence and diversity. Output is more natural and engaging.
    CHATBOT_RESPONSES = 0.5, 0.5

    # Generates code comments that are more likely to be concise and relevant. Output is more deterministic
    # and adheres to conventions.
    CODE_COMMENT_GENERATION = 0.3, 0.2

    # Generates data analysis scripts that are more likely to be correct and efficient. Output is more deterministic
    # and focused.
    DATA_ANALYSIS = 0.2, 0.1

    # Generates code that explores alternative solutions and creative approaches. Output is less constrained by
    # established patterns.
    EXPLORATORY_CODE_WRITING = 0.6, 0.7

    # fmt: on
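
Each member's value is a (temperature, top_p) tuple, which is why the call sites changed in this commit unpack it with *. A quick sketch of the intended use, assuming hspylib's Enumeration exposes the tuple through .value like a standard Python Enum:

```python
from askai.core.engine.openai.temperatures import Temperatures

# .value is the (temperature, top_p) pair declared above.
temperature, top_p = Temperatures.CREATIVE_WRITING.value
print(temperature, top_p)  # 0.7 0.8

# The processors below unpack it straight into the engine call, e.g.:
#   shared.engine.ask(context, *Temperatures.CREATIVE_WRITING.value)
```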
4 changes: 1 addition & 3 deletions src/main/askai/core/processor/ai_processor.py
@@ -25,8 +25,6 @@
 
 import os
 
-from askai.core.support.utilities import hash_text
-
 
 class AIProcessor(metaclass=ABCMeta):
     """Abstract class that helps implementing AskAI processors."""
@@ -86,7 +84,7 @@ def supports(self, q_type: str) -> bool:
 
     def processor_id(self) -> str:
         """Get the processor ID. the resulting ID is a string, composed by the processor name hash."""
-        return hash_text(self.__class__.__name__)
+        return str(abs(hash(self.__class__.__name__)))
 
     def query_type(self) -> str:
         """Get the query type this processor can handle. By default, it's the name of the processor itself."""
3 changes: 2 additions & 1 deletion src/main/askai/core/processor/analysis_processor.py
@@ -18,6 +18,7 @@
 from langchain_core.prompts import PromptTemplate
 
 from askai.core.askai_messages import msg
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.processor.ai_processor import AIProcessor
@@ -47,7 +48,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
         context: ContextRaw = shared.context.join("CONTEXT", "SETUP", "QUESTION")
         log.info("Analysis::[QUESTION] '%s' context=%s", query_response.question, context)
 
-        if (response := shared.engine.ask(context, temperature=0.0, top_p=0.0)) and response.is_success:
+        if (response := shared.engine.ask(context, *Temperatures.DATA_ANALYSIS.value)) and response.is_success:
             log.debug("Analysis::[RESPONSE] Received from AI: %s.", response)
             if output := response.message:
                 shared.context.push("CONTEXT", query_response.question)
3 changes: 2 additions & 1 deletion src/main/askai/core/processor/command_processor.py
@@ -24,6 +24,7 @@
 from askai.core.askai_events import AskAiEvents
 from askai.core.askai_messages import msg
 from askai.core.askai_prompt import prompt
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.model.terminal_command import TerminalCommand
@@ -60,7 +61,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
         context: ContextRaw = shared.context.join("CONTEXT", "SETUP", "QUESTION")
         log.info("Command::[QUESTION] '%s' context=%s", query_response.question, context)
 
-        if (response := shared.engine.ask(context, temperature=0.0, top_p=0.0)) and response.is_success:
+        if (response := shared.engine.ask(context, *Temperatures.DATA_ANALYSIS.value)) and response.is_success:
             log.debug("Command::[RESPONSE] Received from AI: %s.", response)
             shell, command = extract_command(response.message)
             if command:
3 changes: 2 additions & 1 deletion src/main/askai/core/processor/generic_processor.py
@@ -20,6 +20,7 @@
 from askai.core.askai_messages import msg
 from askai.core.askai_prompt import prompt
 from askai.core.component.cache_service import cache
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.processor.ai_processor import AIProcessor
@@ -47,7 +48,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
         context: ContextRaw = shared.context.join("GENERAL", "INTERNET", "SUMMARY", "SETUP", "QUESTION")
         log.info("Setup::[GENERIC] '%s' context=%s", query_response.question, context)
 
-        if (response := shared.engine.ask(context, temperature=1, top_p=1)) and response.is_success:
+        if (response := shared.engine.ask(context, *Temperatures.CREATIVE_WRITING.value)) and response.is_success:
             output = response.message
             shared.context.push("GENERAL", output, "assistant")
             cache.save_reply(query_response.question, output)
3 changes: 2 additions & 1 deletion src/main/askai/core/processor/internet_processor.py
@@ -22,6 +22,7 @@
 from askai.core.askai_messages import msg
 from askai.core.component.cache_service import cache
 from askai.core.component.internet_service import internet
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.model.search_result import SearchResult
@@ -46,7 +47,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
         log.info("Setup::[INTERNET] '%s' context=%s", query_response.question, context)
 
         if not (response := cache.read_reply(query_response.question)):
-            if (response := shared.engine.ask(context, temperature=0.0, top_p=0.0)) and response.is_success:
+            if (response := shared.engine.ask(context, *Temperatures.CHATBOT_RESPONSES.value)) and response.is_success:
                 search: SearchResult = object_mapper.of_json(response.message, SearchResult)
                 query = " + ".join(search.keywords)
                 fc_call = partial(internet.scrap_sites, query) \
3 changes: 2 additions & 1 deletion src/main/askai/core/processor/output_processor.py
@@ -19,6 +19,7 @@
 
 from askai.core.askai_messages import msg
 from askai.core.askai_prompt import prompt
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.processor.ai_processor import AIProcessor
@@ -43,7 +44,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
         context: ContextRaw = shared.context.join("CONTEXT", "SETUP")
         log.info("Output::[COMMAND] '%s' context=%s", commands, context)
 
-        if (response := shared.engine.ask(context, temperature=0.0, top_p=0.0)) and response.is_success:
+        if (response := shared.engine.ask(context, *Temperatures.DATA_ANALYSIS.value)) and response.is_success:
             log.debug("Output::[RESPONSE] Received from AI: %s.", response)
             if output := response.message:
                 shared.context.push("CONTEXT", output, "assistant")
5 changes: 3 additions & 2 deletions src/main/askai/core/processor/processor_proxy.py
@@ -21,6 +21,7 @@
 
 from askai.core.askai_messages import msg
 from askai.core.askai_prompt import prompt
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.processor.ai_processor import AIProcessor
@@ -49,14 +50,14 @@ def process(self, question: str) -> Tuple[bool, QueryResponse]:
         :param question: The question to the AI engine.
         """
         status = False
-        template = PromptTemplate(input_variables=[], template=self.template)
+        template = PromptTemplate(input_variables=['query_types'], template=self.template)
         final_prompt = msg.translate(template.format(query_types=self.query_types))
         shared.context.set("SETUP", final_prompt, "system")
         shared.context.set("QUESTION", question)
         context: ContextRaw = shared.context.join("INTERNET", "SUMMARY", "CONTEXT", "SETUP", "QUESTION")
         log.info("Ask::[QUESTION] '%s' context=%s", question, context)
 
-        if (response := shared.engine.ask(context, temperature=0.0, top_p=0.0)) and response.is_success:
+        if (response := shared.engine.ask(context, *Temperatures.CODE_GENERATION.value)) and response.is_success:
             log.info("Ask::[PROXY] Received from AI: %s.", str(response))
             output = object_mapper.of_json(response.message, QueryResponse)
             if not isinstance(output, QueryResponse):
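
The template fix above declares query_types as an input variable so that template.format(query_types=...) actually substitutes it. A standalone sketch of that LangChain API, using a made-up template string rather than the project's real prompt (which lives in self.template and is not shown in this diff):

```python
from langchain_core.prompts import PromptTemplate

# Hypothetical template text, for illustration only.
template = PromptTemplate(
    input_variables=["query_types"],
    template="Classify the question into one of: {query_types}",
)
print(template.format(query_types="ANALYSIS, COMMAND, SUMMARY"))
```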
3 changes: 2 additions & 1 deletion src/main/askai/core/processor/summary_processor.py
@@ -21,6 +21,7 @@
 from askai.core.askai_prompt import prompt
 from askai.core.component.cache_service import cache
 from askai.core.component.summarizer import summarizer
+from askai.core.engine.openai.temperatures import Temperatures
 from askai.core.model.chat_context import ContextRaw
 from askai.core.model.query_response import QueryResponse
 from askai.core.model.summary_result import SummaryResult
@@ -47,7 +48,7 @@ def process(self, query_response: QueryResponse) -> Tuple[bool, Optional[str]]:
         log.info("Setup::[SUMMARY] '%s' context=%s", query_response.question, context)
 
         try:
-            if (response := shared.engine.ask(context, temperature=0.0, top_p=0.0)) and response.is_success:
+            if (response := shared.engine.ask(context, *Temperatures.CHATBOT_RESPONSES.value)) and response.is_success:
                 summary_result: SummaryResult = object_mapper.of_json(response.message, SummaryResult)
                 summarizer.generate(summary_result.folder, summary_result.glob)
                 if results := summarizer.query('Give me an overview of all the summarized content'):
16 changes: 8 additions & 8 deletions src/main/askai/core/support/object_mapper.py
@@ -9,17 +9,17 @@
    Copyright (c) 2024, HSPyLib
 """
-from askai.core.support.utilities import hash_text
-from askai.exception.exceptions import InvalidJsonMapping, InvalidMapping
-from hspylib.core.enums.enumeration import Enumeration
-from hspylib.core.metaclass.singleton import Singleton
-import inspect
-import json
 from inspect import isclass
 from json import JSONDecodeError
 from types import SimpleNamespace
 from typing import Any, Callable, Dict, Optional, Type, TypeAlias
 
+import inspect
+import json
+from hspylib.core.enums.enumeration import Enumeration
+from hspylib.core.metaclass.singleton import Singleton
+
+from askai.exception.exceptions import InvalidJsonMapping, InvalidMapping
 
 FnConverter: TypeAlias = Callable[[Any, Type], Any]

@@ -40,8 +40,8 @@ class ConversionMode(Enumeration):
     @staticmethod
     def _hash(type_from: Any, type_to: Type) -> str:
         """Create a hash value for both classes in a way that uniquely identifies the conversion pair."""
-        return hash_text(type_from.__name__) + hash_text(type_to.__name__) if isclass(type_from) \
-            else hash_text(type_from.__class__.__name__) + hash_text(type_to.__name__)
+        return str(hash(type_from.__name__) + hash(type_to.__name__)) if isclass(type_from) \
+            else str(hash(type_from.__class__.__name__) + hash(type_to.__name__))
 
     @classmethod
     def _strict_converter(cls, type1: Any, type2: Type) -> Any:
