Skip to content

Commit

Permalink
Absent deps fixed, Llama3 added. (#38)
Browse files Browse the repository at this point in the history
  • Loading branch information
s-nagaev authored Jul 12, 2024
1 parent ff11e05 commit de20255
Show file tree
Hide file tree
Showing 7 changed files with 244 additions and 93 deletions.
11 changes: 11 additions & 0 deletions changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

## [0.3.0] - 2024-07-12

### Changed
- The Llama 2 provider in the list of available providers was replaced by Llama 3.

### Fixed
- Missing dependency (curl-cffi) required in new versions of gpt4free.
- A bug causing the bot under heavy load to "forget" to respond.



## [0.2.3] - 2024-04-17

### Changed
Expand Down
15 changes: 5 additions & 10 deletions hiroshi/services/gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,17 +8,16 @@

# Mapping of user-facing menu labels to (model name, gpt4free provider name).
# "Default" as the provider name means "let gpt4free pick the best provider".
# NOTE: the Llama 2 entries and the old "(The best/fastest provider)" labels
# were removed in this commit (replaced by Llama 3 and "(Fastest provider)");
# keeping the superseded lines would re-add dead menu entries.
MODELS_AND_PROVIDERS: dict[str, tuple[str, str]] = {
    "Default": ("gpt_35_long", "Default"),
    "GPT-3.5 (Fastest provider)": ("gpt_35_long", "Default"),
    "GPT-4 (Fastest provider)": ("gpt_4", "Default"),
    "Bing (GPT-4)": ("gpt_4", "Bing"),
    "ChatBase (GPT-3.5)": ("gpt-3.5-turbo", "ChatBase"),
    "ChatgptAi (GPT-3.5)": ("gpt-3.5-turbo", "ChatgptAi"),
    "FreeGpt (GPT-3.5)": ("gpt-3.5-turbo", "FreeGpt"),
    "GptGo (GPT-3.5)": ("gpt-3.5-turbo", "GptGo"),
    "You (GPT-3.5)": ("gpt-3.5-turbo", "You"),
    "Llama (Llama 3 8B)": ("meta/meta-llama-3-8b-instruct", "Llama"),
    "Llama (Llama 3 70B)": ("meta/meta-llama-3-70b-instruct", "Llama"),
}


Expand All @@ -44,8 +43,4 @@ async def get_chat_response(


def retrieve_available_providers() -> list[str]:
    """Return the menu labels of providers that can currently serve requests.

    A label is included when its gpt4free provider reports as working
    (``is_provider_active``), or when the entry's ``(model, provider)``
    tuple contains ``"Default"`` — the default entry is always offered.
    """
    # The diff residue left two return statements here (the old multi-line
    # comprehension made this one unreachable); only the final one-liner,
    # which iterates items() once instead of re-indexing per key, is kept.
    return [key for key, value in MODELS_AND_PROVIDERS.items() if is_provider_active(value) or "Default" in value]
2 changes: 2 additions & 0 deletions hiroshi/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,8 @@ async def run_monitoring(context: ContextTypes.DEFAULT_TYPE) -> None:

def is_provider_active(model_and_provider_names: tuple[str, str]) -> bool:
    """Report whether the gpt4free provider behind a menu entry is usable.

    Args:
        model_and_provider_names: ``(model_name, provider_name)`` pair, as
            stored in the providers mapping; only the provider name is used.

    Returns:
        True when the provider is known to gpt4free and flagged as working.
    """
    provider_name = model_and_provider_names[1]
    if provider_name == "Llama":
        # TODO: Temporary solution, because Llama is turned off accidentally on the gpt4free side
        return True
    provider = ProviderUtils.convert.get(provider_name)
    if provider:
        return bool(provider.working)
    return False
20 changes: 14 additions & 6 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import asyncio
from asyncio import Task
from typing import Any, Coroutine

from loguru import logger
from telegram import (
Expand Down Expand Up @@ -38,6 +40,7 @@

class HiroshiBot:
def __init__(self) -> None:
self.background_tasks: set[Task[Any]] = set()
self.commands = [
BotCommand(command="about", description="About this bot"),
BotCommand(command="help", description="Show the help message"),
Expand All @@ -53,6 +56,11 @@ def __init__(self) -> None:
BotCommand(command="provider", description="Select GPT provider"),
]

def create_task(self, task: Coroutine[Any, Any, Any]) -> None:
    """Schedule *task* on the running event loop without awaiting it.

    A strong reference to the scheduled task is stored in
    ``self.background_tasks`` so the event loop's weak reference is not
    the only one and the task cannot be garbage-collected mid-flight;
    the done-callback drops the reference once the task finishes.
    """
    scheduled = asyncio.create_task(task)
    self.background_tasks.add(scheduled)
    scheduled.add_done_callback(self.background_tasks.discard)

async def help(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
telegram_message = get_telegram_message(update=update)
commands = [f"/{command.command} - {command.description}" for command in self.commands]
Expand All @@ -75,7 +83,7 @@ async def about(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> Non
@check_user_allowance
@check_user_allow_to_apply_settings
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Reset the user's conversation, delegating the work to a background task.

    Uses ``self.create_task`` (not a bare ``asyncio.create_task``) so a
    strong reference to the task is kept and it cannot be dropped by the
    garbage collector before completing — the "forgets to respond under
    load" bug this commit fixes. The superseded ``asyncio.create_task``
    line from the diff residue is removed.
    """
    self.create_task(task=handle_reset(update=update, context=context))

@check_user_allowance
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
Expand All @@ -93,11 +101,11 @@ async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> No
and not user_interacts_with_bot(update=update, context=context)
):
return None
asyncio.create_task(handle_prompt(update=update, context=context))
self.create_task(task=handle_prompt(update=update, context=context))

@check_user_allowance
async def ask(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle an explicit /ask command by processing the prompt in the background.

    Delegates to ``self.create_task`` so a strong reference to the task is
    retained until it finishes; the superseded bare ``asyncio.create_task``
    call left over from the diff is removed.
    """
    self.create_task(task=handle_prompt(update=update, context=context))

@check_user_allowance
@check_user_allow_to_apply_settings
Expand All @@ -109,7 +117,7 @@ async def show_menu(self, update: Update, context: ContextTypes.DEFAULT_TYPE) ->

@check_user_allow_to_apply_settings
async def select_provider(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Apply the GPT provider chosen from the selection menu, in the background.

    Delegates to ``self.create_task`` to keep a strong reference to the
    scheduled task; the superseded bare ``asyncio.create_task`` call left
    over from the diff is removed.
    """
    self.create_task(task=handle_provider_selection(update=update, context=context))

@check_user_allowance
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
Expand Down Expand Up @@ -139,8 +147,8 @@ def run(self) -> None:
app = (
ApplicationBuilder()
.token(telegram_settings.token)
.proxy_url(telegram_settings.proxy)
.get_updates_proxy_url(telegram_settings.proxy)
.proxy(telegram_settings.proxy)
.get_updates_proxy(telegram_settings.proxy)
.post_init(self.post_init)
.build()
)
Expand Down
Loading

0 comments on commit de20255

Please sign in to comment.