Support TitleGeneration, Reasoning in HuggingChat
Improve model list in HuggingSpace, PollinationsAI
Fix Image Generation in PollinationsAI
Add Image Upload in PollinationsAI
Support Usage, FinishReason, jsonMode in PollinationsAI
Add Reasoning to Web UI
Fix using provider api_keys in Web UI
hlohaus committed Jan 23, 2025
1 parent 78fa745 commit cad3081
Showing 15 changed files with 303 additions and 181 deletions.
203 changes: 97 additions & 106 deletions g4f/Provider/PollinationsAI.py
@@ -3,42 +3,45 @@
import json
import random
import requests
from urllib.parse import quote
from urllib.parse import quote_plus
from typing import Optional
from aiohttp import ClientSession

from .helper import filter_none
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages, ImagesType
from ..image import to_data_uri
from ..requests.raise_for_status import raise_for_status
from ..typing import AsyncResult, Messages
from ..image import ImageResponse
from ..requests.aiohttp import get_connector
from ..providers.response import ImageResponse, FinishReason, Usage

DEFAULT_HEADERS = {
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
}

class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Pollinations AI"
url = "https://pollinations.ai"

working = True
supports_stream = False
supports_system_message = True
supports_message_history = True

# API endpoints base
api_base = "https://text.pollinations.ai/openai"

# API endpoints
text_api_endpoint = "https://text.pollinations.ai/"
text_api_endpoint = "https://text.pollinations.ai/openai"
image_api_endpoint = "https://image.pollinations.ai/"

# Models configuration
default_model = "openai"
default_image_model = "flux"

image_models = []
models = []

additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral"]
default_vision_model = "gpt-4o"
extra_image_models = ["midjourney", "dall-e-3"]
vision_models = [default_vision_model, "gpt-4o-mini"]
extra_text_models = [*vision_models, "claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral"]
model_aliases = {
"gpt-4o": default_model,
"qwen-2-72b": "qwen",
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
@@ -50,30 +53,25 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"deepseek-chat": "deepseek",
"llama-3.2-3b": "llamalight",
}
text_models = []

@classmethod
def get_models(cls, **kwargs):
# Initialize model lists if they do not exist
if not hasattr(cls, 'image_models'):
cls.image_models = []
if not hasattr(cls, 'text_models'):
cls.text_models = []

# Fetch image models if not cached
if not cls.image_models:
url = "https://image.pollinations.ai/models"
response = requests.get(url)
raise_for_status(response)
cls.image_models = response.json()
cls.image_models.extend(cls.additional_models_image)
cls.image_models.extend(cls.extra_image_models)

# Fetch text models if not cached
if not cls.text_models:
url = "https://text.pollinations.ai/models"
response = requests.get(url)
raise_for_status(response)
cls.text_models = [model.get("name") for model in response.json()]
cls.text_models.extend(cls.additional_models_text)
cls.text_models.extend(cls.extra_text_models)

# Return combined models
return cls.text_models + cls.image_models
@@ -94,22 +92,27 @@ async def create_async_generator(
enhance: bool = False,
safe: bool = False,
# Text specific parameters
temperature: float = 0.5,
presence_penalty: float = 0,
images: ImagesType = None,
temperature: float = None,
presence_penalty: float = None,
top_p: float = 1,
frequency_penalty: float = 0,
stream: bool = False,
frequency_penalty: float = None,
response_format: Optional[dict] = None,
cache: bool = False,
**kwargs
) -> AsyncResult:
if images is not None and not model:
model = cls.default_vision_model
model = cls.get_model(model)
if not cache and seed is None:
seed = random.randint(0, 100000)

# Check if models
# Image generation
if model in cls.image_models:
async for result in cls._generate_image(
yield await cls._generate_image(
model=model,
messages=messages,
prompt=prompt,
prompt=messages[-1]["content"] if prompt is None else prompt,
proxy=proxy,
width=width,
height=height,
@@ -118,27 +121,28 @@ async def create_async_generator(
private=private,
enhance=enhance,
safe=safe
):
yield result
)
else:
# Text generation
async for result in cls._generate_text(
model=model,
messages=messages,
images=images,
proxy=proxy,
temperature=temperature,
presence_penalty=presence_penalty,
top_p=top_p,
frequency_penalty=frequency_penalty,
stream=stream
response_format=response_format,
seed=seed,
cache=cache,
):
yield result

@classmethod
async def _generate_image(
cls,
model: str,
messages: Messages,
prompt: str,
proxy: str,
width: int,
@@ -148,16 +152,7 @@ async def _generate_image(
private: bool,
enhance: bool,
safe: bool
) -> AsyncResult:
if seed is None:
seed = random.randint(0, 10000)

headers = {
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
}

) -> ImageResponse:
params = {
"seed": seed,
"width": width,
@@ -168,85 +163,81 @@
"enhance": enhance,
"safe": safe
}
params = {k: v for k, v in params.items() if v is not None}

async with ClientSession(headers=headers) as session:
prompt = messages[-1]["content"] if prompt is None else prompt
param_string = "&".join(f"{k}={v}" for k, v in params.items())
url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}"

async with session.head(url, proxy=proxy) as response:
if response.status == 200:
image_response = ImageResponse(images=url, alt=prompt)
yield image_response
params = {k: json.dumps(v) if isinstance(v, bool) else v for k, v in params.items() if v is not None}
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
async with session.head(f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}", params=params) as response:
await raise_for_status(response)
return ImageResponse(str(response.url), prompt)

@classmethod
async def _generate_text(
cls,
model: str,
messages: Messages,
images: Optional[ImagesType],
proxy: str,
temperature: float,
presence_penalty: float,
top_p: float,
frequency_penalty: float,
stream: bool,
seed: Optional[int] = None
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
}

if seed is None:
seed = random.randint(0, 10000)

async with ClientSession(headers=headers) as session:
response_format: Optional[dict],
seed: Optional[int],
cache: bool
) -> AsyncResult:
jsonMode = False
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
jsonMode = True

if images is not None and messages:
last_message = messages[-1].copy()
last_message["content"] = [
*[{
"type": "image_url",
"image_url": {"url": to_data_uri(image)}
} for image, _ in images],
{
"type": "text",
"text": messages[-1]["content"]
}
]
messages[-1] = last_message

async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
data = {
"messages": messages,
"model": model,
"temperature": temperature,
"presence_penalty": presence_penalty,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"jsonMode": False,
"stream": stream,
"jsonMode": jsonMode,
"stream": False, # To get more informations like Usage and FinishReason
"seed": seed,
"cache": False
"cache": cache
}

async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
decoded_chunk = chunk.decode()

# Skip [DONE].
if "data: [DONE]" in decoded_chunk:
continue

# Processing plain text
if not decoded_chunk.startswith("data:"):
clean_text = decoded_chunk.strip()
if clean_text:
yield clean_text
continue

# Processing JSON format
try:
# Remove the "data: " prefix and parse JSON
json_str = decoded_chunk.replace("data:", "").strip()
json_response = json.loads(json_str)

if "choices" in json_response and json_response["choices"]:
if "delta" in json_response["choices"][0]:
content = json_response["choices"][0]["delta"].get("content")
if content:
# Remove escaped slashes before parentheses
clean_content = content.replace("\\(", "(").replace("\\)", ")")
yield clean_content
except json.JSONDecodeError:
# If JSON could not be parsed, skip
continue
async with session.post(cls.text_api_endpoint, json=filter_none(**data)) as response:
await raise_for_status(response)
async for line in response.content:
decoded_chunk = line.decode(errors="replace")
# Stop at [DONE].
if "data: [DONE]" in decoded_chunk:
break
# Processing JSON format
try:
# Remove the "data: " prefix and parse JSON
json_str = decoded_chunk.replace("data:", "").strip()
data = json.loads(json_str)
choice = data["choices"][0]
if "usage" in data:
yield Usage(**data["usage"])
if "message" in choice and "content" in choice["message"] and choice["message"]["content"]:
yield choice["message"]["content"].replace("\\(", "(").replace("\\)", ")")
elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]:
yield choice["delta"]["content"].replace("\\(", "(").replace("\\)", ")")
if "finish_reason" in choice and choice["finish_reason"] is not None:
yield FinishReason(choice["finish_reason"])
break
except json.JSONDecodeError:
yield decoded_chunk.strip()
continue
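
For orientation, a minimal sketch of driving the reworked provider directly, mirroring the chunk types the generator above now yields: plain text, Usage, FinishReason, and a single ImageResponse for image models. The prompts are illustrative, the model names are the defaults from this diff, and it is assumed that FinishReason exposes the reason string it was constructed with.

import asyncio
from g4f.Provider.PollinationsAI import PollinationsAI
from g4f.providers.response import ImageResponse, FinishReason, Usage

async def main():
    # Text: response_format={"type": "json_object"} switches on jsonMode in _generate_text
    async for chunk in PollinationsAI.create_async_generator(
        model="openai",  # default_model in the diff
        messages=[{"role": "user", "content": "Reply with a JSON object containing a greeting."}],
        response_format={"type": "json_object"},
    ):
        if isinstance(chunk, Usage):
            print("usage:", chunk)  # token counts parsed from the non-streamed response
        elif isinstance(chunk, FinishReason):
            print("finish_reason:", chunk.reason)
        else:
            print(chunk, end="")

    # Images: an image model routes to _generate_image, which yields one ImageResponse
    async for chunk in PollinationsAI.create_async_generator(
        model="flux",  # default_image_model in the diff
        messages=[{"role": "user", "content": "a lighthouse at dusk"}],
        width=1024,
        height=1024,
    ):
        if isinstance(chunk, ImageResponse):
            print(chunk)  # the image URL confirmed by the HEAD request

asyncio.run(main())
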
1 change: 1 addition & 0 deletions g4f/Provider/hf_space/Qwen_QVQ_72B.py
@@ -18,6 +18,7 @@ class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin):

default_model = "qwen-qvq-72b-preview"
models = [default_model]
vision_models = models
model_aliases = {"qwq-32b": default_model}

@classmethod
6 changes: 6 additions & 0 deletions g4f/Provider/hf_space/__init__.py
@@ -33,12 +33,18 @@ def get_parameters(cls, **kwargs) -> dict:
def get_models(cls, **kwargs) -> list[str]:
if not cls.models:
models = []
image_models = []
vision_models = []
for provider in cls.providers:
models.extend(provider.get_models(**kwargs))
models.extend(provider.model_aliases.keys())
image_models.extend(provider.image_models)
vision_models.extend(provider.vision_models)
models = list(set(models))
models.sort()
cls.models = models
cls.image_models = list(set(image_models))
cls.vision_models = list(set(vision_models))
return cls.models

@classmethod
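
As a rough sketch of what the aggregated lists enable, assuming the class defined in this file is the HuggingSpace provider and that each sub-provider exposes image_models and vision_models as the loop above expects:

from g4f.Provider.hf_space import HuggingSpace  # assumed import path for the aggregator

models = HuggingSpace.get_models()  # union of sub-provider models and aliases, deduplicated and sorted
print(f"{len(models)} models total")
print("image models:", HuggingSpace.image_models)    # now collected across sub-providers
print("vision models:", HuggingSpace.vision_models)  # now collected across sub-providers
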
(Diffs for the remaining 12 changed files not shown.)
