Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,9 @@ AGENT_DEBUG_MODE=false
# Model Provider Settings
# ============================================
# For more details, see the documentation in the /doc directory.
# To access full functionality, make sure to include an embedding service.
# To access full functionality, make sure to include an embedding-capable provider.
# You can configure multiple providers below.
# The primary setup uses OpenRouter, along with another provider that supports embeddings.

# Get your API key from: https://openrouter.ai/
# Note: OpenRouter does not currently support embedding or reranker models.
Expand All @@ -53,6 +54,10 @@ SILICONFLOW_API_KEY=
# Get your API key from: https://platform.openai.com/api-keys
OPENAI_API_KEY=

# You can set any OpenAI-compatible API key, but you need to configure python/configs/providers/openai-compatible.yaml manually.
OPENAI_COMPATIBLE_API_KEY=
OPENAI_COMPATIBLE_BASE_URL=


# ============================================
# Research Agent Configurations
Expand Down
4 changes: 4 additions & 0 deletions python/configs/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ models:
openai:
config_file: "providers/openai.yaml"
api_key_env: "OPENAI_API_KEY"

openai-compatible:
config_file: "providers/openai-compatible.yaml"
api_key_env: "OPENAI_COMPATIBLE_API_KEY"

# Agent Configuration
agents:
Expand Down
72 changes: 72 additions & 0 deletions python/configs/providers/openai-compatible.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# ============================================
# OpenAI Compatible API Provider Configuration
# ============================================
# This configuration supports any OpenAI-compatible API, including
# vLLM, Ollama, Together.ai, DashScope, and similar services.

#
# Usage:
# 1. Set OPENAI_COMPATIBLE_BASE_URL environment variable to your API endpoint
# 2. Set OPENAI_COMPATIBLE_API_KEY if your service requires authentication
# 3. Set PRIMARY_PROVIDER=openai-compatible
#
# Examples:
#
# # vLLM on remote server
# export OPENAI_COMPATIBLE_BASE_URL=http://your-vllm-server:8000/v1
# export PRIMARY_PROVIDER=openai-compatible
#
# # Together.ai
# export OPENAI_COMPATIBLE_BASE_URL=https://api.together.xyz/v1
# export OPENAI_COMPATIBLE_API_KEY=your-together-api-key
# export PRIMARY_PROVIDER=openai-compatible

name: "OpenAI-Compatible"
provider_type: "openai-compatible"

enabled: true

# Connection Configuration
connection:
# Use environment variable with fallback to local Ollama
base_url: "${OPENAI_COMPATIBLE_BASE_URL:http://localhost:11434/v1}"
api_key_env: "OPENAI_COMPATIBLE_API_KEY"

# Default model used when agents.yaml does not specify one.
# Example: qwen3-max
default_model: "qwen3-max"

# Model Parameters Defaults
defaults:
temperature: 0.7
max_tokens: 4096

# Available Models (examples - adjust based on your provider model id)
models:
- id: "qwen3-max"
name: "Qwen3 Max"
context_length: 256000
description: "Qwen3 Max model"


# ============================================
# Embedding Models Configuration
# ============================================
# Note: Embedding support depends on your provider

# Example: text-embedding-v4
embedding:
# Default embedding model
default_model: "text-embedding-v4"

# Default parameters
defaults:
dimensions: 2048
encoding_format: "float"

# Available embedding models
models:
- id: "text-embedding-v4"
name: "Text Embedding V4"
dimensions: 2048
max_input: 8192
description: "Text Embedding V4 model"
72 changes: 72 additions & 0 deletions python/valuecell/adapters/models/factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,7 @@ def create_model(self, model_id: Optional[str] = None, **kwargs):
return OpenAIChat(
id=model_id,
api_key=self.config.api_key,
base_url=self.config.base_url,
temperature=params.get("temperature"),
max_tokens=params.get("max_tokens"),
top_p=params.get("top_p"),
Expand Down Expand Up @@ -302,13 +303,83 @@ def create_embedder(self, model_id: Optional[str] = None, **kwargs):
return OpenAIEmbedder(
id=model_id,
api_key=self.config.api_key,
base_url=self.config.base_url,
dimensions=int(params.get("dimensions", 1536))
if params.get("dimensions")
else None,
encoding_format=params.get("encoding_format", "float"),
)


class OpenAICompatibleProvider(ModelProvider):
    """Model provider for any OpenAI-compatible API endpoint.

    Targets services that expose the OpenAI wire protocol (e.g. DashScope,
    vLLM, Ollama, Together.ai). Chat models are created via agno's
    ``OpenAILike`` wrapper, which relaxes strict OpenAI-only behavior;
    embedders reuse agno's ``OpenAIEmbedder`` pointed at the configured
    ``base_url``.

    NOTE(review): an earlier docstring claimed this class converts
    unsupported message roles (e.g. 'developer'); no such conversion is
    implemented here — any role handling is delegated to ``OpenAILike``.
    """

    def create_model(self, model_id: Optional[str] = None, **kwargs):
        """Create a chat model against the configured OpenAI-compatible endpoint.

        Args:
            model_id: Model identifier; falls back to the provider's
                configured ``default_model`` when omitted.
            **kwargs: Per-call parameter overrides; merged over the
                provider's configured defaults.

        Returns:
            An ``agno.models.openai.OpenAILike`` instance.

        Raises:
            ImportError: If the optional ``agno`` dependency is not installed.
        """
        try:
            from agno.models.openai import OpenAILike
        except ImportError as err:
            # Chain the original error so the missing-dependency cause is visible.
            raise ImportError(
                "agno package not installed. Install with: pip install agno"
            ) from err

        model_id = model_id or self.config.default_model
        # kwargs take precedence over provider-level defaults.
        params = {**self.config.parameters, **kwargs}

        logger.info(
            f"Creating OpenAI-compatible model: {model_id} (base_url: {self.config.base_url})"
        )

        return OpenAILike(
            id=model_id,
            api_key=self.config.api_key,
            base_url=self.config.base_url,
            temperature=params.get("temperature"),
            max_tokens=params.get("max_tokens"),
            top_p=params.get("top_p"),
            frequency_penalty=params.get("frequency_penalty"),
            presence_penalty=params.get("presence_penalty"),
        )

    def create_embedder(self, model_id: Optional[str] = None, **kwargs):
        """Create an embedder against the configured OpenAI-compatible endpoint.

        Args:
            model_id: Embedding model identifier; falls back to the provider's
                ``default_embedding_model`` when omitted.
            **kwargs: Per-call parameter overrides; merged over the provider's
                configured embedding defaults.

        Returns:
            An ``agno.knowledge.embedder.openai.OpenAIEmbedder`` instance.

        Raises:
            ImportError: If the optional ``agno`` dependency is not installed.
            ValueError: If no embedding model is configured or supplied.
        """
        try:
            from agno.knowledge.embedder.openai import OpenAIEmbedder
        except ImportError as err:
            raise ImportError("agno package not installed") from err

        # Use provided model_id or the provider's default embedding model.
        model_id = model_id or self.config.default_embedding_model

        if not model_id:
            raise ValueError(
                f"No embedding model specified for provider '{self.config.name}'"
            )

        # Merge parameters: provider embedding defaults < kwargs
        params = {**self.config.embedding_parameters, **kwargs}

        logger.info(f"Creating OpenAI-compatible embedder: {model_id}")

        return OpenAIEmbedder(
            id=model_id,
            api_key=self.config.api_key,
            base_url=self.config.base_url,
            # Match OpenAIProvider: pass None when dimensions are unset so the
            # service's own default applies, instead of a hard-coded fallback.
            dimensions=int(params["dimensions"])
            if params.get("dimensions")
            else None,
            # Match OpenAIProvider: default to "float" rather than passing None.
            encoding_format=params.get("encoding_format", "float"),
        )

    def is_available(self) -> bool:
        """Return True when both an API key and a base URL are configured.

        Unlike providers with a fixed endpoint, an OpenAI-compatible provider
        is unusable without an explicit base URL.
        """
        return bool(self.config.api_key and self.config.base_url)


class ModelFactory:
"""
Factory for creating model instances with provider abstraction
Expand All @@ -327,6 +398,7 @@ class ModelFactory:
"azure": AzureProvider,
"siliconflow": SiliconFlowProvider,
"openai": OpenAIProvider,
"openai-compatible": OpenAICompatibleProvider,
}

def __init__(self, config_manager: Optional[ConfigManager] = None):
Expand Down
1 change: 1 addition & 0 deletions python/valuecell/config/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ def primary_provider(self) -> str:
"siliconflow",
"google",
"openai",
"openai-compatible",
]

for preferred in preferred_order:
Expand Down