37 changes: 20 additions & 17 deletions frontend/src/app/setting/components/models/model-detail.tsx
@@ -135,6 +135,8 @@ export function ModelDetail({ provider }: ModelDetailProps) {
settingDefaultModel ||
settingDefaultProvider;

const isDashScope = (provider ?? "").toLowerCase() === "dashscope";

if (detailLoading) {
return (
<div className="text-gray-400 text-sm">Loading provider details...</div>
@@ -216,23 +218,24 @@ export function ModelDetail({ provider }: ModelDetailProps) {
)}
</configForm.Field>

{/* API Host section */}
<configForm.Field name="base_url">
{(field) => (
<Field className="text-gray-950">
<FieldLabel className="font-medium text-base">
API Host
</FieldLabel>
<Input
placeholder={providerDetail.base_url}
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
onBlur={() => configForm.handleSubmit()}
/>
<FieldError errors={field.state.meta.errors} />
</Field>
)}
</configForm.Field>
{!isDashScope && (
<configForm.Field name="base_url">
{(field) => (
<Field className="text-gray-950">
<FieldLabel className="font-medium text-base">
API Host
</FieldLabel>
<Input
placeholder={providerDetail.base_url}
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
onBlur={() => configForm.handleSubmit()}
/>
<FieldError errors={field.state.meta.errors} />
</Field>
)}
</configForm.Field>
)}
</FieldGroup>

{/* Models section */}
69 changes: 37 additions & 32 deletions python/configs/providers/dashscope.yaml
@@ -1,85 +1,90 @@
# ============================================
# DashScope Provider Configuration
# ============================================
# DashScope (Alibaba Cloud Bailian) exposes an OpenAI-compatible API for Qwen3 models.
# Configure the API key via DASHSCOPE_API_KEY or override using env vars at runtime.
name: "DashScope"
provider_type: "dashscope"

name: DashScope
provider_type: dashscope
enabled: true
enabled: true # Default is true if not specified

# Connection parameters for DashScope compatible-mode endpoint.
# Connection Configuration
connection:
base_url: https://dashscope.aliyuncs.com/compatible-mode/v1
api_key_env: DASHSCOPE_API_KEY
# DashScope (Alibaba Cloud Bailian) native HTTP uses fixed official host.
# base_url is not required; leave empty unless using a proxy.
api_key_env: "DASHSCOPE_API_KEY"
base_url: ""

# Default chat model used when no model_id is specified.
default_model: qwen3-max
# Default chat model if none specified
default_model: "qwen3-max"

# Global default inference parameters.
# Model Parameters Defaults
defaults:
temperature: 0.7
max_tokens: 16384

# Commonly used Qwen3 models available via DashScope.
# Available Models
models:
- id: qwen3-max
name: Qwen3 Max
- id: "qwen3-max"
name: "Qwen3 Max"
context_length: 256000
max_output_tokens: 16384
description: Qwen3 Max model with strongest performance
description: "Qwen3 Max model with strongest performance"
supported_inputs:
- text
supported_outputs:
- text

- id: qwen3-max-preview
name: Qwen3 Max Preview
- id: "qwen3-max-preview"
name: "Qwen3 Max Preview"
context_length: 256000
max_output_tokens: 16384
description: Qwen3 Max preview model
description: "Qwen3 Max preview model"
supported_inputs:
- text
supported_outputs:
- text

- id: qwen-plus
name: Qwen Plus
- id: "qwen-plus"
name: "Qwen Plus"
context_length: 256000
max_output_tokens: 16384
description: Qwen Plus model with balanced performance
description: "Qwen Plus model with balanced performance"
supported_inputs:
- text
supported_outputs:
- text

- id: qwen-flash
name: Qwen Flash
- id: "qwen-flash"
name: "Qwen Flash"
context_length: 256000
max_output_tokens: 16384
description: Qwen Flash model optimized for fast response
description: "Qwen Flash model optimized for fast response"
supported_inputs:
- text
supported_outputs:
- text

# Embedding configuration for DashScope text embedding models.
# ============================================
# Embedding Models Configuration
# ============================================
embedding:
default_model: text-embedding-v4
# Default embedding model
default_model: "text-embedding-v4"

# Default parameters
defaults:
dimensions: 2048
encoding_format: "float"

# Available embedding models
models:
- id: text-embedding-v4
name: Text Embedding V4
- id: "text-embedding-v4"
name: "Text Embedding V4"
dimensions: 2048
max_input: 8192
description: DashScope text embedding v4 model (latest)
description: "DashScope text embedding v4 model (latest)"

- id: text-embedding-v3
name: Text Embedding V3
- id: "text-embedding-v3"
name: "Text Embedding V3"
dimensions: 1024
max_input: 8192
description: DashScope text embedding v3 model
description: "DashScope text embedding v3 model"
4 changes: 4 additions & 0 deletions python/valuecell/adapters/models/__init__.py
@@ -30,6 +30,8 @@
OpenAIProvider,
OpenRouterProvider,
SiliconFlowProvider,
create_embedder,
create_embedder_for_agent,
create_model,
create_model_for_agent,
get_model_factory,
@@ -52,4 +54,6 @@
# Convenience functions
"create_model",
"create_model_for_agent",
"create_embedder",
"create_embedder_for_agent",
]
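
With create_embedder and create_embedder_for_agent now re-exported from the package root, callers can import them alongside create_model. The diff does not show their signatures, so the call below is a hypothetical usage sketch only; the keyword arguments and the embed method are assumptions:

# Hypothetical usage sketch: only the import path comes from this PR;
# the arguments and the embed() call are assumed for illustration.
from valuecell.adapters.models import create_embedder

embedder = create_embedder(provider="dashscope", model_id="text-embedding-v4")
vectors = embedder.embed(["hello world"])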