Changes from all commits
38 commits
0d45248  Previews  (ChidcGithub, Jan 31, 2026)
7752d4c  Remove outdated API and performance sections from README  (ChidcGithub, Jan 31, 2026)
5de751e  Update README.md  (ChidcGithub, Jan 31, 2026)
f78a118  Update README.md  (ChidcGithub, Jan 31, 2026)
80b50ab  Update README.md  (ChidcGithub, Jan 31, 2026)
dc15e40  Update README.md  (ChidcGithub, Jan 31, 2026)
107f12c  Update README.md  (ChidcGithub, Jan 31, 2026)
ce80fea  Update README.md  (ChidcGithub, Jan 31, 2026)
7073a5a  Update README.md  (ChidcGithub, Jan 31, 2026)
71d5695  Add space after colon  (ChidcGithub, Jan 31, 2026)
f541e0c  Update README.md  (ChidcGithub, Jan 31, 2026)
b34c6c1  Update README.md  (ChidcGithub, Jan 31, 2026)
48cd012  Update README.md  (ChidcGithub, Jan 31, 2026)
da8d5e8  Update README.md  (ChidcGithub, Jan 31, 2026)
2fe92c1  Update README.md  (ChidcGithub, Jan 31, 2026)
1d02c06  Update README.md  (ChidcGithub, Jan 31, 2026)
8894335  Update README.md  (ChidcGithub, Jan 31, 2026)
6a9429c  Update README.md  (ChidcGithub, Jan 31, 2026)
ac39018  Update README.md  (ChidcGithub, Jan 31, 2026)
9fe1e19  Update README.md  (ChidcGithub, Jan 31, 2026)
ca970c4  Update README.md  (ChidcGithub, Jan 31, 2026)
19b8bf3  Update README.md  (ChidcGithub, Jan 31, 2026)
02f594d  Update README.md  (ChidcGithub, Jan 31, 2026)
dbb3591  Update README.md  (ChidcGithub, Jan 31, 2026)
cbd2255  Update README.md  (ChidcGithub, Jan 31, 2026)
38a5d57  Update README.md  (ChidcGithub, Feb 1, 2026)
49e08b8  Update README.md  (ChidcGithub, Feb 1, 2026)
00f7e16  Update fmt.Println message from 'Hello' to 'Goodbye'  (ChidcGithub, Feb 2, 2026)
6050803  Update README.md  (ChidcGithub, Feb 3, 2026)
1485a12  Update README with compatibility and project details  (ChidcGithub, Feb 3, 2026)
0ced2eb  Update README.md progress  (ChidcGithub, Feb 5, 2026)
893ff20  Update README.md  (ChidcGithub, Feb 5, 2026)
7483010  Add important information  (ChidcGithub, Feb 5, 2026)
3470632  Update README.MD Version Code  (ChidcGithub, Feb 5, 2026)
4c53651  Fix README.md  (ChidcGithub, Feb 5, 2026)
2b16719  Update README.md  (ChidcGithub, Feb 7, 2026)
a7c2257  Update README.md  (ChidcGithub, Feb 7, 2026)
2eb1d38  Update README.md  (ChidcGithub, Feb 7, 2026)
2,221 changes: 128 additions & 2,093 deletions README.md

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions Start.ps1
@@ -105,10 +105,10 @@ Write-Host "系统信息" -ForegroundColor Cyan
Write-Host "==============================================================================================" -ForegroundColor Cyan
Write-Host ""
Write-Host "支持模式:" -ForegroundColor Yellow
Write-Host " [OK] NVIDIA GPU (CUDA)" -ForegroundColor Green
Write-Host " [OK] AMD GPU (ROCm)" -ForegroundColor Green
Write-Host " [OK] Intel GPU (CPU 回退)" -ForegroundColor Green
Write-Host " [OK] CPU 模式" -ForegroundColor Green
Write-Host "NVIDIA GPU (CUDA)" -ForegroundColor Green
Write-Host "AMD GPU (ROCm)" -ForegroundColor Green
Write-Host "Intel GPU (CPU 回退)" -ForegroundColor Green
Write-Host "CPU 模式" -ForegroundColor Green
Write-Host ""
Write-Host "==============================================================================================" -ForegroundColor Cyan
Write-Host ""
@@ -118,7 +118,7 @@ Write-Host ""
Write-Host "==============================================================================================" -ForegroundColor Cyan
Write-Host ""

& $pythonPath "app.py" --enable-auto-tune
& $pythonPath "app.py" --enable-auto-tune --config config/config.yaml

# 错误处理
if ($LASTEXITCODE -ne 0) {
848 changes: 827 additions & 21 deletions app.py

Large diffs are not rendered by default.

91 changes: 33 additions & 58 deletions config.yaml
@@ -1,58 +1,33 @@
# MLSharp-3D-Maker 配置文件
# 支持的格式: YAML

# 服务配置
server:
  host: "127.0.0.1" # 服务主机地址
  port: 8000 # 服务端口

# 启动模式
mode: "auto" # 启动模式: auto, gpu, cpu, nvidia, amd

# 浏览器配置
browser:
  auto_open: true # 自动打开浏览器

# GPU 优化配置
gpu:
  enable_amp: true # 启用混合精度推理 (AMP)
  enable_cudnn_benchmark: true # 启用 cuDNN Benchmark
  enable_tf32: true # 启用 TensorFloat32

# 日志配置
logging:
  level: "INFO" # 日志级别: DEBUG, INFO, WARNING, ERROR
  console: true # 控制台输出
  file: false # 文件输出

# 模型配置
model:
  checkpoint: "model_assets/sharp_2572gikvuh.pt" # 模型权重路径
  temp_dir: "temp_workspace" # 临时工作目录

# 推理配置
inference:
  input_size: [1536, 1536] # 输入图像尺寸 [宽度, 高度] (默认: 1536x1536)

# 优化配置
optimization:
  gradient_checkpointing: false # 启用梯度检查点(减少显存占用,但会略微降低推理速度)
  checkpoint_segments: 3 # 梯度检查点分段数(暂未使用)

# 缓存配置
cache:
  enabled: true # 启用推理缓存(默认:启用)
  size: 100 # 缓存最大条目数(默认:100)

# 监控配置
monitoring:
  enabled: true # 启用监控
  enable_gpu: true # 启用 GPU 监控
  metrics_path: "/metrics" # Prometheus 指标端点路径

# 性能配置
performance:
  max_workers: 4 # 最大工作线程数
  max_concurrency: 10 # 最大并发数
  timeout_keep_alive: 30 # 保持连接超时(秒)
  max_requests: 1000 # 最大请求数
browser:
  auto_open: true
cache:
  enabled: true
  size: 100
gpu:
  enable_amp: true
  enable_cudnn_benchmark: true
  enable_tf32: true
inference:
  input_size:
  - 1536
  - 1536
mode: auto
monitoring:
  enable_gpu: true
  enabled: true
  metrics_path: /metrics
performance_cache:
  best_config:
    amp: false
    cudnn_benchmark: false
    description: 仅启用 TensorFloat32
    name: 仅 TF32
    tf32: true
  gpu:
    compute_capability: 89
    name: NVIDIA GeForce RTX 4060 Laptop GPU
    vendor: NVIDIA
  last_test: '2026-01-31T04:59:43.901644+00:00'
server:
  host: 127.0.0.1
  port: 8000
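
Note on the config change (editor's sketch, not part of the PR): Start.ps1 now passes --config config/config.yaml to app.py, and config.yaml itself is flattened to plain keys with the comments removed. The snippet below illustrates how such a file could be consumed; it assumes app.py parses the flag with argparse and reads the YAML with PyYAML, neither of which is shown in this diff.

# Hypothetical loader for the flattened config above; names and flag handling are assumptions.
import argparse

import yaml  # PyYAML is assumed to be available; it is not visible in the requirements.txt hunk


def load_config():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="config.yaml")         # flag added to Start.ps1 in this PR
    parser.add_argument("--enable-auto-tune", action="store_true")
    args = parser.parse_args()

    with open(args.config, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)

    # The keys below appear in the new config.yaml shown above.
    host = cfg["server"]["host"]                    # 127.0.0.1
    port = cfg["server"]["port"]                    # 8000
    width, height = cfg["inference"]["input_size"]  # [1536, 1536]
    return cfg, host, port, (width, height)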
44 changes: 43 additions & 1 deletion gpu_utils.py
@@ -68,18 +68,55 @@ def check_rocm_available():
    except Exception:
        return False

def check_adreno_available():
    """检查 Adreno (Snapdragon) GPU 是否可用"""
    try:
        import torch
        # Snapdragon GPU 通常通过 OpenCL/Vulkan,而不是 CUDA
        # 检查是否为 Android 环境
        if hasattr(torch, 'backends') and hasattr(torch.backends, 'opencl'):
            if torch.backends.opencl.is_available():
                return True
        # 检查是否有 qnn 或 snpe 相关模块
        try:
            import importlib
            if importlib.util.find_spec('qnn') or importlib.util.find_spec('snpe'):
                return True
        except:
            pass
        return False
    except Exception:
        return False


def get_gpu_info():
    """获取 GPU 详细信息"""
    try:
        import torch
        if not torch.cuda.is_available():
            # 检查是否为 Snapdragon GPU (通过 OpenCL)
            if check_adreno_available():
                return {
                    'name': 'Snapdragon Adreno GPU',
                    'count': 1,
                    'cuda_version': None,
                    'is_rocm': False,
                    'is_adreno': True,
                    'vendor': 'Qualcomm',
                    'compute_capability': 0,
                    'major': 0,
                    'minor': 0,
                    'memory_gb': 0,
                    'multi_processor_count': 0,
                }
            return None

        gpu_info = {
            'name': torch.cuda.get_device_name(0),
            'count': torch.cuda.device_count(),
            'cuda_version': torch.version.cuda,
            'is_rocm': check_rocm_available(),
            'is_adreno': False,
        }

        props = torch.cuda.get_device_properties(0)
@@ -101,11 +138,16 @@ def get_gpu_vendor(gpu_name=None):
        gpu_info = get_gpu_info()
        if gpu_info:
            gpu_name = gpu_info.get('name', '')
            # 直接从 gpu_info 检查 is_adreno 标记
            if gpu_info.get('is_adreno'):
                return 'Qualcomm'
        else:
            return 'Unknown'

    name_lower = gpu_name.lower()
    if 'nvidia' in name_lower or 'geforce' in name_lower or 'quadro' in name_lower or 'tesla' in name_lower or 'rtx' in name_lower or 'gtx' in name_lower:
    if 'snapdragon' in name_lower or 'adreno' in name_lower or 'qualcomm' in name_lower:
        return 'Qualcomm'
    elif 'nvidia' in name_lower or 'geforce' in name_lower or 'quadro' in name_lower or 'tesla' in name_lower or 'rtx' in name_lower or 'gtx' in name_lower:
        return 'NVIDIA'
    elif 'amd' in name_lower or 'radeon' in name_lower or 'rx' in name_lower:
        return 'AMD'
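
For orientation, here is an illustrative usage of the helpers this diff adds or extends in gpu_utils.py. It is not part of the PR; the return values are inferred from the code above.

# Example only: probing the detected device with the new Adreno-aware helpers.
from gpu_utils import check_adreno_available, get_gpu_info, get_gpu_vendor

info = get_gpu_info()        # dict describing the device, or None when no CUDA/Adreno GPU is found
vendor = get_gpu_vendor()    # e.g. 'NVIDIA', 'AMD', 'Qualcomm', or 'Unknown'

if info and info.get('is_adreno'):
    print("Snapdragon Adreno GPU detected (OpenCL/Vulkan path, no CUDA)")
else:
    print("Vendor:", vendor, "| Adreno available:", check_adreno_available())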
11 changes: 11 additions & 0 deletions requirements.txt
@@ -2,10 +2,21 @@
torch>=2.0.0
torchvision>=0.15.0

# ONNX Runtime for GPU Acceleration
onnxruntime-gpu>=1.16.0; platform_system == "Windows"
onnxruntime>=1.16.0; platform_system != "Windows"

# FastAPI and Server
fastapi>=0.128.0
uvicorn[standard]>=0.40.0
python-multipart>=0.0.21
pydantic>=2.0.0

# Caching
redis>=5.0.0

# Webhook and HTTP
httpx>=0.25.0

# 3D Gaussian Splatting
sharp>=0.1.0
4 changes: 2 additions & 2 deletions viewer.html
@@ -1404,7 +1404,7 @@ <h1>3DGS.ART</h1>
    formData.append('file', file);

    try {
        const response = await fetch('/api/predict', {
        const response = await fetch('/v1/predict', {
            method: 'POST',
            body: formData
        });
@@ -1425,7 +1425,7 @@ <h1>3DGS.ART</h1>
    } catch (err) {
        clearInterval(timer);
        console.error(err);
        alert("Preview Mode: Backend not connected.\nUse '?url=...' to load external models.");
        alert("Error: " + err.message + "\n\nPreview Mode: Backend not connected.\nUse '?url=...' to load external models.");
        loadingSys.stop();
    }
}
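
The viewer change above renames the upload endpoint from /api/predict to /v1/predict. As a rough sketch of calling the renamed endpoint outside the browser, the snippet below uses httpx (added to requirements.txt in this PR); the host and port come from config.yaml, the multipart field name 'file' comes from viewer.html, and the response being JSON is an assumption, since the diff does not show the server-side handler.

# Hypothetical client for the renamed endpoint; the response schema is assumed, not shown in the PR.
import httpx


def predict(image_path: str, base_url: str = "http://127.0.0.1:8000") -> dict:
    with open(image_path, "rb") as f:
        files = {"file": (image_path, f, "image/png")}   # field name matches viewer.html's formData
        resp = httpx.post(f"{base_url}/v1/predict", files=files, timeout=300.0)
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    print(predict("example.png"))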