From ff2ac085316d98cb96195e183b77697900a6c247 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Sep 2024 14:53:23 +0300 Subject: [PATCH 1/9] CHINESE-TRANSLATION-1 --- LaunchFile/app.py | 17 +- Start.bat | 2 +- Start.sh | 2 +- translations/zh.json | 624 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 639 insertions(+), 6 deletions(-) create mode 100644 translations/zh.json diff --git a/LaunchFile/app.py b/LaunchFile/app.py index 9762e318..73938c60 100644 --- a/LaunchFile/app.py +++ b/LaunchFile/app.py @@ -330,7 +330,8 @@ def load_translation(lang): translations = { "EN": {}, - "RU": load_translation("ru") + "RU": load_translation("ru"), + "ZH": load_translation("zh") } def _(text, lang="EN"): @@ -10233,8 +10234,16 @@ def reload_interface(): wiki_interface = gr.Interface( fn=get_wiki_content, inputs=[ - gr.Textbox(label=_("Online Wiki", lang), value="https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/EN‐Wiki" if lang == "EN" else "https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/RU‐Wiki", interactive=False), - gr.Textbox(label=_("Local Wiki", lang), value="Wikies/WikiEN.md" if lang == "EN" else "Wikies/WikiRU.md", interactive=False) + gr.Textbox(label=_("Online Wiki", lang), value=( + "https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/EN‐Wiki" if lang == "EN" else + "https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/ZH‐Wiki" if lang == "ZH" else + "https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/RU‐Wiki" + ), interactive=False), + gr.Textbox(label=_("Local Wiki", lang), value=( + "Wikies/WikiEN.md" if lang == "EN" else + "Wikies/WikiZH.md" if lang == "ZH" else + "Wikies/WikiRU.md" + ), interactive=False) ], outputs=gr.HTML(label=_("Wiki Content", lang)), title=_("NeuroSandboxWebUI - Wiki", lang), @@ -10289,7 +10298,7 @@ def reload_interface(): settings_interface = gr.Interface( fn=settings_interface, inputs=[ - gr.Radio(choices=["EN", "RU"], label=_("Language", lang), value=settings['language']), + gr.Radio(choices=["EN", "RU", "ZH"], label=_("Language", lang), value=settings['language']), gr.Radio(choices=["True", "False"], label=_("Share Mode", lang), value="False"), gr.Radio(choices=["True", "False"], label=_("Debug Mode", lang), value="False"), gr.Radio(choices=["True", "False"], label=_("Monitoring Mode", lang), value="False"), diff --git a/Start.bat b/Start.bat index ae6c57e3..5d8577ef 100644 --- a/Start.bat +++ b/Start.bat @@ -25,7 +25,7 @@ echo Logging in to Hugging Face... huggingface-cli login --token %HF_TOKEN% --add-to-git-credential cls -echo Launching app.py... +echo Launching NeuroSandboxWebUI... start /b py -c "import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__name__), 'LaunchFile')); import app" call "%CURRENT_DIR%venv\Scripts\deactivate.bat" \ No newline at end of file diff --git a/Start.sh b/Start.sh index fa723546..07eef4a4 100644 --- a/Start.sh +++ b/Start.sh @@ -28,7 +28,7 @@ echo "Logging in to Hugging Face..." huggingface-cli login --token "$HF_TOKEN" --add-to-git-credential clear -echo "Launching app.py..." +echo "Launching NeuroSandboxWebUI..." 
python -c "import os, sys; sys.path.insert(0, os.path.join('$(dirname "${BASH_SOURCE[0]}")', 'LaunchFile')); import app" & deactivate \ No newline at end of file diff --git a/translations/zh.json b/translations/zh.json new file mode 100644 index 00000000..0be01067 --- /dev/null +++ b/translations/zh.json @@ -0,0 +1,624 @@ +{ + "Enter your request": "输入您的请求", + "Enter your system prompt": "输入您的系统提示", + "Record your request (optional)": "记录您的请求(可选)", + "Select LLM model": "选择LLM模型", + "Select LoRA model (optional)": "选择LoRA模型(可选)", + "LLM and TTS Settings": "LLM和TTS设置", + "TTS and STT Settings": "TTS和STT设置", + "SeamlessM4Tv2 Settings": "SeamlessM4Tv2设置", + "Select TTS output format": "选择TTS输出格式", + "Select STT output format": "选择STT输出格式", + "Enable WebSearch": "启用网络搜索", + "Enable LibreTranslate": "启用LibreTranslate", + "Select target language": "选择目标语言", + "Enable OpenParse": "启用OpenParse", + "Upload PDF file (for OpenParse)": "上传PDF文件(用于OpenParse)", + "Enable Multimodal": "启用多模态", + "Upload your image (for Multimodal)": "上传您的图片(用于多模态)", + "Enable TTS": "启用TTS", + "

LLM Settings

": "LLM设置", + "Max length (for transformers type models)": "最大长度(用于transformers类型模型)", + "Max tokens (for llama type models)": "最大令牌数(用于llama类型模型)", + "Temperature": "温度", + "Top P": "Top P", + "Select chat history format": "选择聊天历史格式", + "

TTS Settings

": "TTS设置", + "Select voice": "选择声音", + "Select language": "选择语言", + "TTS Temperature": "TTS温度", + "TTS Top P": "TTS Top P", + "TTS Top K": "TTS Top K", + "TTS Speed": "TTS速度", + "Select output format": "选择输出格式", + "LLM text response": "LLM文本回复", + "LLM audio response": "LLM音频回复", + "NeuroSandboxWebUI - LLM": "NeuroSandboxWebUI - LLM", + "This user interface allows you to enter any text or audio and receive generated response. You can select the LLM model, avatar, voice and language for tts from the drop-down lists. You can also customize the model settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本或音频并接收生成的回复。您可以从下拉列表中选择LLM模型、头像、声音和TTS的语言。您还可以使用滑块自定义模型设置。试试看会发生什么!", + "Stop": "停止", + "Generate": "生成", + "Enter text for TTS": "输入TTS文本", + "Record audio for STT": "录制STT音频", + "TTS Audio": "TTS音频", + "STT Text": "STT文本", + "NeuroSandboxWebUI - TTS-STT": "NeuroSandboxWebUI - TTS-STT", + "This user interface allows you to enter text for Text-to-Speech(CoquiTTS) and record audio for Speech-to-Text(OpenAIWhisper). For TTS, you can select the voice and language, and customize the generation settings from the sliders. For STT, simply record your audio and the spoken text will be displayed. Try it and see what happens!": "此用户界面允许您输入文本用于文本到语音转换(CoquiTTS)和录制音频用于语音到文本转换(OpenAIWhisper)。对于TTS,您可以选择声音和语言,并使用滑块自定义生成设置。对于STT,只需录制您的音频,spoken文本将被显示。试试看会发生什么!", + "Enter text to synthesize": "输入要合成的文本", + "Synthesized speech": "合成的语音", + "Message": "消息", + "NeuroSandboxWebUI - MMS Text-to-Speech": "NeuroSandboxWebUI - MMS文本到语音", + "Generate speech from text using MMS TTS models.": "使用MMS TTS模型从文本生成语音。", + "Upload or record audio": "上传或录制音频", + "Transcription": "转录", + "NeuroSandboxWebUI - MMS Speech-to-Text": "NeuroSandboxWebUI - MMS语音到文本", + "Transcribe speech to text using MMS STT model.": "使用MMS STT模型将语音转录为文本。", + "Text-to-Speech": "文本到语音", + "Speech-to-Text": "语音到文本", + "Input Type": "输入类型", + "Input Text": "输入文本", + "Input Audio": "输入音频", + "Select source language": "选择源语言", + "Source Language": "源语言", + "Target Language": "目标语言", + "Dataset Language": "数据集语言", + "Enable Speech Generation": "启用语音生成", + "Speaker ID": "说话人ID", + "Text Num Beams": "文本束搜索数", + "Enable Text Sampling": "启用文本采样", + "Enable Speech Sampling": "启用语音采样", + "Speech Temperature": "语音温度", + "Text Temperature": "文本温度", + "Enable Both Generation": "启用两种生成", + "Task Type": "任务类型", + "Text Output Format": "文本输出格式", + "Audio Output Format": "音频输出格式", + "Generated Text": "生成的文本", + "Generated Audio": "生成的音频", + "NeuroSandboxWebUI - SeamlessM4Tv2": "NeuroSandboxWebUI - SeamlessM4Tv2", + "This interface allows you to use the SeamlessM4Tv2 model for various translation and speech tasks.": "此界面允许您使用SeamlessM4Tv2模型进行各种翻译和语音任务。", + "Translate": "翻译", + "Enter text to translate": "输入要翻译的文本", + "Enable translate history save": "启用翻译历史保存", + "Select translate history format": "选择翻译历史格式", + "Upload text file (optional)": "上传文本文件(可选)", + "Translated text": "翻译后的文本", + "Additional LibreTranslate Settings": "额外的LibreTranslate设置", + "NeuroSandboxWebUI - LibreTranslate": "NeuroSandboxWebUI - LibreTranslate", + "This user interface allows you to enter text and translate it using LibreTranslate. Select the source and target languages and click Submit to get the translation. 
Try it and see what happens!": "此用户界面允许您输入文本并使用LibreTranslate进行翻译。选择源语言和目标语言,然后点击提交以获取翻译。试试看会发生什么!", + "Enter your prompt": "输入您的提示", + "Enter your negative prompt": "输入您的负面提示", + "Select StableDiffusion model": "选择StableDiffusion模型", + "Select VAE model (optional)": "选择VAE模型(可选)", + "Select LORA models (optional)": "选择LORA模型(可选)", + "LoRA Scales": "LoRA比例", + "Select Embedding models (optional)": "选择嵌入模型(可选)", + "

StableDiffusion Settings

": "StableDiffusion设置", + "StableDiffusion Settings": "StableDiffusion设置", + "Strength": "强度", + "Additional StableDiffusion Settings": "额外的StableDiffusion设置", + "Select scheduler": "选择调度器", + "Steps": "步骤", + "CFG": "CFG", + "Width": "宽度", + "Height": "高度", + "Clip skip": "Clip跳过", + "Number of images to generate": "要生成的图像数量", + "Seed (optional)": "种子(可选)", + "Stop generation": "停止生成", + "Enable FreeU": "启用FreeU", + "FreeU-S1": "FreeU-S1", + "FreeU-S2": "FreeU-S2", + "FreeU-B1": "FreeU-B1", + "FreeU-B2": "FreeU-B2", + "Enable SAG": "启用SAG", + "SAG Scale": "SAG比例", + "Enable PAG": "启用PAG", + "PAG Scale": "PAG比例", + "Enable Token Merging": "启用令牌合并", + "Token Merging Ratio": "令牌合并比率", + "Enable DeepCache": "启用DeepCache", + "DeepCache Interval": "DeepCache间隔", + "DeepCache BranchID": "DeepCache分支ID", + "Enable T-GATE": "启用T-GATE", + "T-GATE steps": "T-GATE步骤", + "Enable MagicPrompt": "启用MagicPrompt", + "MagicPrompt Max New Tokens": "MagicPrompt最大新令牌数", + "Generated images": "生成的图像", + "Generation process": "生成过程", + "NeuroSandboxWebUI - StableDiffusion (txt2img)": "NeuroSandboxWebUI - StableDiffusion(文本到图像)", + "This user interface allows you to enter any text and generate images using StableDiffusion. You can select the model and customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本并使用StableDiffusion生成图像。您可以选择模型并使用滑块自定义生成设置。试试看会发生什么!", + "Initial image": "初始图像", + "Strength (Initial image)": "强度(初始图像)", + "NeuroSandboxWebUI - StableDiffusion (img2img)": "NeuroSandboxWebUI - StableDiffusion(图像到图像)", + "This user interface allows you to enter any text and image to generate new images using StableDiffusion. You can select the model and customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本和图像,使用StableDiffusion生成新图像。您可以选择模型并使用滑块自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - StableDiffusion (depth2img)": "NeuroSandboxWebUI - StableDiffusion(深度到图像)", + "This user interface allows you to enter a prompt, an initial image to generate depth-aware images using StableDiffusion. Try it and see what happens!": "此用户界面允许您输入提示和初始图像,使用StableDiffusion生成深度感知图像。试试看会发生什么!", + "Input image": "输入图像", + "Num inference steps": "推理步骤数", + "Ensemble size": "集成大小", + "Depth image": "深度图像", + "Normals image": "法线图像", + "NeuroSandboxWebUI - Marigold": "NeuroSandboxWebUI - Marigold", + "This interface allows you to generate depth and normal maps using Marigold models. Upload an image and adjust the inference steps and ensemble size. The model will generate both depth and normal maps.": "此界面允许您使用Marigold模型生成深度和法线贴图。上传图像并调整推理步骤和集成大小。模型将生成深度和法线贴图。", + "NeuroSandboxWebUI - StableDiffusion (pix2pix)": "NeuroSandboxWebUI - StableDiffusion(像素到像素)", + "This user interface allows you to enter a prompt and an initial image to generate new images using Pix2Pix. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入提示和初始图像,使用Pix2Pix生成新图像。您可以使用滑块自定义生成设置。试试看会发生什么!", + "Select ControlNet model": "选择ControlNet模型", + "ControlNet conditioning scale": "ControlNet条件缩放", + "NeuroSandboxWebUI - StableDiffusion (controlnet)": "NeuroSandboxWebUI - StableDiffusion(ControlNet)", + "This user interface allows you to generate images using ControlNet models. Upload an initial image, enter a prompt, select a Stable Diffusion model, and customize the generation settings. 
Try it and see what happens!": "此用户界面允许您使用ControlNet模型生成图像。上传初始图像,输入提示,选择Stable Diffusion模型,并自定义生成设置。试试看会发生什么!", + "Prompt (optional)": "提示(可选)", + "Upscale size": "放大尺寸", + "Upscale-latent Settings": "潜在空间放大设置", + "Pix2Pix Settings": "Pix2Pix设置", + "Upscale-SUPIR Settings": "SUPIR放大设置", + "Inpaint Settings": "修复设置", + "Outpaint Settings": "扩绘设置", + "DiffEdit Settings": "DiffEdit设置", + "SD-Video Settings": "SD-Video设置", + "LDM3D Settings": "LDM3D设置", + "StableDiffusion3 Settings": "StableDiffusion3设置", + "ControlNet control images": "ControlNet控制图像", + "StableCascade Settings": "StableCascade设置", + "Kandinsky Settings": "Kandinsky设置", + "Generated image": "生成的图像", + "Generated image (Stage I)": "生成的图像(第一阶段)", + "Generated image (Stage II)": "生成的图像(第二阶段)", + "Generated image (Stage III)": "生成的图像(第三阶段)", + "Max Sequence Length": "最大序列长度", + "Wav2Lip Settings": "Wav2Lip设置", + "Pads": "填充", + "Strength (Video to enhance)": "强度(要增强的视频)", + "StableFast3D Settings": "StableFast3D设置", + "Initial image (optional)": "初始图像(可选)", + "Inference steps": "推理步骤", + "AudioCraft Settings": "AudioCraft设置", + "NeuroSandboxWebUI - StableDiffusion (upscale-latent)": "NeuroSandboxWebUI - StableDiffusion(潜在空间放大)", + "This user interface allows you to upload an image and latent-upscale it using x2 or x4 upscale factor": "此用户界面允许您上传图像并使用2倍或4倍放大因子在潜在空间中放大它", + "Upscale": "放大", + "SUPIR model": "SUPIR模型", + "Upscale factor": "放大因子", + "Minimum size": "最小尺寸", + "EDM steps": "EDM步骤", + "S Stage1": "S阶段1", + "S Churn": "S扰动", + "S Noise": "S噪声", + "S CFG": "S CFG", + "S Stage2": "S阶段2", + "Prompt": "提示", + "Negative Prompt": "负面提示", + "Color Fix Type": "颜色修复类型", + "Enable Linearly (with sigma)": "启用线性(带sigma)", + "Linear CFG": "线性CFG", + "Linear S Stage2": "线性S阶段2", + "SPT Linear CFG": "SPT线性CFG", + "SPT Linear S Stage2": "SPT线性S阶段2", + "NeuroSandboxWebUI - Upscale (SUPIR)": "NeuroSandboxWebUI - 放大(SUPIR)", + "This user interface allows you to upscale images using SUPIR (Super-Resolution Plugin). Upload an image and customize the upscaling settings. Try it and see what happens!": "此用户界面允许您使用SUPIR(超分辨率插件)放大图像。上传图像并自定义放大设置。试试看会发生什么!", + "Refined image": "精细化图像", + "NeuroSandboxWebUI - SDXL Refiner": "NeuroSandboxWebUI - SDXL精细化器", + "This interface allows you to refine images using the SDXL Refiner model. Enter a prompt, upload an initial image, and see the refined result.": "此界面允许您使用SDXL精细化器模型细化图像。输入提示,上传初始图像,查看精细化结果。", + "Refine": "精细化", + "Mask image": "蒙版图像", + "Mask Blur Factor": "蒙版模糊因子", + "Select Inpaint model": "选择修复模型", + "NeuroSandboxWebUI - StableDiffusion (inpaint)": "NeuroSandboxWebUI - StableDiffusion(修复)", + "This user interface allows you to enter a prompt, an initial image, and a mask image to inpaint using StableDiffusion. You can select the model and customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入提示、初始图像和蒙版图像,使用StableDiffusion进行修复。您可以选择模型并使用滑块自定义生成设置。试试看会发生什么!", + "Outpaint direction": "扩绘方向", + "Expansion percentage": "扩展百分比", + "NeuroSandboxWebUI - StableDiffusion (outpaint)": "NeuroSandboxWebUI - StableDiffusion(扩绘)", + "This user interface allows you to expand an existing image using outpainting with StableDiffusion. Upload an image, enter a prompt, select a model type and direction to expand, and customize the generation settings. The image will be expanded according to the chosen percentage. 
Try it and see what happens!": "此用户界面允许您使用StableDiffusion的扩绘功能扩展现有图像。上传图像,输入提示,选择模型类型和扩展方向,并自定义生成设置。图像将根据选择的百分比进行扩展。试试看会发生什么!", + "Enter GLIGEN phrases": "输入GLIGEN短语", + "Enter GLIGEN boxes": "输入GLIGEN框", + "NeuroSandboxWebUI - StableDiffusion (gligen)": "NeuroSandboxWebUI - StableDiffusion(GLIGEN)", + "This user interface allows you to generate images using Stable Diffusion and insert objects using GLIGEN. Select the Stable Diffusion model, customize the generation settings, enter a prompt, GLIGEN phrases, and bounding boxes. Try it and see what happens!": "此用户界面允许您使用Stable Diffusion生成图像并使用GLIGEN插入对象。选择Stable Diffusion模型,自定义生成设置,输入提示、GLIGEN短语和边界框。试试看会发生什么!", + "Source Prompt": "源提示", + "Source Negative Prompt": "源负面提示", + "Target Prompt": "目标提示", + "Target Negative Prompt": "目标负面提示", + "Guidance Scale": "引导尺度", + "NeuroSandboxWebUI - StableDiffusion (DiffEdit)": "NeuroSandboxWebUI - StableDiffusion(DiffEdit)", + "This user interface allows you to edit images using DiffEdit. Upload an image, provide source and target prompts, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用DiffEdit编辑图像。上传图像,提供源和目标提示,并自定义生成设置。试试看会发生什么!", + "Conditioning Image": "条件图像", + "Conditioning Subject": "条件主体", + "Target Subject": "目标主体", + "Inference Steps": "推理步骤", + "Output Format": "输出格式", + "Generated Image": "生成的图像", + "NeuroSandboxWebUI - BlipDiffusion": "NeuroSandboxWebUI - BlipDiffusion", + "This interface allows you to generate images using BlipDiffusion. Upload a conditioning image, provide text prompts and subjects, and customize generation parameters.": "此界面允许您使用BlipDiffusion生成图像。上传条件图像,提供文本提示和主体,并自定义生成参数。", + "Initial GIF": "初始GIF", + "Strength (Initial GIF)": "强度(初始GIF)", + "Select Motion LORA (Optional)": "选择运动LORA(可选)", + "Frames": "帧数", + "AnimateDiff Settings": "AnimateDiff设置", + "Generated GIF": "生成的GIF", + "NeuroSandboxWebUI - StableDiffusion (animatediff)": "NeuroSandboxWebUI - StableDiffusion(AnimateDiff)", + "This user interface allows you to enter a prompt and generate animated GIFs using AnimateDiff. You can select the model and customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入提示并使用AnimateDiff生成动画GIF。您可以选择模型并使用滑块自定义生成设置。试试看会发生什么!", + "Video Length (frames)": "视频长度(帧数)", + "Video Duration (seconds)": "视频持续时间(秒)", + "HotShot-XL Settings": "HotShot-XL设置", + "NeuroSandboxWebUI - Hotshot-XL": "NeuroSandboxWebUI - Hotshot-XL", + "This user interface allows you to generate animated GIFs using Hotshot-XL. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Hotshot-XL生成动画GIF。输入提示并自定义生成设置。试试看会发生什么!", + "

SVD Settings (mp4)

": "SVD设置(mp4)", + "Motion Bucket ID": "运动桶ID", + "Noise Augmentation Strength": "噪声增强强度", + "FPS": "帧率", + "Decode Chunk Size": "解码块大小", + "

I2VGen-xl Settings (gif)

": "I2VGen-xl设置(gif)", + "Generated video": "生成的视频", + "NeuroSandboxWebUI - StableDiffusion (video)": "NeuroSandboxWebUI - StableDiffusion(视频)", + "This user interface allows you to enter an initial image and generate a video using StableVideoDiffusion(mp4) and I2VGen-xl(gif). You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入初始图像并使用StableVideoDiffusion(mp4)和I2VGen-xl(gif)生成视频。您可以使用滑块自定义生成设置。试试看会发生什么!", + "Generated RGBs": "生成的RGB图像", + "Generated Depth images": "生成的深度图像", + "NeuroSandboxWebUI - StableDiffusion (LDM3D)": "NeuroSandboxWebUI - StableDiffusion(LDM3D)", + "This user interface allows you to enter a prompt and generate RGB and Depth images using LDM3D. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入提示并使用LDM3D生成RGB和深度图像。您可以使用滑块自定义生成设置。试试看会发生什么!", + "Max Length": "最大长度", + "NeuroSandboxWebUI - StableDiffusion 3 (txt2img)": "NeuroSandboxWebUI - StableDiffusion 3(文本到图像)", + "This user interface allows you to enter any text and generate images using Stable Diffusion 3. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本并使用Stable Diffusion 3生成图像。您可以使用滑块自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - StableDiffusion 3 (img2img)": "NeuroSandboxWebUI - StableDiffusion 3(图像到图像)", + "This user interface allows you to enter any text and initial image to generate new images using Stable Diffusion 3. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本和初始图像,使用Stable Diffusion 3生成新图像。您可以使用滑块自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - StableDiffusion 3 (ControlNet)": "NeuroSandboxWebUI - StableDiffusion 3(ControlNet)", + "This user interface allows you to use ControlNet models with Stable Diffusion 3. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您将ControlNet模型与Stable Diffusion 3一起使用。您可以使用滑块自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - StableDiffusion 3 (Inpaint)": "NeuroSandboxWebUI - StableDiffusion 3(修复)", + "This user interface allows you to perform inpainting using Stable Diffusion 3. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您使用Stable Diffusion 3进行图像修复。您可以使用滑块自定义生成设置。试试看会发生什么!", + "Prior Steps": "先验步骤", + "Prior Guidance Scale": "先验引导尺度", + "Decoder Steps": "解码器步骤", + "Decoder Guidance Scale": "解码器引导尺度", + "NeuroSandboxWebUI - StableDiffusion (cascade)": "NeuroSandboxWebUI - StableDiffusion(级联)", + "This user interface allows you to enter a prompt and generate images using Stable Cascade. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入提示并使用Stable Cascade生成图像。您可以使用滑块自定义生成设置。试试看会发生什么!", + "IP-Adapter Image": "IP-Adapter图像", + "T2I IP-Adapter Settings": "T2I IP-Adapter设置", + "NeuroSandboxWebUI - StableDiffusion (T2I IP-Adapter)": "NeuroSandboxWebUI - StableDiffusion(T2I IP-Adapter)", + "This user interface allows you to generate images using T2I IP-Adapter. Upload an image, enter a prompt, select a Stable Diffusion model, and customize the generation settings. 
Try it and see what happens!": "此用户界面允许您使用T2I IP-Adapter生成图像。上传图像,输入提示,选择Stable Diffusion模型,并自定义生成设置。试试看会发生什么!", + "Face image": "人脸图像", + "Scale (Face image)": "缩放(人脸图像)", + "IP-Adapter FaceID Settings": "IP-Adapter FaceID设置", + "NeuroSandboxWebUI - StableDiffusion (IP-Adapter FaceID)": "NeuroSandboxWebUI - StableDiffusion(IP-Adapter FaceID)", + "This user interface allows you to generate images using IP-Adapter FaceID. Upload a face image, enter a prompt, select a Stable Diffusion model, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用IP-Adapter FaceID生成图像。上传人脸图像,输入提示,选择Stable Diffusion模型,并自定义生成设置。试试看会发生什么!", + "Riffusion Settings": "Riffusion设置", + "NeuroSandboxWebUI - Riffusion (Text-to-Image)": "NeuroSandboxWebUI - Riffusion(文本到图像)", + "Generate a spectrogram image from text using Riffusion.": "使用Riffusion从文本生成频谱图图像。", + "Input spectrogram image": "输入频谱图图像", + "Generated audio": "生成的音频", + "NeuroSandboxWebUI - Riffusion (Image-to-Audio)": "NeuroSandboxWebUI - Riffusion(图像到音频)", + "Convert a spectrogram image to audio using Riffusion.": "使用Riffusion将频谱图图像转换为音频。", + "Convert": "转换", + "Input audio": "输入音频", + "Generated spectrogram image": "生成的频谱图图像", + "NeuroSandboxWebUI - Riffusion (Audio-to-Image)": "NeuroSandboxWebUI - Riffusion(音频到图像)", + "Convert audio to a spectrogram image using Riffusion.": "使用Riffusion将音频转换为频谱图图像。", + "Kandinsky Version": "Kandinsky版本", + "NeuroSandboxWebUI - Kandinsky (txt2img)": "NeuroSandboxWebUI - Kandinsky(文本到图像)", + "This user interface allows you to generate images using Kandinsky models. You can select between versions 2.1, 2.2, and 3, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Kandinsky模型生成图像。您可以在2.1、2.2和3版本之间选择,并自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - Kandinsky (img2img)": "NeuroSandboxWebUI - Kandinsky(图像到图像)", + "NeuroSandboxWebUI - Kandinsky (inpaint)": "NeuroSandboxWebUI - Kandinsky(修复)", + "This user interface allows you to perform inpainting using Kandinsky models. You can select between versions 2.1 and 2.2, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Kandinsky模型进行图像修复。您可以在2.1和2.2版本之间选择,并自定义生成设置。试试看会发生什么!", + "Select Flux model": "选择Flux模型", + "Select quantized Flux model (optional if enabled quantize)": "选择量化Flux模型(如果启用量化则可选)", + "Enable Quantize": "启用量化", + "Flux Settings": "Flux设置", + "NeuroSandboxWebUI - Flux": "NeuroSandboxWebUI - Flux", + "This user interface allows you to generate images using Flux models. You can select between Schnell and Dev models, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Flux模型生成图像。您可以在Schnell和Dev模型之间选择,并自定义生成设置。试试看会发生什么!", + "HunyuanDiT Settings": "HunyuanDiT设置", + "NeuroSandboxWebUI - HunyuanDiT (txt2img)": "NeuroSandboxWebUI - HunyuanDiT(文本到图像)", + "This user interface allows you to generate images using HunyuanDiT model. Enter a prompt (in English or Chinese) and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用HunyuanDiT模型生成图像。输入提示(英文或中文)并自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - HunyuanDiT (ControlNet)": "NeuroSandboxWebUI - HunyuanDiT(ControlNet)", + "This user interface allows you to generate images using HunyuanDiT ControlNet models. Enter a prompt, upload an input image, select a ControlNet model, and customize the generation settings. 
Try it and see what happens!": "此用户界面允许您使用HunyuanDiT ControlNet模型生成图像。输入提示,上传输入图像,选择ControlNet模型,并自定义生成设置。试试看会发生什么!", + "Lumina-T2X Settings": "Lumina-T2X设置", + "NeuroSandboxWebUI - Lumina-T2X": "NeuroSandboxWebUI - Lumina-T2X", + "This user interface allows you to generate images using the Lumina-T2X model. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Lumina-T2X模型生成图像。输入提示并自定义生成设置。试试看会发生什么!", + "Kolors Settings": "Kolors设置", + "NeuroSandboxWebUI - Kolors (txt2img)": "NeuroSandboxWebUI - Kolors(文本到图像)", + "This user interface allows you to generate images using the Kolors model. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Kolors模型生成图像。输入提示并自定义生成设置。试试看会发生什么!", + "NeuroSandboxWebUI - Kolors (img2img)": "NeuroSandboxWebUI - Kolors(图像到图像)", + "NeuroSandboxWebUI - Kolors (ip-adapter-plus)": "NeuroSandboxWebUI - Kolors(ip-adapter-plus)", + "AuraFlow Settings": "AuraFlow设置", + "Enable AuraSR": "启用AuraSR", + "NeuroSandboxWebUI - AuraFlow": "NeuroSandboxWebUI - AuraFlow", + "This user interface allows you to generate images using the AuraFlow model. Enter a prompt and customize the generation settings. You can also enable AuraSR for 4x upscaling of the generated image. Try it and see what happens!": "此用户界面允许您使用AuraFlow模型生成图像。输入提示并自定义生成设置。您还可以启用AuraSR对生成的图像进行4倍放大。试试看会发生什么!", + "Würstchen Settings": "Würstchen设置", + "NeuroSandboxWebUI - Würstchen": "NeuroSandboxWebUI - Würstchen", + "This user interface allows you to generate images using the Würstchen model. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Würstchen模型生成图像。输入提示并自定义生成设置。试试看会发生什么!", + "DeepFloyd IF Settings": "DeepFloyd IF设置", + "NeuroSandboxWebUI - DeepFloyd IF (txt2img)": "NeuroSandboxWebUI - DeepFloyd IF(文本到图像)", + "This user interface allows you to generate images using the DeepFloyd IF model. Enter a prompt and customize the generation settings. The process includes three stages of generation, each producing an image of increasing quality. Try it and see what happens!": "此用户界面允许您使用DeepFloyd IF模型生成图像。输入提示并自定义生成设置。生成过程包括三个阶段,每个阶段生成的图像质量逐步提高。试试看会发生什么!", + "NeuroSandboxWebUI - DeepFloyd IF (img2img)": "NeuroSandboxWebUI - DeepFloyd IF(图像到图像)", + "This interface allows you to generate images using DeepFloyd IF's image-to-image pipeline. Enter a prompt, upload an initial image, and customize the generation settings. The process includes three stages of generation, each producing an image of increasing quality. Try it and see what happens!": "此界面允许您使用DeepFloyd IF的图像到图像管道生成图像。输入提示,上传初始图像,并自定义生成设置。生成过程包括三个阶段,每个阶段生成的图像质量逐步提高。试试看会发生什么!", + "NeuroSandboxWebUI - DeepFloyd IF (inpaint)": "NeuroSandboxWebUI - DeepFloyd IF(修复)", + "This interface allows you to perform inpainting using DeepFloyd IF. Enter a prompt, upload an initial image and a mask image, and customize the generation settings. The process includes three stages of generation, each producing an image of increasing quality. Try it and see what happens!": "此界面允许您使用DeepFloyd IF进行图像修复。输入提示,上传初始图像和蒙版图像,并自定义生成设置。生成过程包括三个阶段,每个阶段生成的图像质量逐步提高。试试看会发生什么!", + "PixArt Version": "PixArt版本", + "PixArt Settings": "PixArt设置", + "NeuroSandboxWebUI - PixArt": "NeuroSandboxWebUI - PixArt", + "This user interface allows you to generate images using PixArt models. You can select between Alpha and Sigma versions, and customize the generation settings. 
Try it and see what happens!": "此用户界面允许您使用PixArt模型生成图像。您可以在Alpha和Sigma版本之间选择,并自定义生成设置。试试看会发生什么!", + "PlaygroundV2.5 Settings": "PlaygroundV2.5设置", + "NeuroSandboxWebUI - PlaygroundV2.5": "NeuroSandboxWebUI - PlaygroundV2.5", + "This user interface allows you to generate images using PlaygroundV2.5. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用PlaygroundV2.5生成图像。输入提示并自定义生成设置。试试看会发生什么!", + "Face Detection Batch Size": "人脸检测批量大小", + "Wav2Lip Batch Size": "Wav2Lip批量大小", + "Resize Factor": "调整大小因子", + "Crop": "裁剪", + "Enable no smooth": "启用无平滑", + "Generated lip-sync": "生成的唇形同步", + "NeuroSandboxWebUI - Wav2Lip": "NeuroSandboxWebUI - Wav2Lip", + "This user interface allows you to generate talking head videos by combining an image and an audio file using Wav2Lip. Upload an image and an audio file, and click Generate to create the talking head video. Try it and see what happens!": "此用户界面允许您使用Wav2Lip通过组合图像和音频文件生成说话的头像视频。上传图像和音频文件,然后点击生成以创建说话的头像视频。试试看会发生什么!", + "Animate": "动画", + "Source image": "源图像", + "Driving video": "驱动视频", + "NeuroSandboxWebUI - LivePortrait": "NeuroSandboxWebUI - LivePortrait", + "This user interface allows you to animate a source image based on the movements in a driving video using LivePortrait. Upload a source image and a driving video, then click Generate to create the animated video. Try it and see what happens!": "此用户界面允许您使用LivePortrait根据驱动视频中的动作为源图像制作动画。上传源图像和驱动视频,然后点击生成以创建动画视频。试试看会发生什么!", + "Number of Frames": "帧数", + "ModelScope Settings": "ModelScope设置", + "NeuroSandboxWebUI - ModelScope": "NeuroSandboxWebUI - ModelScope", + "This user interface allows you to generate videos using ModelScope. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用ModelScope生成视频。输入提示并自定义生成设置。试试看会发生什么!", + "Video to enhance (optional)": "要增强的视频(可选)", + "Enable Video Enhancement": "启用视频增强", + "ZeroScope 2 Settings": "ZeroScope 2设置", + "NeuroSandboxWebUI - ZeroScope 2": "NeuroSandboxWebUI - ZeroScope 2", + "This user interface allows you to generate and enhance videos using ZeroScope 2 models. You can enter a text prompt, upload an optional video for enhancement, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用ZeroScope 2模型生成和增强视频。您可以输入文本提示,上传可选的视频进行增强,并自定义生成设置。试试看会发生什么!", + "Select CogVideoX model version": "选择CogVideoX模型版本", + "CogVideoX Settings": "CogVideoX设置", + "NeuroSandboxWebUI - CogVideoX": "NeuroSandboxWebUI - CogVideoX", + "This user interface allows you to generate videos using CogVideoX. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用CogVideoX生成视频。输入提示并自定义生成设置。试试看会发生什么!", + "Video Length": "视频长度", + "Latte Settings": "Latte设置", + "NeuroSandboxWebUI - Latte": "NeuroSandboxWebUI - Latte", + "This user interface allows you to generate GIFs using Latte. Enter a prompt and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Latte生成GIF。输入提示并自定义生成设置。试试看会发生什么!", + "Texture Resolution": "纹理分辨率", + "Foreground Ratio": "前景比例", + "Remesh Option": "重新网格化选项", + "Generated 3D object": "生成的3D对象", + "NeuroSandboxWebUI - StableFast3D": "NeuroSandboxWebUI - StableFast3D", + "This user interface allows you to generate 3D objects from images using StableFast3D. Upload an image and customize the generation settings. 
Try it and see what happens!": "此用户界面允许您使用StableFast3D从图像生成3D对象。上传图像并自定义生成设置。试试看会发生什么!", + "Frame size": "帧大小", + "Shap-E Settings": "Shap-E设置", + "NeuroSandboxWebUI - Shap-E": "NeuroSandboxWebUI - Shap-E", + "This user interface allows you to generate 3D objects using Shap-E. You can enter a text prompt or upload an initial image, and customize the generation settings. Try it and see what happens!": "此用户界面允许您使用Shap-E生成3D对象。您可以输入文本提示或上传初始图像,并自定义生成设置。试试看会发生什么!", + "Input file (Image for 3D-U and 3D-P, MP4 video for 4D)": "输入文件(3D-U和3D-P使用图像,4D使用MP4视频)", + "Version": "版本", + "Elevation Degree (for 3D-P only)": "仰角(仅适用于3D-P)", + "Generated output": "生成的输出", + "NeuroSandboxWebUI - SV34D": "NeuroSandboxWebUI - SV34D", + "This interface allows you to generate 3D and 4D content using SV34D models. Upload an image (PNG, JPG, JPEG) for 3D-U and 3D-P versions, or an MP4 video for 4D version. Select the version and customize settings as needed.": "此界面允许您使用SV34D模型生成3D和4D内容。上传图像(PNG、JPG、JPEG)用于3D-U和3D-P版本,或上传MP4视频用于4D版本。选择版本并根据需要自定义设置。", + "NeuroSandboxWebUI - Zero123Plus": "NeuroSandboxWebUI - Zero123Plus", + "This user interface allows you to generate 3D-like images using Zero123Plus. Upload an input image and customize the number of inference steps. Try it and see what happens!": "此用户界面允许您使用Zero123Plus生成类3D图像。上传输入图像并自定义推理步骤数。试试看会发生什么!", + "Audio Length (seconds)": "音频长度(秒)", + "Audio Start (seconds)": "音频开始时间(秒)", + "Number of Waveforms": "波形数量", + "StableAudio Settings": "StableAudio设置", + "NeuroSandboxWebUI - StableAudio": "NeuroSandboxWebUI - StableAudio", + "This user interface allows you to enter any text and generate audio using StableAudio. You can customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本并使用StableAudio生成音频。您可以使用滑块自定义生成设置。试试看会发生什么!", + "Melody audio (optional)": "旋律音频(可选)", + "Select AudioCraft model": "选择AudioCraft模型", + "Select model type": "选择模型类型", + "Duration (seconds)": "持续时间(秒)", + "Top K": "Top K", + "Min CFG coef (Magnet model only)": "最小CFG系数(仅限Magnet模型)", + "Max CFG coef (Magnet model only)": "最大CFG系数(仅限Magnet模型)", + "Enable Multiband Diffusion (Musicgen model only)": "启用多频带扩散(仅限Musicgen模型)", + "Select output format (Works only without Multiband Diffusion)": "选择输出格式(仅在不使用多频带扩散时有效)", + "Mel-Spectrogram": "梅尔频谱图", + "NeuroSandboxWebUI - AudioCraft": "NeuroSandboxWebUI - AudioCraft", + "This user interface allows you to enter any text and generate audio using AudioCraft. You can select the model and customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本并使用AudioCraft生成音频。您可以选择模型并使用滑块自定义生成设置。试试看会发生什么!", + "Select AudioLDM 2 model": "选择AudioLDM 2模型", + "Length (seconds)": "长度(秒)", + "Waveforms number": "波形数量", + "AudioLDM 2 Settings": "AudioLDM 2设置", + "NeuroSandboxWebUI - AudioLDM 2": "NeuroSandboxWebUI - AudioLDM 2", + "This user interface allows you to enter any text and generate audio using AudioLDM 2. You can select the model and customize the generation settings from the sliders. Try it and see what happens!": "此用户界面允许您输入任何文本并使用AudioLDM 2生成音频。您可以选择模型并使用滑块自定义生成设置。试试看会发生什么!", + "Enter text for the request": "输入请求文本", + "Select voice preset": "选择语音预设", + "Max length": "最大长度", + "Fine temperature": "精细温度", + "Coarse temperature": "粗糙温度", + "Bark Settings": "Bark设置", + "NeuroSandboxWebUI - SunoBark": "NeuroSandboxWebUI - SunoBark", + "This user interface allows you to enter text and generate audio using SunoBark. You can select the voice preset and customize the max length. 
Try it and see what happens!": "此用户界面允许您输入文本并使用SunoBark生成音频。您可以选择语音预设并自定义最大长度。试试看会发生什么!", + "Select RVC model": "选择RVC模型", + "RVC Method": "RVC方法", + "Up-key": "升调", + "Index rate": "索引率", + "Filter radius": "过滤半径", + "Resample-Sr": "重采样率", + "RMS Mixrate": "RMS混合率", + "Protection": "保护", + "RVC Settings": "RVC设置", + "Processed audio": "处理后的音频", + "NeuroSandboxWebUI - RVC": "NeuroSandboxWebUI - RVC", + "This user interface allows you to process audio using RVC (Retrieval-based Voice Conversion). Upload an audio file, select an RVC model, and choose the output format. Try it and see what happens!": "此用户界面允许您使用RVC(基于检索的语音转换)处理音频。上传音频文件,选择RVC模型,并选择输出格式。试试看会发生什么!", + "Conversion": "转换", + "Audio file to separate": "要分离的音频文件", + "Normalization Threshold": "归一化阈值", + "Sample Rate": "采样率", + "Vocals": "人声", + "Instrumental": "乐器声", + "NeuroSandboxWebUI - UVR": "NeuroSandboxWebUI - UVR", + "This user interface allows you to upload an audio file and separate it into vocals and instrumental using Ultimate Vocal Remover (UVR). Try it and see what happens!": "此用户界面允许您上传音频文件,并使用Ultimate Vocal Remover(UVR)将其分离为人声和乐器声。试试看会发生什么!", + "Separate": "分离", + "Vocal": "人声", + "NeuroSandboxWebUI - Demucs": "NeuroSandboxWebUI - Demucs", + "This user interface allows you to upload an audio file and separate it into vocal and instrumental using Demucs. Try it and see what happens!": "此用户界面允许您上传音频文件,并使用Demucs将其分离为人声和乐器声。试试看会发生什么!", + "Image to modify": "要修改的图像", + "Remove BackGround": "移除背景", + "Enable FaceRestore": "启用人脸修复", + "Fidelity weight (For FaceRestore)": "保真度权重(用于人脸修复)", + "Upscale (For FaceRestore)": "放大(用于人脸修复)", + "Enable PixelOE": "启用PixelOE", + "PixelOE Mode": "PixelOE模式", + "Target Size (For PixelOE)": "目标大小(用于PixelOE)", + "Patch Size (For PixelOE)": "补丁大小(用于PixelOE)", + "Thickness (For PixelOE)": "厚度(用于PixelOE)", + "contrast (For PixelOE)": "对比度(用于PixelOE)", + "saturation (For PixelOE)": "饱和度(用于PixelOE)", + "Enable DDColor": "启用DDColor", + "Input Size (For DDColor)": "输入大小(用于DDColor)", + "Enable DownScale": "启用缩小", + "DownScale Factor": "缩小因子", + "Enable Format Changer": "启用格式转换器", + "New Image Format": "新图像格式", + "Enable Encryption": "启用加密", + "Enable Decryption": "启用解密", + "Decryption Key": "解密密钥", + "Modified image": "修改后的图像", + "NeuroSandboxWebUI - Extras (Image)": "NeuroSandboxWebUI - 附加功能(图像)", + "This interface allows you to modify images": "此界面允许您修改图像", + "Modify": "修改", + "Video to modify": "要修改的视频", + "New Video Format": "新视频格式", + "Modified video": "修改后的视频", + "NeuroSandboxWebUI - Extras (Video)": "NeuroSandboxWebUI - 附加功能(视频)", + "This interface allows you to modify videos": "此界面允许您修改视频", + "Audio to modify": "要修改的音频", + "New Audio Format": "新音频格式", + "Enable AudioSR": "启用AudioSR", + "Enable Downscale": "启用缩小", + "Downscale Factor": "缩小因子", + "Modified audio": "修改后的音频", + "NeuroSandboxWebUI - Extras (Audio)": "NeuroSandboxWebUI - 附加功能(音频)", + "This interface allows you to modify audio files": "此界面允许您修改音频文件", + "Image to upscale": "要放大的图像", + "Input video": "输入视频", + "Select model": "选择模型", + "Enable Face Enhance": "启用人脸增强", + "Tile": "平铺", + "Tile pad": "平铺填充", + "Pre pad": "预填充", + "Denoise strength": "降噪强度", + "Face Enhance Settings": "人脸增强设置", + "Upscaled image": "放大后的图像", + "Upscaled video": "放大后的视频", + "NeuroSandboxWebUI - Upscale (Real-ESRGAN)": "NeuroSandboxWebUI - 放大(Real-ESRGAN)", + "This user interface allows you to upload an image and upscale it using Real-ESRGAN models": "此用户界面允许您上传图像并使用Real-ESRGAN模型进行放大", + "Source Image": "源图像", + "Target Image": "目标图像", + "Target Video": 
"目标视频", + "Enable many faces": "启用多个人脸", + "Reference face position": "参考人脸位置", + "Reference frame number": "参考帧号", + "Fidelity weight": "保真度权重", + "FaceSwap (Roop) Settings": "人脸交换(Roop)设置", + "Processed image": "处理后的图像", + "Processed video": "处理后的视频", + "NeuroSandboxWebUI - FaceSwap (Roop)": "NeuroSandboxWebUI - 人脸交换(Roop)", + "This user interface allows you to perform face swapping on images or videos and optional face restoration.": "此用户界面允许您在图像或视频上执行人脸交换,并可选择进行人脸修复。", + "Swap": "交换", + "Upload file": "上传文件", + "Metadata": "元数据", + "NeuroSandboxWebUI - Metadata-Info": "NeuroSandboxWebUI - 元数据信息", + "This interface allows you to view generation metadata for image, video, and audio files.": "此界面允许您查看图像、视频和音频文件的生成元数据。", + "View": "查看", + "Image": "图像", + "Video": "视频", + "Audio": "音频", + "Upscale (Real-ESRGAN)": "放大(Real-ESRGAN)", + "FaceSwap": "人脸交换", + "Metadata-Info": "元数据信息", + "Online Wiki": "在线维基", + "Local Wiki": "本地维基", + "Wiki Content": "维基内容", + "NeuroSandboxWebUI - Wiki": "NeuroSandboxWebUI - 维基", + "This interface displays the Wiki content from the specified URL or local file.": "此界面显示来自指定URL或本地文件的维基内容。", + "Learn": "学习", + "Text Files": "文本文件", + "Image Files": "图像文件", + "Video Files": "视频文件", + "Audio Files": "音频文件", + "3D Model Files": "3D模型文件", + "Text": "文本", + "3D Model": "3D模型", + "NeuroSandboxWebUI - Gallery": "NeuroSandboxWebUI - 图库", + "This interface allows you to view files from the outputs directory": "此界面允许您查看输出目录中的文件", + "Download LLM model": "下载LLM模型", + "Download StableDiffusion model": "下载StableDiffusion模型", + "NeuroSandboxWebUI - ModelDownloader": "NeuroSandboxWebUI - 模型下载器", + "This user interface allows you to download LLM and StableDiffusion models": "此用户界面允许您下载LLM和StableDiffusion模型", + "Download": "下载", + "Share Mode": "共享模式", + "Debug Mode": "调试模式", + "Monitoring Mode": "监控模式", + "Enable AutoLaunch": "启用自动启动", + "Show API": "显示API", + "Open API": "打开API", + "Queue max size": "队列最大大小", + "Queue status update rate": "队列状态更新率", + "Gradio Auth": "Gradio认证", + "Server Name": "服务器名称", + "Server Port": "服务器端口", + "Hugging Face Token": "Hugging Face令牌", + "Theme": "主题", + "Enable Custom Theme": "启用自定义主题", + "Primary Hue": "主要色调", + "Secondary Hue": "次要色调", + "Neutral Hue": "中性色调", + "Spacing Size": "间距大小", + "Radius Size": "圆角大小", + "Text Size": "文本大小", + "Font": "字体", + "Monospaced Font": "等宽字体", + "Theme builder": "主题构建器", + "NeuroSandboxWebUI - Settings": "NeuroSandboxWebUI - 设置", + "This user interface allows you to change settings of the application": "此用户界面允许您更改应用程序的设置", + "Update": "更新", + "GPU Total Memory": "GPU总内存", + "GPU Used Memory": "GPU已用内存", + "GPU Free Memory": "GPU可用内存", + "GPU Temperature": "GPU温度", + "CPU Temperature": "CPU温度", + "RAM Total": "总RAM", + "RAM Used": "已用RAM", + "RAM Free": "可用RAM", + "Disk Total Space": "磁盘总空间", + "Disk Free Space": "磁盘可用空间", + "Application Folder Size": "应用程序文件夹大小", + "NeuroSandboxWebUI - System": "NeuroSandboxWebUI - 系统", + "This interface displays system information": "此界面显示系统信息", + "Display": "显示", + "LLM": "LLM", + "TTS-STT": "TTS-STT", + "MMS": "MMS", + "SeamlessM4Tv2": "SeamlessM4Tv2", + "LibreTranslate": "LibreTranslate", + "StableDiffusion": "StableDiffusion", + "Kandinsky": "Kandinsky", + "Flux": "Flux", + "HunyuanDiT": "HunyuanDiT", + "Lumina-T2X": "Lumina-T2X", + "Kolors": "Kolors", + "AuraFlow": "AuraFlow", + "Würstchen": "Würstchen", + "DeepFloydIF": "DeepFloydIF", + "PixArt": "PixArt", + "PlaygroundV2.5": "PlaygroundV2.5", + "Wav2Lip": "Wav2Lip", + "LivePortrait": "LivePortrait", + "ModelScope": 
"ModelScope", + "ZeroScope2": "ZeroScope2", + "CogVideoX": "CogVideoX", + "Latte": "Latte", + "StableFast3D": "StableFast3D", + "Shap-E": "Shap-E", + "SV34D": "SV34D", + "Zero123Plus": "Zero123Plus", + "StableAudio": "StableAudio", + "AudioCraft": "AudioCraft", + "AudioLDM2": "AudioLDM2", + "SunoBark": "SunoBark", + "RVC": "RVC", + "UVR": "UVR", + "Demucs": "Demucs", + "Extras": "附加功能", + "Wiki": "维基", + "Gallery": "图库", + "ModelDownloader": "模型下载器", + "Settings": "设置", + "System": "系统", + "3D": "3D", + "Interface": "界面", + "Reload interface": "重新加载界面", + "Close terminal": "关闭终端", + "Outputs": "输出", + "GitHub": "GitHub", + "Hugging Face": "Hugging Face", + "Welcome to NeuroSandboxWebUI!": "欢迎使用NeuroSandboxWebUI!", + "Language": "语言" +} \ No newline at end of file From e6de75f774d0b391d55118fdbbcea008c59687f5 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Sep 2024 14:57:37 +0300 Subject: [PATCH 2/9] Update README_ZH.md --- Readmes/README_ZH.md | 138 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 137 insertions(+), 1 deletion(-) diff --git a/Readmes/README_ZH.md b/Readmes/README_ZH.md index fb669068..bb1d62e9 100644 --- a/Readmes/README_ZH.md +++ b/Readmes/README_ZH.md @@ -1 +1,137 @@ -# Soon... +## [功能](/#功能) | [依赖](/#必需依赖) | [系统要求](/#最低系统要求) | [安装](/#如何安装) | [Wiki](/#Wiki) | [致谢](/#致开发者的感谢) | [许可证](/#第三方许可证) + +# ![主图](https://github.com/Dartvauder/NeuroSandboxWebUI/assets/140557322/4ea0d891-8979-45ad-b052-626c41ae991a) +* 仍在进行中但已稳定! +* [English](/README.md) | [Русский](/Readmes/README_RU.md) | 漢語 + +## 描述: + +一个简单方便的界面,用于使用各种神经网络模型。您可以通过文本、语音和图像输入与LLM和Moondream2进行通信;使用StableDiffusion、Kandinsky、Flux、HunyuanDiT、Lumina-T2X、Kolors、AuraFlow、Würstchen、DeepFloydIF、PixArt和PlaygroundV2.5生成图像;使用ModelScope、ZeroScope 2、CogVideoX和Latte生成视频;使用StableFast3D、Shap-E、SV34D和Zero123Plus生成3D对象;使用StableAudioOpen、AudioCraft和AudioLDM 2生成音乐和音频;使用CoquiTTS、MMS和SunoBark进行文本到语音转换;使用OpenAI-Whisper和MMS进行语音到文本转换;使用Wav2Lip进行唇形同步;使用LivePortrait为图像添加动画;使用Roop进行换脸;使用Rembg移除背景;使用CodeFormer修复面部;使用PixelOE进行图像像素化;使用DDColor为图像上色;使用LibreTranslate和SeamlessM4Tv2进行文本翻译;使用Demucs和UVR进行音频文件分离;使用RVC进行语音转换。您还可以在图库中查看输出目录中的文件,下载LLM和StableDiffusion模型,在界面内更改应用程序设置并检查系统传感器。 + +项目目标 - 创建一个尽可能简单易用的神经网络模型应用程序 + +### 文本:1zh + +### 图像:2zh + +### 视频:3zh + +### 3D:4zh + +### 音频:5zh + +### 额外功能:6zh + +### 界面:7zh + +## 功能: + +* 通过install.bat(Windows)或install.sh(Linux)轻松安装 +* 您可以通过移动设备在本地主机(通过IPv4)或在线任何地方(通过Share)使用应用程序 +* 灵活且优化的界面(由Gradio提供) +* 从`Install`和`Update`文件进行调试日志记录 +* 提供三种语言版本 +* 支持Transformers和llama.cpp模型(LLM) +* 支持diffusers和safetensors模型(StableDiffusion)- txt2img、img2img、depth2img、marigold、pix2pix、controlnet、upscale(latent)、upscale(SUPIR)、refiner、inpaint、outpaint、gligen、diffedit、blip-diffusion、animatediff、hotshot-xl、video、ldm3d、sd3、cascade、t2i-ip-adapter、ip-adapter-faceid和riffusion标签 +* 支持stable-diffusion-cpp模型用于FLUX +* 支持额外的图像生成模型:Kandinsky(txt2img、img2img、inpaint)、Flux(支持LoRA)、HunyuanDiT(txt2img、controlnet)、Lumina-T2X、Kolors(支持LoRA的txt2img、img2img、ip-adapter-plus)、AuraFlow(支持LoRA和AuraSR)、Würstchen、DeepFloydIF(txt2img、img2img、inpaint)、PixArt和PlaygroundV2.5 +* 支持使用Rembg、CodeFormer、PixelOE、DDColor、DownScale、格式转换器、换脸(Roop)和放大(Real-ESRGAN)模型进行图像、视频和音频的额外处理 +* 支持StableAudio +* 支持AudioCraft(模型:musicgen、audiogen和magnet) +* 支持AudioLDM 2(模型:audio和music) +* 支持TTS和Whisper模型(用于LLM和TTS-STT) +* 支持MMS进行文本到语音和语音到文本转换 +* 支持Lora、Textual 
inversion(embedding)、Vae、MagicPrompt、Img2img、Depth、Marigold、Pix2Pix、Controlnet、Upscalers(latent和SUPIR)、Refiner、Inpaint、Outpaint、GLIGEN、DiffEdit、BLIP-Diffusion、AnimateDiff、HotShot-XL、Videos、LDM3D、SD3、Cascade、T2I-IP-ADAPTER、IP-Adapter-FaceID和Riffusion模型(用于StableDiffusion) +* 支持Multiband Diffusion模型(用于AudioCraft) +* 支持LibreTranslate(本地API)和SeamlessM4Tv2进行语言翻译 +* 支持ModelScope、ZeroScope 2、CogVideoX和Latte进行视频生成 +* 支持SunoBark +* 支持Demucs和UVR进行音频文件分离 +* 支持RVC进行语音转换 +* 支持StableFast3D、Shap-E、SV34D和Zero123Plus进行3D生成 +* 支持Wav2Lip +* 支持LivePortrait为图像添加动画 +* 支持LLM的多模态(Moondream 2)、PDF解析(OpenParse)、TTS(CoquiTTS)、STT(Whisper)、LORA和网络搜索(使用DuckDuckGo) +* 用于生成图像、视频和音频的元数据信息查看器 +* 界面内的模型设置 +* 在线和离线Wiki +* 图库 +* 模型下载器(用于LLM和StableDiffusion) +* 应用程序设置 +* 能够查看系统传感器 + +## 必需依赖: + +* [Python](https://www.python.org/downloads/)(3.10.11) +* [Git](https://git-scm.com/downloads) +* [CUDA](https://developer.nvidia.com/cuda-downloads)(12.4)和[cuDNN](https://developer.nvidia.com/cudnn-downloads)(9.1) +* [FFMPEG](https://ffmpeg.org/download.html) +- C++编译器 + - Windows:[VisualStudio](https://visualstudio.microsoft.com/ru/)、[VisualStudioCode](https://code.visualstudio.com)和[Cmake](https://cmake.org) + - Linux:[GCC](https://gcc.gnu.org/)、[VisualStudioCode](https://code.visualstudio.com)和[Cmake](https://cmake.org) + +## 最低系统要求: + +* 系统:Windows或Linux +* GPU:6GB+或CPU:8核3.6GHZ +* RAM:16GB+ +* 磁盘空间:20GB+ +* 需要互联网连接以下载模型和进行安装 + +## 如何安装: + +### Windows + +1) 首先安装所有[必需依赖](/#必需依赖) +2) 在任意位置执行`Git clone https://github.com/Dartvauder/NeuroSandboxWebUI.git` +3) 运行`Install.bat`并等待安装完成 +4) 安装完成后,运行`Start.bat` +5) 选择文件版本并等待应用程序启动 +6) 现在您可以开始生成了! + +要获取更新,请运行`Update.bat` +要通过终端使用虚拟环境,请运行`Venv.bat` + +### Linux + +1) 首先安装所有[必需依赖](/#必需依赖) +2) 在任意位置执行`Git clone https://github.com/Dartvauder/NeuroSandboxWebUI.git` +3) 在终端中运行`./Install.sh`并等待所有依赖项安装完成 +4) 安装完成后,运行`./Start.sh` +5) 等待应用程序启动 +6) 现在您可以开始生成了! 
+ +要获取更新,请运行`./Update.sh` +要通过终端使用虚拟环境,请运行`./Venv.sh` + +## Wiki + +* https://github.com/Dartvauder/NeuroSandboxWebUI/wiki + +## 致开发者的感谢 + +#### 非常感谢这些项目,因为正是通过他们的应用程序/库,我才能够创建我的应用程序: + +首先,我要感谢[PyCharm](https://www.jetbrains.com/pycharm/)和[GitHub](https://desktop.github.com)的开发者。借助他们的应用程序,我能够创建并分享我的代码 + +[列出了所有使用的库和项目] + +## 第三方许可证: + +#### 许多模型都有自己的使用许可证。在使用之前,我建议您熟悉它们: + +[列出了所有使用的模型及其许可证链接] + +#### 这些第三方仓库代码也在我的项目中使用: + +[列出了所有使用的第三方仓库代码] + +## 捐赠 + +### *如果您喜欢我的项目并想要捐赠,这里有捐赠选项。非常感谢您的支持!* + +* [!["给我买杯咖啡"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/Dartvauder) + +## Star历史 + +[![Star历史图表](https://api.star-history.com/svg?repos=Dartvauder/NeuroSandboxWebUI&type=Date)](https://star-history.com/#Dartvauder/NeuroSandboxWebUI&Date) From 5439c19944adbd26ded03fcc708ad132bef66e23 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Sep 2024 14:59:25 +0300 Subject: [PATCH 3/9] Update README_ZH.md --- Readmes/README_ZH.md | 109 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 106 insertions(+), 3 deletions(-) diff --git a/Readmes/README_ZH.md b/Readmes/README_ZH.md index bb1d62e9..18958d0a 100644 --- a/Readmes/README_ZH.md +++ b/Readmes/README_ZH.md @@ -114,17 +114,120 @@ 首先,我要感谢[PyCharm](https://www.jetbrains.com/pycharm/)和[GitHub](https://desktop.github.com)的开发者。借助他们的应用程序,我能够创建并分享我的代码 -[列出了所有使用的库和项目] +* `gradio` - https://github.com/gradio-app/gradio +* `transformers` - https://github.com/huggingface/transformers +* `tts` - https://github.com/coqui-ai/TTS +* `openai-whisper` - https://github.com/openai/whisper +* `torch` - https://github.com/pytorch/pytorch +* `soundfile` - https://github.com/bastibe/python-soundfile +* `cuda-python` - https://github.com/NVIDIA/cuda-python +* `gitpython` - https://github.com/gitpython-developers/GitPython +* `diffusers` - https://github.com/huggingface/diffusers +* `llama.cpp-python` - https://github.com/abetlen/llama-cpp-python +* `stable-diffusion-cpp-python` - https://github.com/william-murray1204/stable-diffusion-cpp-python +* `audiocraft` - https://github.com/facebookresearch/audiocraft +* `AudioLDM2` - https://github.com/haoheliu/AudioLDM2 +* `xformers` - https://github.com/facebookresearch/xformers +* `demucs` - https://github.com/facebookresearch/demucs +* `libretranslate` - https://github.com/LibreTranslate/LibreTranslate +* `libretranslatepy` - https://github.com/argosopentech/LibreTranslate-py +* `rembg` - https://github.com/danielgatis/rembg +* `trimesh` - https://github.com/mikedh/trimesh +* `suno-bark` - https://github.com/suno-ai/bark +* `IP-Adapter` - https://github.com/tencent-ailab/IP-Adapter +* `PyNanoInstantMeshes` - https://github.com/vork/PyNanoInstantMeshes +* `CLIP` - https://github.com/openai/CLIP +* `rvc-python` - https://github.com/daswer123/rvc-python +* `audio-separator` - https://github.com/nomadkaraoke/python-audio-separator +* `pixeloe` - https://github.com/KohakuBlueleaf/PixelOE +* `k-diffusion` - https://github.com/crowsonkb/k-diffusion +* `open-parse` - https://github.com/Filimoa/open-parse +* `AudioSR` - https://github.com/haoheliu/versatile_audio_super_resolution ## 第三方许可证: #### 许多模型都有自己的使用许可证。在使用之前,我建议您熟悉它们: -[列出了所有使用的模型及其许可证链接] +* [Transformers](https://github.com/huggingface/transformers/blob/main/LICENSE) +* [llama.cpp](https://github.com/ggerganov/llama.cpp/blob/master/LICENSE) +* [stable-diffusion.cpp](https://github.com/leejet/stable-diffusion.cpp/blob/master/LICENSE) +* [CoquiTTS](https://coqui.ai/cpml) +* 
[OpenAI-Whisper](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [LibreTranslate](https://github.com/LibreTranslate/LibreTranslate/blob/main/LICENSE) +* [Diffusers](https://github.com/huggingface/diffusers/blob/main/LICENSE) +* [StableDiffusion1.5](https://huggingface.co/spaces/CompVis/stable-diffusion-license) +* [StableDiffusion2](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md) +* [StableDiffusion3](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers/blob/main/LICENSE) +* [StableDiffusionXL](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md) +* [StableCascade](https://huggingface.co/stabilityai/stable-cascade/blob/main/LICENSE) +* [LatentDiffusionModel3D](https://huggingface.co/spaces/CompVis/stable-diffusion-license) +* [StableVideoDiffusion](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1/blob/main/LICENSE) +* [I2VGen-XL](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [Rembg](https://github.com/danielgatis/rembg/blob/main/LICENSE.txt) +* [Shap-E](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [StableAudioOpen](https://huggingface.co/stabilityai/stable-audio-open-1.0/blob/main/LICENSE) +* [AudioCraft](https://spdx.org/licenses/CC-BY-NC-4.0) +* [AudioLDM2](https://spdx.org/licenses/CC-BY-NC-SA-4.0) +* [Demucs](https://github.com/facebookresearch/demucs/blob/main/LICENSE) +* [SunoBark](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [Moondream2](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [ZeroScope2](https://spdx.org/licenses/CC-BY-NC-4.0) +* [GLIGEN](https://huggingface.co/spaces/CompVis/stable-diffusion-license) +* [Wav2Lip](https://github.com/Rudrabha/Wav2Lip) +* [Roop](https://github.com/s0md3v/roop/blob/main/LICENSE) +* [CodeFormer](https://github.com/sczhou/CodeFormer/blob/master/LICENSE) +* [ControlNet](https://github.com/lllyasviel/ControlNet/blob/main/LICENSE) +* [AnimateDiff](https://github.com/guoyww/AnimateDiff/blob/main/LICENSE.txt) +* [Pix2Pix](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [Kandinsky 2.1; 2.2; 3](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [Flux-schnell](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [Flux-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md) +* [HunyuanDiT](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/blob/main/LICENSE.txt) +* [Lumina-T2X](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [DeepFloydIF](https://huggingface.co/spaces/DeepFloyd/deepfloyd-if-license) +* [PixArt](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md) +* [CogVideoX](https://huggingface.co/THUDM/CogVideoX-2b/blob/main/LICENSE) +* [Latte](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [Kolors](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [AuraFlow](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [Würstchen](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [ModelScope](https://spdx.org/licenses/CC-BY-NC-4.0) +* 
[StableFast3D](https://github.com/Stability-AI/stable-fast-3d/blob/main/LICENSE.md) +* [SV34D](https://huggingface.co/stabilityai/sv4d/blob/main/LICENSE.md) +* [Zero123Plus](https://huggingface.co/blog/open_rail) +* [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE) +* [Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md) +* [PlaygroundV2.5](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md) +* [AuraSR](https://huggingface.co/fal/AuraSR/blob/main/LICENSE.md) +* [IP-Adapter-FaceID](https://huggingface.co/h94/IP-Adapter-FaceID) +* [T2I-IP-Adapter](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [MMS](https://spdx.org/licenses/CC-BY-NC-4.0) +* [SeamlessM4Tv2](https://spdx.org/licenses/CC-BY-NC-4.0) +* [HotShot-XL](https://github.com/hotshotco/Hotshot-XL/blob/main/LICENSE) +* [Riffusion](https://huggingface.co/spaces/CompVis/stable-diffusion-license) +* [MozillaCommonVoice17](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/cc0-1.0.md) +* [UVR-MDX](https://github.com/kuielab/mdx-net/blob/main/LICENSE) +* [RVC](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE) +* [DDColor](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [PixelOE](https://github.com/KohakuBlueleaf/PixelOE/blob/main/LICENSE) +* [LivePortrait](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [SUPIR](https://github.com/Fanghua-Yu/SUPIR/blob/master/LICENSE) +* [MagicPrompt](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/mit.md) +* [Marigold](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) +* [BLIP-Diffusion](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md) #### 这些第三方仓库代码也在我的项目中使用: -[列出了所有使用的第三方仓库代码] +* [Generative-Models for SV34D](https://github.com/Stability-AI/generative-models) +* [CodeFormer for extras](https://github.com/sczhou/CodeFormer) +* [Real-ESRGAN for upscale](https://github.com/xinntao/Real-ESRGAN) +* [HotShot-XL for StableDiffusion](https://github.com/hotshotco/Hotshot-XL) +* [Roop for extras](https://github.com/s0md3v/roop) +* [StableFast3D for 3D](https://github.com/Stability-AI/stable-fast-3d) +* [Riffusion for StableDiffusion](https://github.com/riffusion/riffusion-hobby) +* [DDColor for extras](https://github.com/piddnad/DDColor) +* [LivePortrait for video](https://github.com/KwaiVGI/LivePortrait) +* [SUPIR for StableDiffusion](https://github.com/Fanghua-Yu/SUPIR) ## 捐赠 From 3f2f6502bad0060b3b62dc287ad68afbc2308ccc Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Sep 2024 14:59:28 +0300 Subject: [PATCH 4/9] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 35e64097..6063adab 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ First of all, I want to thank the developers of [PyCharm](https://www.jetbrains. 
 * `pixeloe` - https://github.com/KohakuBlueleaf/PixelOE
 * `k-diffusion` - https://github.com/crowsonkb/k-diffusion
 * `open-parse` - https://github.com/Filimoa/open-parse
-* `AudioSR` - https://github.com/haoheliu/versatile_audio_super_resolution/
+* `AudioSR` - https://github.com/haoheliu/versatile_audio_super_resolution
 
 ## Third Party Licenses:
 

From 955f1dca7786f31ef0a94bc574400dc3baa12472 Mon Sep 17 00:00:00 2001
From: Michael
Date: Wed, 18 Sep 2024 15:00:33 +0300
Subject: [PATCH 5/9] Update README_RU.md

---
 Readmes/README_RU.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Readmes/README_RU.md b/Readmes/README_RU.md
index cb90b33b..f5f46c81 100644
--- a/Readmes/README_RU.md
+++ b/Readmes/README_RU.md
@@ -142,7 +142,7 @@
 * `pixeloe` - https://github.com/KohakuBlueleaf/PixelOE
 * `k-diffusion` - https://github.com/crowsonkb/k-diffusion
 * `open-parse` - https://github.com/Filimoa/open-parse
-* `AudioSR` - https://github.com/haoheliu/versatile_audio_super_resolution/
+* `AudioSR` - https://github.com/haoheliu/versatile_audio_super_resolution
 
 ## Лицензии третьих сторон:
 

From cbafb992d95479f2db6346e527641841c066558e Mon Sep 17 00:00:00 2001
From: Michael
Date: Wed, 18 Sep 2024 15:03:08 +0300
Subject: [PATCH 6/9] Update README_RU.md

From f4212ff7dd16a0b0e016f76b78d958d0486d963a Mon Sep 17 00:00:00 2001
From: Michael
Date: Wed, 18 Sep 2024 15:03:18 +0300
Subject: [PATCH 7/9] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 6063adab..2ed7a35d 100644
--- a/README.md
+++ b/README.md
@@ -106,7 +106,7 @@ To work with the virtual environment through the terminal, run `./Venv.sh`
 
 ## Wiki
 
-* https://github.com/Dartvauder/NeuroSandboxWebUI/wiki
+* https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/EN‐Wiki
 
 ## Acknowledgment to developers
 

From 73a734bef0723b48f11727407d56075327e4522f Mon Sep 17 00:00:00 2001
From: Michael
Date: Wed, 18 Sep 2024 15:03:35 +0300
Subject: [PATCH 8/9] Update README_ZH.md

---
 Readmes/README_ZH.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Readmes/README_ZH.md b/Readmes/README_ZH.md
index 18958d0a..b8ef0f96 100644
--- a/Readmes/README_ZH.md
+++ b/Readmes/README_ZH.md
@@ -106,7 +106,7 @@
 
 ## Wiki
 
-* https://github.com/Dartvauder/NeuroSandboxWebUI/wiki
+* https://github.com/Dartvauder/NeuroSandboxWebUI/wiki/ZH‐Wiki
 
 ## 致开发者的感谢
 

From 7bb4ae32a27371ab69fa52d05704f8bce8eebc90 Mon Sep 17 00:00:00 2001
From: Michael
Date: Wed, 18 Sep 2024 15:06:26 +0300
Subject: [PATCH 9/9] Update WikiZH.md

---
 Wikies/WikiZH.md | 501 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 500 insertions(+), 1 deletion(-)

diff --git a/Wikies/WikiZH.md b/Wikies/WikiZH.md
index 1fcddfd7..58b701a6 100644
--- a/Wikies/WikiZH.md
+++ b/Wikies/WikiZH.md
@@ -1 +1,500 @@
-Soon...
\ No newline at end of file
+# 使用方法:
+
+#### 界面有七个主选项卡(文本、图像、视频、3D、音频、附加功能和界面),共四十一个子选项卡(部分带有自己的子选项卡):LLM、TTS-STT、MMS、SeamlessM4Tv2、LibreTranslate、StableDiffusion、Kandinsky、Flux、HunyuanDiT、Lumina-T2X、Kolors、AuraFlow、Würstchen、DeepFloydIF、PixArt、PlaygroundV2.5、Wav2Lip、LivePortrait、ModelScope、ZeroScope 2、CogVideoX、Latte、StableFast3D、Shap-E、SV34D、Zero123Plus、StableAudio、AudioCraft、AudioLDM 2、SunoBark、RVC、UVR、Demucs、Upscale (Real-ESRGAN)、FaceSwap、MetaData-Info、Wiki、Gallery、ModelDownloader、Settings和System。选择您需要的选项卡并按照以下说明操作
+
+# 文本:
+
+### LLM:
+
+1) 首先将您的模型上传到文件夹:*inputs/text/llm_models*
+2) 从下拉列表中选择您的模型
+3) 选择模型类型(`transformers`或`llama`)
+4) 根据您需要的参数设置模型
+5) 输入(或说出)您的请求
+6) 点击`Submit`按钮接收生成的文本和音频响应
+#### 可选:您可以启用`TTS`模式,选择所需的`voice`和`language`以接收音频响应。您可以启用`multimodal`并上传图像以获取其描述。您可以启用`websearch`以访问互联网。您可以启用`libretranslate`以获得翻译。您可以启用`OpenParse`以处理PDF文件。您还可以选择`LORA`模型来改进生成
+#### 语音样本 = *inputs/audio/voices*
+#### LORA = *inputs/text/llm_models/lora*
+#### 语音必须预处理(22050 kHz,单声道,WAV)
+#### LLM的头像,您可以在*avatars*文件夹中更改
+
+### TTS-STT:
+
+1) 输入文本进行文本到语音转换
+2) 输入音频进行语音到文本转换
+3) 点击`Submit`按钮接收生成的文本和音频响应
+#### 语音样本 = *inputs/audio/voices*
+#### 语音必须预处理(22050 kHz,单声道,WAV)
+
+### MMS(文本到语音和语音到文本):
+
+1) 输入文本进行文本到语音转换
+2) 输入音频进行语音到文本转换
+3) 点击`Submit`按钮接收生成的文本或音频响应
+
+### SeamlessM4Tv2:
+
+1) 输入(或说出)您的请求
+2) 选择源语言、目标语言和数据集语言
+3) 根据您需要的参数设置模型
+4) 点击`Submit`按钮获取翻译
+
+### LibreTranslate:
+
+* 首先您需要安装并运行[LibreTranslate](https://github.com/LibreTranslate/LibreTranslate)
+1) 选择源语言和目标语言
+2) 点击`Submit`按钮获取翻译
+#### 可选:您可以通过打开相应的按钮来保存翻译历史记录
+
+# 图像:
+
+### StableDiffusion - 有二十四个子选项卡:
+
+#### txt2img:
+
+1) 首先将您的模型上传到文件夹:*inputs/image/sd_models*
+2) 从下拉列表中选择您的模型
+3) 选择模型类型(`SD`、`SD2`或`SDXL`)
+4) 根据您需要的参数设置模型
+5) 输入您的请求(+和-用于提示权重)
+6) 点击`Submit`按钮获取生成的图像
+#### 可选:您可以选择您的`vae`、`embedding`和`lora`模型,还可以启用`MagicPrompt`来改进生成方法
+#### vae = *inputs/image/sd_models/vae*
+#### lora = *inputs/image/sd_models/lora*
+#### embedding = *inputs/image/sd_models/embedding*
+
+#### img2img:
+
+1) 首先将您的模型上传到文件夹:*inputs/image/sd_models*
+2) 从下拉列表中选择您的模型
+3) 选择模型类型(`SD`、`SD2`或`SDXL`)
+4) 根据您需要的参数设置模型
+5) 上传将进行生成的初始图像
+6) 输入您的请求(+和-用于提示权重)
+7) 点击`Submit`按钮获取生成的图像
+#### 可选:您可以选择您的`vae`、`embedding`和`lora`模型,还可以启用`MagicPrompt`来改进生成方法
+#### vae = *inputs/image/sd_models/vae*
+#### lora = *inputs/image/sd_models/lora*
+#### embedding = *inputs/image/sd_models/embedding*
+
+#### depth2img:
+
+1) 上传初始图像
+2) 根据您需要的参数设置模型
+3) 输入您的请求(+和-用于提示权重)
+4) 点击`Submit`按钮获取生成的图像
+
+#### marigold:
+
+1) 上传初始图像
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的深度图像
+
+#### pix2pix:
+
+1) 上传初始图像
+2) 根据您需要的参数设置模型
+3) 输入您的请求(+和-用于提示权重)
+4) 点击`Submit`按钮获取生成的图像
+
+#### controlnet:
+
+1) 首先将您的stable diffusion模型上传到文件夹:*inputs/image/sd_models*
+2) 上传初始图像
+3) 从下拉列表中选择您的stable diffusion和controlnet模型
+4) 根据您需要的参数设置模型
+5) 输入您的请求(+和-用于提示权重)
+6) 点击`Submit`按钮获取生成的图像
+
+#### upscale(潜在):
+
+1) 上传初始图像
+2) 选择您的模型
+3) 根据您需要的参数设置模型
+4) 点击`Submit`按钮获取放大的图像
+
+#### upscale(SUPIR):
+
+1) 上传初始图像
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取放大的图像
+#### 警告:您需要自行从[SUPIR模型的Google驱动器](https://drive.google.com/file/d/1ohCIBV_RAej1zuiidHph5qXNuD4GRxO3/view?usp=drive_link)和[最佳基础模型的HuggingFace](https://huggingface.co/RunDiffusion/Juggernaut-XL-v9/blob/main/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors)下载模型,并将它们放在以下路径:*/ThirdPartyRepository/SUPIR/options*
+
+#### refiner(SDXL):
+
+1) 上传初始图像
+2) 点击`Submit`按钮获取精修后的图像
+
+#### inpaint:
+
+1) 首先将您的模型上传到文件夹:*inputs/image/sd_models/inpaint*
+2) 从下拉列表中选择您的模型
+3) 选择模型类型(`SD`、`SD2`或`SDXL`)
+4) 根据您需要的参数设置模型
+5) 将要进行生成的图像上传到`initial image`和`mask image`
+6) 在`mask image`中,选择画笔,然后选择调色板并将颜色更改为`#FFFFFF`
+7) 绘制生成区域并输入您的请求(+和-用于提示权重)
+8) 点击`Submit`按钮获取修复后的图像
+#### 可选:您可以选择您的`vae`模型来改进生成方法
+#### vae = *inputs/image/sd_models/vae*
+
+#### outpaint:
+
+1) 首先将您的模型上传到文件夹:*inputs/image/sd_models/inpaint*
+2) 从下拉列表中选择您的模型
+3) 选择模型类型(`SD`、`SD2`或`SDXL`)
+4) 根据您需要的参数设置模型
+5) 将要进行生成的图像上传到`initial image`
+6) 输入您的请求(+和-用于提示权重)
+7) 点击`Submit`按钮获取扩展后的图像
+
+#### gligen:
+
+1) 首先将您的模型上传到文件夹:*inputs/image/sd_models*
+2) 从下拉列表中选择您的模型
+3) 选择模型类型(`SD`、`SD2`或`SDXL`)
+4) 根据您需要的参数设置模型
+5) 输入您的提示请求(+和-用于提示权重)和GLIGEN短语(在""中表示框)
+6) 输入GLIGEN框(例如[0.1387, 0.2051, 0.4277, 0.7090]表示一个框)
+7) 点击`Submit`按钮获取生成的图像
+
+#### diffedit:
+
+1) 输入您的源提示和源负面提示以进行图像遮罩
+2) 输入您的目标提示和目标负面提示以进行图像差异编辑
+3) 上传初始图像
+4) 根据您需要的参数设置模型
+5) 点击`Submit`按钮获取生成的图像
+
+#### blip-diffusion:
+
+1) 输入您的提示
+2) 上传初始图像
+3) 输入您的条件和目标主题
+4) 根据您需要的参数设置模型
+5) 点击`Submit`按钮获取生成的图像
+
+#### animatediff:
+
+1) 首先将您的模型上传到文件夹:*inputs/image/sd_models*
+2) 从下拉列表中选择您的模型
+3) 根据您需要的参数设置模型
+4) 输入您的请求(+和-用于提示权重)
+5) 点击`Submit`按钮获取生成的图像动画
+#### 可选:您可以选择运动LORA来控制生成
+
+#### hotshot-xl
+
+1) 输入您的请求
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的GIF图像
+
+#### video:
+
+1) 上传初始图像
+2) 选择您的模型
+3) 输入您的请求(适用于I2VGen-XL)
+4) 根据您需要的参数设置模型
+5) 点击`Submit`按钮获取从图像生成的视频
+
+#### ldm3d:
+
+1) 输入您的请求
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的图像
+
+#### sd3(txt2img、img2img、controlnet、inpaint):
+
+1) 输入您的请求
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的图像
+
+#### cascade:
+
+1) 输入您的请求
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的图像
+
+#### t2i-ip-adapter:
+
+1) 上传初始图像
+2) 选择您需要的选项
+3) 点击`Submit`按钮获取修改后的图像
+
+#### ip-adapter-faceid:
+
+1) 上传初始图像
+2) 选择您需要的选项
+3) 点击`Submit`按钮获取修改后的图像
+
+#### riffusion(文本到图像、图像到音频、音频到图像):
+
+- 文本到图像:
+  - 1) 输入您的请求
+    2) 根据您需要的参数设置模型
+    3) 点击`Submit`按钮获取生成的图像
+- 图像到音频:
+  - 1) 上传初始图像
+    2) 选择您需要的选项
+    3) 点击`Submit`按钮获取从图像生成的音频
+- 音频到图像:
+  - 1) 上传初始音频
+    2) 选择您需要的选项
+    3) 点击`Submit`按钮获取从音频生成的图像
+
+### Kandinsky(txt2img、img2img、inpaint):
+
+1) 输入您的提示
+2) 从下拉列表中选择一个模型
+3) 根据您需要的参数设置模型
+4) 点击`Submit`获取生成的图像
+
+### Flux:
+
+1) 输入您的提示
+2) 选择您的模型
+3) 根据您需要的参数设置模型
+4) 点击`Submit`获取生成的图像
+#### 可选:您可以选择您的`lora`模型来改进生成方法。如果您的VRAM较低,还可以通过点击`Enable quantize`按钮使用量化模型,但您需要自行下载模型:[FLUX.1-dev](https://huggingface.co/city96/FLUX.1-dev-gguf/tree/main)或[FLUX.1-schnell](https://huggingface.co/city96/FLUX.1-schnell-gguf/tree/main),以及[VAE](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/ae.safetensors)、[CLIP](https://huggingface.co/comfyanonymous/flux_text_encoders/blob/main/clip_l.safetensors)和[T5XXL](https://huggingface.co/comfyanonymous/flux_text_encoders/blob/main/t5xxl_fp16.safetensors)
+#### lora = *inputs/image/flux-lora*
+#### 量化模型 = *inputs/image/quantize-flux*
+
+### HunyuanDiT(txt2img、controlnet):
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+
+### Lumina-T2X:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+
+### Kolors(txt2img、img2img、ip-adapter-plus):
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+#### 可选:您可以选择您的`lora`模型来改进生成方法
+#### lora = *inputs/image/kolors-lora*
+
+### AuraFlow:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+#### 可选:您可以选择您的`lora`模型并启用`AuraSR`来改进生成方法
+#### lora = *inputs/image/auraflow-lora*
+
+### Würstchen:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+
+### DeepFloydIF(txt2img、img2img、inpaint):
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+
+### PixArt:
+
+1) 输入您的提示
+2) 选择您的模型
+3) 根据您需要的参数设置模型
+4) 点击`Submit`获取生成的图像
+
+### PlaygroundV2.5:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的图像
+
+# 视频:
+
+### Wav2Lip:
+
+1) 上传初始面部图像
+2) 上传初始语音音频
+3) 根据您需要的参数设置模型
+4) 点击`Submit`按钮接收唇形同步结果
+
+### LivePortrait:
+
+1) 上传初始面部图像
+2) 上传初始面部移动视频
+3) 点击`Submit`按钮接收动画面部图像
+
+### ModelScope:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的视频
+
+### ZeroScope 2:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的视频
+
+### CogVideoX:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的视频
+
+### Latte:
+
+1) 输入您的提示
+2) 根据您需要的参数设置模型
+3) 点击`Submit`获取生成的视频
+
+# 3D:
+
+### StableFast3D:
+
+1) 上传初始图像
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的3D对象
+
+### Shap-E:
+
+1) 输入您的请求或上传初始图像
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的3D对象
+
+### SV34D:
+
+1) 上传初始图像(用于3D)或视频(用于4D)
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的3D视频
+
+### Zero123Plus:
+
+1) 上传初始图像
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮获取生成的图像3D旋转
+
+# 音频:
+
+### StableAudio:
+
+1) 根据您需要的参数设置模型
+2) 输入您的请求
+3) 点击`Submit`按钮获取生成的音频
+
+### AudioCraft:
+
+1) 从下拉列表中选择一个模型
+2) 选择模型类型(`musicgen`、`audiogen`或`magnet`)
+3) 根据您需要的参数设置模型
+4) 输入您的请求
+5) (可选)如果您使用`melody`模型,请上传初始音频
+6) 点击`Submit`按钮获取生成的音频
+#### 可选:您可以启用`multiband diffusion`来改进生成的音频
+
+### AudioLDM 2:
+
+1) 从下拉列表中选择一个模型
+2) 根据您需要的参数设置模型
+3) 输入您的请求
+4) 点击`Submit`按钮获取生成的音频
+
+### SunoBark:
+
+1) 输入您的请求
+2) 根据您需要的参数设置模型
+3) 点击`Submit`按钮接收生成的音频响应
+
+### RVC:
+
+1) 首先将您的模型上传到文件夹:*inputs/audio/rvc_models*
+2) 上传初始音频
+3) 从下拉列表中选择您的模型
+4) 根据您需要的参数设置模型
+5) 点击`Submit`按钮接收生成的语音克隆
+
+### UVR:
+
+1) 上传要分离的初始音频
+2) 点击`Submit`按钮获取分离后的音频
+
+### Demucs:
+
+1) 上传要分离的初始音频
+2) 点击`Submit`按钮获取分离后的音频
+
+# 附加功能(图像、视频、音频):
+
+1) 上传初始文件
+2) 选择您需要的选项
+3) 点击`Submit`按钮获取修改后的文件
+
+### Upscale(Real-ESRGAN):
+
+1) 上传初始图像
+2) 选择您的模型
+3) 根据您需要的参数设置模型
+4) 点击`Submit`按钮获取放大后的图像
+
+### FaceSwap:
+
+1) 上传源面部图像
+2) 上传目标面部图像或视频
+3) 选择您需要的选项
+4) 点击`Submit`按钮获取换脸后的图像
+#### 可选:您可以启用FaceRestore来放大和恢复您的面部图像/视频
+
+### MetaData-Info:
+
+1) 上传生成的文件
+2) 点击`Submit`按钮获取文件的元数据信息
+
+# 界面:
+
+### Wiki:
+
+* 在这里您可以查看项目的在线或离线wiki
+
+### Gallery:
+
+* 在这里您可以查看outputs目录中的文件
+
+### ModelDownloader:
+
+* 在这里您可以下载`LLM`和`StableDiffusion`模型。只需从下拉列表中选择模型,然后点击`Submit`按钮
+#### `LLM`模型下载到这里:*inputs/text/llm_models*
+#### `StableDiffusion`模型下载到这里:*inputs/image/sd_models*
+
+### Settings:
+
+* 在这里您可以更改应用程序设置
+
+### System:
+
+* 在这里您可以查看计算机传感器的指标
+
+### 附加信息:
+
+1) 所有生成的内容都保存在*outputs*文件夹中。您可以使用`Outputs`按钮打开*outputs*文件夹
+2) 您可以使用`Close terminal`按钮关闭应用程序
+
+## 我在哪里可以获取模型和语音?
+
+* LLM模型可以从[HuggingFace](https://huggingface.co/models)获取,或者从界面内的ModelDownloader获取
+* StableDiffusion、vae、inpaint、embedding和lora模型可以从[CivitAI](https://civitai.com/models)获取,或者从界面内的ModelDownloader获取
+* RVC模型可以从[VoiceModels](https://voice-models.com)获取
+* StableAudio、AudioCraft、AudioLDM 2、TTS、Whisper、MMS、SeamlessM4Tv2、Wav2Lip、LivePortrait、SunoBark、MoonDream2、Upscalers(Latent和Real-ESRGAN)、Refiner、GLIGEN、DiffEdit、BLIP-Diffusion、Depth、Marigold、Pix2Pix、Controlnet、AnimateDiff、HotShot-XL、Videos、LDM3D、SD3、Cascade、T2I-IP-ADAPTER、IP-Adapter-FaceID、Riffusion、Rembg、Roop、CodeFormer、DDColor、PixelOE、Real-ESRGAN、StableFast3D、Shap-E、SV34D、Zero123Plus、UVR、Demucs、Kandinsky、Flux、HunyuanDiT、Lumina-T2X、Kolors、AuraFlow、AuraSR、Würstchen、DeepFloydIF、PixArt、PlaygroundV2.5、ModelScope、ZeroScope 2、CogVideoX、MagicPrompt、Latte和Multiband diffusion模型在使用时会自动下载到*inputs*文件夹中
+* 您可以从任何地方获取语音。录制您自己的声音或从互联网上获取录音。或者直接使用项目中已有的语音。主要是要经过预处理!
+
+## 已知问题:
+
+* SeamlessM4T的`both generations`参数不适用于音频
+* FLUX的`Enable quantized`和`Quantized models`参数完全不起作用
+* RVC、Supir和SV34D完全无法工作
+
+## 未来计划:
+
+* FLUX:制作img2img、inpainting、controlnet-union子选项卡,并提供相应功能
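
The wiki added above directs users to fetch LLM and StableDiffusion models from HuggingFace or CivitAI into the `inputs` folders (or to use the built-in ModelDownloader). For readers who prefer to script that step, a minimal sketch with `huggingface_hub` could look like the following; the repo id and target folder name are illustrative placeholders, and the presence of `huggingface_hub` in the project's virtual environment is an assumption, since the patch itself does not prescribe any particular download method.

```python
# Minimal sketch: fetch a model repository into the folder the WebUI scans.
# Assumptions: huggingface_hub is installed in the project's venv; the repo id
# below is a placeholder, not something named in this patch.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="some-org/some-llm-model",                    # placeholder repo id
    local_dir="inputs/text/llm_models/some-llm-model",    # folder named in the wiki
)
```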
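
Likewise, the wiki repeats that voice samples must be preprocessed (22050 Hz sample rate, mono, WAV) before being placed in `inputs/audio/voices`. A minimal sketch of that conversion, assuming `librosa` and `soundfile` are available and using an illustrative input file name:

```python
# Minimal sketch: resample an arbitrary recording to 22050 Hz mono WAV and
# place it in the voices folder the wiki points at.
# Assumptions: librosa and soundfile are installed; "my_recording.mp3" is an
# illustrative file name, not something shipped with the project.
import librosa
import soundfile as sf

audio, sample_rate = librosa.load("my_recording.mp3", sr=22050, mono=True)
sf.write("inputs/audio/voices/my_voice.wav", audio, sample_rate)
```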