diff --git a/Install.bat b/Install.bat
index 97a6514b..950192e9 100644
--- a/Install.bat
+++ b/Install.bat
@@ -57,6 +57,7 @@ if "%INSTALL_TYPE%"=="CPU" (
)
echo INSTALL_TYPE=%INSTALL_TYPE%> "%CURRENT_DIR%install_config.txt"
+pip install https://huggingface.co/madbuda/triton-windows-builds/resolve/main/triton-3.0.0-cp310-cp310-win_amd64.whl 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> %ERROR_LOG%
diff --git a/Install.sh b/Install.sh
index 3d4d7bd8..f26002a0 100644
--- a/Install.sh
+++ b/Install.sh
@@ -56,6 +56,7 @@ else
fi
echo "INSTALL_TYPE=$INSTALL_TYPE" > "$CURRENT_DIR/install_config.txt"
+pip install triton==3.0.0 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> "$ERROR_LOG"
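Both installers now pin Triton 3.0.0 before building AutoGPTQ, AutoAWQ and exllamav2: Linux pulls it from PyPI, while Windows uses madbuda's prebuilt cp310 wheel, since PyPI ships no official Windows binaries. A minimal post-install sanity check, assuming the pin succeeded:

```python
# Minimal sanity check, assuming the install step above pinned triton 3.0.0:
import triton

assert triton.__version__.startswith("3.0"), f"unexpected triton {triton.__version__}"
```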
diff --git a/LaunchFile/app.py b/LaunchFile/app.py
index b260ef3a..1e3019de 100644
--- a/LaunchFile/app.py
+++ b/LaunchFile/app.py
@@ -15,6 +15,7 @@
temp_dir = os.path.join("temp")
os.makedirs(temp_dir, exist_ok=True)
os.environ["TMPDIR"] = temp_dir
+sys.modules['triton'] = None
from threading import Thread
import gradio as gr
import langdetect
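Registering None under 'triton' in sys.modules makes any later `import triton` inside the app process raise ImportError immediately, so downstream libraries with optional Triton code paths fall back to their non-Triton implementations. A minimal sketch of the trick:

```python
# Sketch of the stubbing trick above: a None entry in sys.modules makes the
# interpreter treat the module as unavailable and fail the import fast.
import sys

sys.modules['triton'] = None
try:
    import triton  # noqa: F401
except ImportError:
    print("triton import blocked as intended")
```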
@@ -115,6 +116,7 @@ def wrapper():
diffusers = lazy_import('diffusers', '')
BlipDiffusionPipeline = lazy_import('diffusers.pipelines', 'BlipDiffusionPipeline')
StableDiffusionPipeline = lazy_import('diffusers', 'StableDiffusionPipeline')
+StableDiffusionPanoramaPipeline = lazy_import('diffusers', 'StableDiffusionPanoramaPipeline')
StableDiffusion3Pipeline = lazy_import('diffusers', 'StableDiffusion3Pipeline')
StableDiffusionXLPipeline = lazy_import('diffusers', 'StableDiffusionXLPipeline')
StableDiffusionImg2ImgPipeline = lazy_import('diffusers', 'StableDiffusionImg2ImgPipeline')
@@ -2047,7 +2049,7 @@ def generate_image_txt2img(prompt, negative_prompt, style_name, stable_diffusion
enable_freeu, freeu_s1, freeu_s2, freeu_b1, freeu_b2,
enable_sag, sag_scale, enable_pag, pag_scale, enable_token_merging, ratio,
enable_deepcache, cache_interval, cache_branch_id, enable_tgate, gate_step,
- enable_magicprompt, magicprompt_max_new_tokens, enable_cdvae, enable_taesd, output_format, progress=gr.Progress()):
+ enable_magicprompt, magicprompt_max_new_tokens, enable_cdvae, enable_taesd, enable_multidiffusion, circular_padding, output_format, progress=gr.Progress()):
global stop_signal
stop_signal = False
stop_idx = None
@@ -2145,6 +2147,10 @@ def generate_image_txt2img(prompt, negative_prompt, style_name, stable_diffusion
stable_diffusion_model = StableDiffusionXLPipeline().StableDiffusionXLPipeline.from_single_file(
stable_diffusion_model_path, use_safetensors=True, device_map="auto", attention_slice=1,
torch_dtype=torch_dtype, variant=variant, vae=vae_xl)
+ elif enable_multidiffusion:
+ stable_diffusion_model = StableDiffusionPanoramaPipeline().StableDiffusionPanoramaPipeline.from_single_file(
+ stable_diffusion_model_path, use_safetensors=True, device_map="auto",
+ torch_dtype=torch_dtype, variant=variant)
else:
if stable_diffusion_model_type == "SD":
stable_diffusion_model = StableDiffusionPipeline().StableDiffusionPipeline.from_single_file(
@@ -2494,6 +2500,21 @@ def combined_callback(stable_diffusion_model, i, t, callback_kwargs):
num_images_per_prompt=num_images_per_prompt,
generator=generator, callback_on_step_end=combined_callback,
callback_on_step_end_tensor_inputs=["latents"]).images
+ elif enable_multidiffusion:
+ images = stable_diffusion_model(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_inference_steps=stable_diffusion_steps,
+ guidance_scale=stable_diffusion_cfg,
+ height=stable_diffusion_height,
+ width=stable_diffusion_width,
+ clip_skip=stable_diffusion_clip_skip,
+ circular_padding=circular_padding,
+ num_images_per_prompt=num_images_per_prompt,
+ generator=generator,
+ callback_on_step_end=combined_callback,
+ callback_on_step_end_tensor_inputs=["latents"]
+ ).images
else:
compel_proc = Compel(tokenizer=stable_diffusion_model.tokenizer,
text_encoder=stable_diffusion_model.text_encoder)
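The new branch drives diffusers' MultiDiffusion panorama pipeline. For reference, a standalone sketch of the same call outside the app's lazy-import wrappers; the model id and prompt are placeholders, not values from this repository:

```python
# Standalone MultiDiffusion sketch; model id and prompt are placeholders.
import torch
from diffusers import StableDiffusionPanoramaPipeline

pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a photo of the dolomites",
    height=512,
    width=2048,             # wide canvases are where MultiDiffusion helps
    circular_padding=True,  # matches the new "Enable Circular padding" checkbox
).images[0]
image.save("panorama.png")
```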
@@ -10583,10 +10604,10 @@ def create_footer():
footer_html = """
🔥 diffusers: 0.31.0
- 📄 transformers: 4.45.2
+ 📄 transformers: 4.46.0
🦙 llama-cpp-python: 0.3.1
🖼️ stable-diffusion-cpp-python: 0.1.8
- ℹ️ gradio: 5.3.0
+ ℹ️ gradio: 5.4.0
"""
return gr.Markdown(footer_html)
@@ -10859,6 +10880,8 @@ def create_footer():
gr.Slider(minimum=32, maximum=256, value=50, step=1, label=_("MagicPrompt Max New Tokens", lang)),
gr.Checkbox(label=_("Enable CDVAE", lang), value=False),
gr.Checkbox(label=_("Enable TAESD", lang), value=False),
+ gr.Checkbox(label=_("Enable MultiDiffusion", lang), value=False),
+ gr.Checkbox(label=_("Enable Circular padding (for MultiDiffusion)", lang), value=False),
gr.Radio(choices=["png", "jpeg"], label=_("Select output format", lang), value="png", interactive=True)
],
additional_inputs_accordion=gr.Accordion(label=_("Additional StableDiffusion Settings", lang), open=False),
diff --git a/RequirementsFiles/requirements.txt b/RequirementsFiles/requirements.txt
index 369c0ed0..bbaed389 100644
--- a/RequirementsFiles/requirements.txt
+++ b/RequirementsFiles/requirements.txt
@@ -86,7 +86,7 @@ GitPython==3.1.43
google-pasta==0.2.0
GPUtil==1.4.0
gpytoolbox==0.3.2
-gradio==5.3.0
+gradio==5.4.0
gradio_client==1.4.2
grpcio==1.62.2
gruut==2.2.3
@@ -217,7 +217,7 @@ python-crfsuite==0.9.10
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-ffmpeg==2.0.12
-python-multipart==0.0.9
+python-multipart==0.0.12
pytorch-lightning==2.4.0
pytz==2024.1
PyWavelets==1.6.0
@@ -232,6 +232,7 @@ rich==13.7.1
rpds-py==0.18.0
ruff==0.4.2
safetensors==0.4.3
+safehttpx==0.1.1
scikit-image==0.23.2
scikit-learn==1.4.2
scipy==1.11.4
@@ -281,7 +282,7 @@ torchsde==0.2.6
tqdm==4.66.5
trainer==0.0.36
trampoline==0.1.2
-transformers==4.45.2
+transformers==4.46.0
treetable==0.2.5
trimesh==4.4.7
trio==0.25.0
diff --git a/Update.bat b/Update.bat
index e9f36758..449451f9 100644
--- a/Update.bat
+++ b/Update.bat
@@ -49,6 +49,7 @@ if "%INSTALL_TYPE%"=="CPU" (
pip install --no-deps -r "%CURRENT_DIR%RequirementsFiles\requirements-stable-diffusion-cpp.txt" 2>> %ERROR_LOG%
)
+pip install https://huggingface.co/madbuda/triton-windows-builds/resolve/main/triton-3.0.0-cp310-cp310-win_amd64.whl 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> %ERROR_LOG%
diff --git a/Update.sh b/Update.sh
index d6bc2a66..e90ea4b5 100644
--- a/Update.sh
+++ b/Update.sh
@@ -48,6 +48,7 @@ else
pip install --no-deps -r "$CURRENT_DIR/RequirementsFiles/requirements-stable-diffusion-cpp.txt" 2>> "$ERROR_LOG"
fi
+pip install triton==3.0.0 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> "$ERROR_LOG"
diff --git a/Venv.bat b/Venv.bat
index 6e857716..8ecacfac 100644
--- a/Venv.bat
+++ b/Venv.bat
@@ -32,9 +32,8 @@ if "%choice%"=="3" (
if /i "%confirm%"=="y" (
echo Deleting application...
cd ..
- rd /s /q "%~dp0"
- echo Application deleted successfully.
- pause
+    rem A variable set inside this block would need delayed expansion; use %CURRENT_DIR% directly in a detached shell
+    start cmd /c "timeout /t 1 & rd /s /q "%CURRENT_DIR%" & exit"
exit
)
goto menu
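`rd /s /q "%~dp0"` presumably failed here because the running batch file keeps its own directory locked. The replacement hands deletion to a detached cmd instance: the one-second `timeout` gives this script time to exit before `rd` runs. A Python sketch of the same deferred-delete pattern, with a placeholder path:

```python
# Python sketch of the deferred-delete pattern (Windows only); the path is a
# placeholder, not a value from this repository.
import subprocess
import sys

subprocess.Popen(
    'timeout /t 1 & rd /s /q "C:\\placeholder\\app_dir"',
    shell=True,
    creationflags=subprocess.CREATE_NEW_CONSOLE,
)
sys.exit(0)  # exit promptly so the detached shell can remove the directory
```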
diff --git a/Venv.sh b/Venv.sh
index 3a77500a..642f32e8 100644
--- a/Venv.sh
+++ b/Venv.sh
@@ -30,7 +30,7 @@ while true; do
read -p "Are you sure you want to delete the application? (y/n): " confirm
if [[ $confirm == [Yy]* ]]; then
echo "Deleting application..."
- cd ..
+ cd .. || exit 1
rm -rf "${CURRENT_DIR}"
echo "Application deleted successfully."
exit 0
diff --git a/first_setup.py b/first_setup.py
index 89963c30..6326d36f 100644
--- a/first_setup.py
+++ b/first_setup.py
@@ -1,7 +1,7 @@
import json
import os
import sys
-from typing import Dict, Optional
+from typing import Dict, Optional, Tuple
def load_settings() -> Dict:
@@ -55,6 +55,28 @@ def select_auto_launch() -> bool:
print("\nНеверный выбор! / Invalid choice! / 选择无效!")
+def input_auth_credentials() -> Tuple[str, str]:
+ print("\nВведите логин и пароль (формат login:password) или нажмите Enter для пропуска:")
+ print("Enter login and password (format login:password) or press Enter to skip:")
+ print("输入登录名和密码(格式 login:password)或按 Enter 跳过:")
+
+ credentials = input().strip()
+
+ if not credentials:
+ return "admin", "admin"
+
+ try:
+        username, password = credentials.split(':', 1)
+ if username and password:
+ return username.strip(), password.strip()
+ raise ValueError
+ except ValueError:
+ print("\nНеверный формат! Используется значение по умолчанию (admin:admin)")
+ print("Invalid format! Using default value (admin:admin)")
+ print("格式无效!使用默认值 (admin:admin)")
+ return "admin", "admin"
+
+
def input_hf_token() -> Optional[str]:
print("\nВведите ваш Hugging Face токен (или нажмите Enter для пропуска):")
print("Enter your Hugging Face token (or press Enter to skip):")
@@ -90,6 +112,10 @@ def main():
auto_launch = select_auto_launch()
settings['auto_launch'] = auto_launch
+ username, password = input_auth_credentials()
+    settings.setdefault('auth', {})['username'] = username
+ settings['auth']['password'] = password
+
token = input_hf_token()
if token:
settings['hf_token'] = token
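first_setup.py now asks for login:password (defaulting to admin:admin) and stores it under settings['auth']. Presumably these credentials feed Gradio's built-in authentication; a minimal sketch of that consumption, with the settings filename an assumption:

```python
# Minimal sketch of consuming the stored credentials; the Settings.json
# filename is an assumption, not confirmed by this diff.
import json

import gradio as gr

with open("Settings.json", encoding="utf-8") as f:
    settings = json.load(f)

demo = gr.Interface(fn=lambda s: s, inputs="text", outputs="text")
demo.launch(auth=(settings["auth"]["username"], settings["auth"]["password"]))
```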
diff --git a/translations/ru.json b/translations/ru.json
index e1495f66..57d735e6 100644
--- a/translations/ru.json
+++ b/translations/ru.json
@@ -18,6 +18,8 @@
"Context batch (N_BATCH) for llama type models": "Размер пакета контекста (N_BATCH) для моделей типа llama",
"Min P": "Минимальное P",
"Typical P": "Типичное P",
+ "Enable MultiDiffusion": "Включить мультидиффузию",
+ "Enable Circular padding (for MultiDiffusion)": "Включить круговое заполнение (для мультидиффузии)",
"Stop sequences (optional)": "Последовательности остановки (необязательно)",
"TTS Repetition penalty": "TTS Штраф за повторение",
"TTS Length penalty": "TTS Штраф за длину",
diff --git a/translations/zh.json b/translations/zh.json
index 8ba8e7b8..970255d8 100644
--- a/translations/zh.json
+++ b/translations/zh.json
@@ -18,6 +18,8 @@
"Context batch (N_BATCH) for llama type models": "llama类型模型的上下文批次 (N_BATCH)",
"Min P": "最小P值",
"Typical P": "典型P值",
+ "Enable MultiDiffusion": "启用多重扩散",
+ "Enable Circular padding (for MultiDiffusion)": "启用循环填充(用于多重扩散)",
"Stop sequences (optional)": "停止序列(可选)",
"TTS Repetition penalty": "TTS重复惩罚",
"TTS Length penalty": "TTS长度惩罚",