diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml
new file mode 100644
index 00000000000..95069175517
--- /dev/null
+++ b/.github/workflows/test-ui.yaml
@@ -0,0 +1,26 @@
+name: Tests CI
+
+on: [push, pull_request]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v3
+ with:
+ node-version: 18
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+ - name: Install requirements
+ run: |
+ python -m pip install --upgrade pip
+ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+ pip install -r requirements.txt
+ - name: Run Tests
+ run: |
+ npm ci
+ npm run test:generate
+ npm test
+ working-directory: ./tests-ui
diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml
new file mode 100644
index 00000000000..aafe8a21444
--- /dev/null
+++ b/.github/workflows/windows_release_dependencies.yml
@@ -0,0 +1,67 @@
+name: "Windows Release dependencies"
+
+on:
+ workflow_dispatch:
+ inputs:
+ xformers:
+ description: 'xformers version'
+ required: false
+ type: string
+ default: ""
+ cu:
+ description: 'cuda version'
+ required: true
+ type: string
+ default: "121"
+
+ python_minor:
+ description: 'python minor version'
+ required: true
+ type: string
+ default: "11"
+
+ python_patch:
+ description: 'python patch version'
+ required: true
+ type: string
+ default: "6"
+# push:
+# branches:
+# - master
+
+jobs:
+ build_dependencies:
+ runs-on: windows-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }}
+
+ - shell: bash
+ run: |
+ echo "@echo off
+ ..\python_embeded\python.exe .\update.py ..\ComfyUI\\
+ echo -
+          echo This will try to update pytorch and all python dependencies. If you get an error, wait for pytorch/xformers to fix their stuff.
+          echo You should not be running this anyway unless you really have to.
+ echo -
+ echo If you just want to update normally, close this and run update_comfyui.bat instead.
+ echo -
+ pause
+ ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2
+ pause" > update_comfyui_and_python_dependencies.bat
+
+ python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements.txt pygit2 -w ./temp_wheel_dir
+ python -m pip install --no-cache-dir ./temp_wheel_dir/*
+ echo installed basic
+ ls -lah temp_wheel_dir
+ mv temp_wheel_dir cu${{ inputs.cu }}_python_deps
+ tar cf cu${{ inputs.cu }}_python_deps.tar cu${{ inputs.cu }}_python_deps
+
+ - uses: actions/cache/save@v3
+ with:
+ path: |
+ cu${{ inputs.cu }}_python_deps.tar
+ update_comfyui_and_python_dependencies.bat
+ key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }}
diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml
index 319942e7c58..b793f7fe2b2 100644
--- a/.github/workflows/windows_release_nightly_pytorch.yml
+++ b/.github/workflows/windows_release_nightly_pytorch.yml
@@ -20,12 +20,12 @@ jobs:
persist-credentials: false
- uses: actions/setup-python@v4
with:
- python-version: '3.11.3'
+ python-version: '3.11.6'
- shell: bash
run: |
cd ..
cp -r ComfyUI ComfyUI_copy
- curl https://www.python.org/ftp/python/3.11.3/python-3.11.3-embed-amd64.zip -o python_embeded.zip
+ curl https://www.python.org/ftp/python/3.11.6/python-3.11.6-embed-amd64.zip -o python_embeded.zip
unzip python_embeded.zip -d python_embeded
cd python_embeded
echo 'import site' >> ./python311._pth
diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml
new file mode 100644
index 00000000000..87d37c24d89
--- /dev/null
+++ b/.github/workflows/windows_release_package.yml
@@ -0,0 +1,100 @@
+name: "Windows Release packaging"
+
+on:
+ workflow_dispatch:
+ inputs:
+ cu:
+ description: 'cuda version'
+ required: true
+ type: string
+ default: "121"
+
+ python_minor:
+ description: 'python minor version'
+ required: true
+ type: string
+ default: "11"
+
+ python_patch:
+ description: 'python patch version'
+ required: true
+ type: string
+ default: "6"
+# push:
+# branches:
+# - master
+
+jobs:
+ package_comfyui:
+ permissions:
+ contents: "write"
+ packages: "write"
+ pull-requests: "read"
+ runs-on: windows-latest
+ steps:
+ - uses: actions/cache/restore@v3
+ id: cache
+ with:
+ path: |
+ cu${{ inputs.cu }}_python_deps.tar
+ update_comfyui_and_python_dependencies.bat
+ key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }}
+ - shell: bash
+ run: |
+ mv cu${{ inputs.cu }}_python_deps.tar ../
+ mv update_comfyui_and_python_dependencies.bat ../
+ cd ..
+ tar xf cu${{ inputs.cu }}_python_deps.tar
+ pwd
+ ls
+
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ persist-credentials: false
+ - shell: bash
+ run: |
+ cd ..
+ cp -r ComfyUI ComfyUI_copy
+ curl https://www.python.org/ftp/python/3.${{ inputs.python_minor }}.${{ inputs.python_patch }}/python-3.${{ inputs.python_minor }}.${{ inputs.python_patch }}-embed-amd64.zip -o python_embeded.zip
+ unzip python_embeded.zip -d python_embeded
+ cd python_embeded
+ echo 'import site' >> ./python3${{ inputs.python_minor }}._pth
+ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+ ./python.exe get-pip.py
+ ./python.exe -s -m pip install ../cu${{ inputs.cu }}_python_deps/*
+ sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth
+ cd ..
+
+ git clone https://github.com/comfyanonymous/taesd
+ cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/
+
+ mkdir ComfyUI_windows_portable
+ mv python_embeded ComfyUI_windows_portable
+ mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
+
+ cd ComfyUI_windows_portable
+
+ mkdir update
+ cp -r ComfyUI/.ci/update_windows/* ./update/
+ cp -r ComfyUI/.ci/windows_base_files/* ./
+ cp ../update_comfyui_and_python_dependencies.bat ./update/
+
+ cd ..
+
+ "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
+ mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z
+
+ cd ComfyUI_windows_portable
+ python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
+
+ ls
+
+ - name: Upload binaries to release
+ uses: svenstaro/upload-release-action@v2
+ with:
+ repo_token: ${{ secrets.GITHUB_TOKEN }}
+ file: new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z
+ tag: "latest"
+ overwrite: true
+
diff --git a/.gitignore b/.gitignore
index 98d91318d3d..43c038e4161 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,4 @@ venv/
/web/extensions/*
!/web/extensions/logging.js.example
!/web/extensions/core/
+/tests-ui/data/object_info.json
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000000..202121e10fc
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,9 @@
+{
+ "path-intellisense.mappings": {
+ "../": "${workspaceFolder}/web/extensions/core"
+ },
+ "[python]": {
+ "editor.defaultFormatter": "ms-python.autopep8"
+ },
+ "python.formatting.provider": "none"
+}
diff --git a/README.md b/README.md
index d83b4bdac7f..af1f2281158 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
ComfyUI
=======
-A powerful and modular stable diffusion GUI and backend.
+The most powerful and modular stable diffusion GUI and backend.
-----------
![ComfyUI Screenshot](comfyui_screenshot.png)
@@ -11,7 +11,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
## Features
- Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
-- Fully supports SD1.x, SD2.x and SDXL
+- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/) and [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/)
- Asynchronous Queue system
- Many optimizations: Only re-executes the parts of the workflow that changes between executions.
- Command line option: ```--lowvram``` to make it work on GPUs with less than 3GB vram (enabled automatically on GPUs with low vram)
@@ -30,6 +30,8 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
- [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/)
- [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/)
- [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/)
+- [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/)
+- [SDXL Turbo](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/)
- Latent previews with [TAESD](#how-to-show-high-quality-previews)
- Starts up very fast.
- Works fully offline: will never download anything.
@@ -46,6 +48,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
+| Alt + C | Collapse/uncollapse selected nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Ctrl + B | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) |
| Delete/Backspace | Delete selected nodes |
@@ -69,7 +72,7 @@ Ctrl can also be replaced with Cmd instead for macOS users
There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases).
-### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z)
+### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_cu121_or_cpu.7z)
Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints
@@ -89,19 +92,21 @@ Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints
Put your VAE in: models/vae
+Note: pytorch does not support python 3.12 yet, so make sure your python version is 3.11 or earlier.
+
### AMD GPUs (Linux only)
AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:
-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```
+```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6```
-This is the command to install the nightly with ROCm 5.6 that supports the 7000 series and might have some performance improvements:
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6```
+This is the command to install the nightly with ROCm 5.7 that might have some performance improvements:
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.7```
### NVIDIA
-Nvidia users should install torch and xformers using this command:
+Nvidia users should install pytorch using this command:
-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121```
#### Troubleshooting
diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py
index 25148313117..76a525b378a 100644
--- a/comfy/cldm/cldm.py
+++ b/comfy/cldm/cldm.py
@@ -27,15 +27,13 @@ def __init__(
model_channels,
hint_channels,
num_res_blocks,
- attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
- use_fp16=False,
- use_bf16=False,
+ dtype=torch.float32,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
@@ -53,8 +51,10 @@ def __init__(
use_linear_in_transformer=False,
adm_in_channels=None,
transformer_depth_middle=None,
+ transformer_depth_output=None,
device=None,
operations=comfy.ops,
+ **kwargs,
):
super().__init__()
assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
@@ -80,10 +80,7 @@ def __init__(
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
- if isinstance(transformer_depth, int):
- transformer_depth = len(channel_mult) * [transformer_depth]
- if transformer_depth_middle is None:
- transformer_depth_middle = transformer_depth[-1]
+
if isinstance(num_res_blocks, int):
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
else:
@@ -91,25 +88,22 @@ def __init__(
raise ValueError("provide num_res_blocks either as an int (globally constant) or "
"as a list/tuple (per-level) with the same length as channel_mult")
self.num_res_blocks = num_res_blocks
+
if disable_self_attentions is not None:
# should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
assert len(disable_self_attentions) == len(channel_mult)
if num_attention_blocks is not None:
assert len(num_attention_blocks) == len(self.num_res_blocks)
assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
- print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
- f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
- f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
- f"attention will still not be set.")
- self.attention_resolutions = attention_resolutions
+ transformer_depth = transformer_depth[:]
+
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.dtype = th.bfloat16 if use_bf16 else self.dtype
+ self.dtype = dtype
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
@@ -182,11 +176,14 @@ def __init__(
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
- operations=operations
+ dtype=self.dtype,
+ device=device,
+ operations=operations,
)
]
ch = mult * model_channels
- if ds in attention_resolutions:
+ num_transformers = transformer_depth.pop(0)
+ if num_transformers > 0:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
@@ -203,9 +200,9 @@ def __init__(
if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
layers.append(
SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
+ ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint, operations=operations
+ use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
@@ -225,11 +222,13 @@ def __init__(
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
+ dtype=self.dtype,
+ device=device,
operations=operations
)
if resblock_updown
else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch, operations=operations
+ ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device, operations=operations
)
)
)
@@ -247,7 +246,7 @@ def __init__(
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- self.middle_block = TimestepEmbedSequential(
+ mid_block = [
ResBlock(
ch,
time_embed_dim,
@@ -255,12 +254,15 @@ def __init__(
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
+ dtype=self.dtype,
+ device=device,
operations=operations
- ),
- SpatialTransformer( # always uses a self-attn
+ )]
+ if transformer_depth_middle >= 0:
+ mid_block += [SpatialTransformer( # always uses a self-attn
ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint, operations=operations
+ use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations
),
ResBlock(
ch,
@@ -269,9 +271,11 @@ def __init__(
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
+ dtype=self.dtype,
+ device=device,
operations=operations
- ),
- )
+ )]
+ self.middle_block = TimestepEmbedSequential(*mid_block)
self.middle_block_out = self.make_zero_conv(ch, operations=operations)
self._feature_size += ch
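
For reference, a minimal sketch of the per-block `transformer_depth` handling that replaces `attention_resolutions` in `cldm.py`: one depth value is popped per res block, and a `SpatialTransformer` is only inserted when that value is greater than zero. The values below are made up purely for illustration; the real list comes from model detection.

```python
# Illustrative only: made-up channel_mult / num_res_blocks / transformer_depth
# values showing how one depth entry is consumed per res block.
channel_mult = (1, 2, 4)
num_res_blocks = [2, 2, 2]
transformer_depth = [0, 0, 2, 2, 10, 10]   # one entry per res block, per level

depth = transformer_depth[:]               # the constructor copies the list before popping
for level, mult in enumerate(channel_mult):
    for nr in range(num_res_blocks[level]):
        num_transformers = depth.pop(0)
        if num_transformers > 0:
            print(f"level {level}, block {nr}: SpatialTransformer(depth={num_transformers})")
        else:
            print(f"level {level}, block {nr}: no attention")
```
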
diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index ffae81c49d1..72fce10872f 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -36,9 +36,12 @@ def __call__(self, parser, namespace, values, option_string=None):
parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
+parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.")
+
parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.")
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
+parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
@@ -52,11 +55,20 @@ def __call__(self, parser, namespace, values, option_string=None):
fp_group.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).")
fp_group.add_argument("--force-fp16", action="store_true", help="Force fp16.")
+parser.add_argument("--bf16-unet", action="store_true", help="Run the UNET in bf16. This should only be used for testing stuff.")
+
fpvae_group = parser.add_mutually_exclusive_group()
fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.")
fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.")
fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.")
+fpte_group = parser.add_mutually_exclusive_group()
+fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true", help="Store text encoder weights in fp8 (e4m3fn variant).")
+fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store text encoder weights in fp8 (e5m2 variant).")
+fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.")
+fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.")
+
+
parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.")
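
The new text-encoder precision flags form a mutually exclusive argparse group, so only one of them can be passed at a time. A stand-alone sketch (flag names taken from the diff above; the parser here is illustrative, not `comfy.cli_args` itself):

```python
# Stand-alone illustration of the mutually exclusive text-encoder precision flags.
import argparse

parser = argparse.ArgumentParser()
fpte_group = parser.add_mutually_exclusive_group()
fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true")
fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true")
fpte_group.add_argument("--fp16-text-enc", action="store_true")
fpte_group.add_argument("--fp32-text-enc", action="store_true")

print(parser.parse_args(["--fp16-text-enc"]))   # fp16_text_enc=True, the rest False
# parser.parse_args(["--fp16-text-enc", "--fp32-text-enc"]) would exit with an error
```
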
diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index 1206c680d61..9e2e03d7238 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -1,5 +1,5 @@
-from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor, modeling_utils
-from .utils import load_torch_file, transformers_convert
+from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, modeling_utils
+from .utils import load_torch_file, transformers_convert, common_upscale
import os
import torch
import contextlib
@@ -7,6 +7,18 @@
import comfy.ops
import comfy.model_patcher
import comfy.model_management
+import comfy.utils
+
+def clip_preprocess(image, size=224):
+ mean = torch.tensor([ 0.48145466,0.4578275,0.40821073], device=image.device, dtype=image.dtype)
+ std = torch.tensor([0.26862954,0.26130258,0.27577711], device=image.device, dtype=image.dtype)
+ scale = (size / min(image.shape[1], image.shape[2]))
+ image = torch.nn.functional.interpolate(image.movedim(-1, 1), size=(round(scale * image.shape[1]), round(scale * image.shape[2])), mode="bicubic", antialias=True)
+ h = (image.shape[2] - size)//2
+ w = (image.shape[3] - size)//2
+ image = image[:,:,h:h+size,w:w+size]
+ image = torch.clip((255. * image), 0, 255).round() / 255.0
+ return (image - mean.view([3,1,1])) / std.view([3,1,1])
class ClipVisionModel():
def __init__(self, json_config):
@@ -23,25 +35,12 @@ def __init__(self, json_config):
self.model.to(self.dtype)
self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
- self.processor = CLIPImageProcessor(crop_size=224,
- do_center_crop=True,
- do_convert_rgb=True,
- do_normalize=True,
- do_resize=True,
- image_mean=[ 0.48145466,0.4578275,0.40821073],
- image_std=[0.26862954,0.26130258,0.27577711],
- resample=3, #bicubic
- size=224)
-
def load_sd(self, sd):
return self.model.load_state_dict(sd, strict=False)
def encode_image(self, image):
- img = torch.clip((255. * image), 0, 255).round().int()
- img = list(map(lambda a: a, img))
- inputs = self.processor(images=img, return_tensors="pt")
comfy.model_management.load_model_gpu(self.patcher)
- pixel_values = inputs['pixel_values'].to(self.load_device)
+ pixel_values = clip_preprocess(image.to(self.load_device))
if self.dtype != torch.float32:
precision_scope = torch.autocast
@@ -92,8 +91,11 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json")
elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd:
json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json")
- else:
+ elif "vision_model.encoder.layers.22.layer_norm1.weight" in sd:
json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json")
+ else:
+ return None
+
clip = ClipVisionModel(json_config)
m, u = clip.load_sd(sd)
if len(m) > 0:
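
A hedged usage sketch of the new `clip_preprocess()`, which replaces the transformers `CLIPImageProcessor`: the `[batch, height, width, channels]` layout with values in 0..1 is inferred from the `movedim(-1, 1)` call and from how ComfyUI passes IMAGE tensors; it is an assumption, not something stated in this diff.

```python
import torch
from comfy.clip_vision import clip_preprocess

image = torch.rand(1, 512, 768, 3)      # dummy [batch, height, width, channels] image in 0..1
pixel_values = clip_preprocess(image)   # scale shortest side to 224, center crop, CLIP-normalize
print(pixel_values.shape)               # torch.Size([1, 3, 224, 224])
```
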
diff --git a/comfy/conds.py b/comfy/conds.py
new file mode 100644
index 00000000000..6cff2518400
--- /dev/null
+++ b/comfy/conds.py
@@ -0,0 +1,79 @@
+import enum
+import torch
+import math
+import comfy.utils
+
+
+def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
+ return abs(a*b) // math.gcd(a, b)
+
+class CONDRegular:
+ def __init__(self, cond):
+ self.cond = cond
+
+ def _copy_with(self, cond):
+ return self.__class__(cond)
+
+ def process_cond(self, batch_size, device, **kwargs):
+ return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device))
+
+ def can_concat(self, other):
+ if self.cond.shape != other.cond.shape:
+ return False
+ return True
+
+ def concat(self, others):
+ conds = [self.cond]
+ for x in others:
+ conds.append(x.cond)
+ return torch.cat(conds)
+
+class CONDNoiseShape(CONDRegular):
+ def process_cond(self, batch_size, device, area, **kwargs):
+ data = self.cond[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
+ return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device))
+
+
+class CONDCrossAttn(CONDRegular):
+ def can_concat(self, other):
+ s1 = self.cond.shape
+ s2 = other.cond.shape
+ if s1 != s2:
+ if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
+ return False
+
+ mult_min = lcm(s1[1], s2[1])
+ diff = mult_min // min(s1[1], s2[1])
+ if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
+ return False
+ return True
+
+ def concat(self, others):
+ conds = [self.cond]
+ crossattn_max_len = self.cond.shape[1]
+ for x in others:
+ c = x.cond
+ crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
+ conds.append(c)
+
+ out = []
+ for c in conds:
+ if c.shape[1] < crossattn_max_len:
+ c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result
+ out.append(c)
+ return torch.cat(out)
+
+class CONDConstant(CONDRegular):
+ def __init__(self, cond):
+ self.cond = cond
+
+ def process_cond(self, batch_size, device, **kwargs):
+ return self._copy_with(self.cond)
+
+ def can_concat(self, other):
+ if self.cond != other.cond:
+ return False
+ return True
+
+ def concat(self, others):
+ return self.cond
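
A short sketch of how the new `CONDCrossAttn` lets cross-attention conditionings of different token lengths be batched: the shorter tensor is padded by repetition up to the lcm of the two lengths, which does not change the attention result. The shapes are illustrative (77 tokens per CLIP chunk, 768-wide SD1.x embeddings).

```python
import torch
from comfy.conds import CONDCrossAttn

a = CONDCrossAttn(torch.randn(1, 77, 768))    # single prompt chunk
b = CONDCrossAttn(torch.randn(1, 154, 768))   # longer, two-chunk prompt

print(a.can_concat(b))            # True: lcm(77, 154) // 77 == 2, below the padding limit of 4
print(a.concat([b]).shape)        # torch.Size([2, 154, 768])
```
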
diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index ea219c7e560..433381df6ec 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -33,7 +33,7 @@ def __init__(self, device=None):
self.cond_hint_original = None
self.cond_hint = None
self.strength = 1.0
- self.timestep_percent_range = (1.0, 0.0)
+ self.timestep_percent_range = (0.0, 1.0)
self.timestep_range = None
if device is None:
@@ -42,7 +42,7 @@ def __init__(self, device=None):
self.previous_controlnet = None
self.global_average_pooling = False
- def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)):
+ def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(0.0, 1.0)):
self.cond_hint_original = cond_hint
self.strength = strength
self.timestep_percent_range = timestep_percent_range
@@ -132,6 +132,7 @@ def __init__(self, control_model, global_average_pooling=False, device=None):
self.control_model = control_model
self.control_model_wrapped = comfy.model_patcher.ModelPatcher(self.control_model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device())
self.global_average_pooling = global_average_pooling
+ self.model_sampling_current = None
def get_control(self, x_noisy, t, cond, batched_number):
control_prev = None
@@ -156,10 +157,13 @@ def get_control(self, x_noisy, t, cond, batched_number):
context = cond['c_crossattn']
- y = cond.get('c_adm', None)
+ y = cond.get('y', None)
if y is not None:
y = y.to(self.control_model.dtype)
- control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y)
+ timestep = self.model_sampling_current.timestep(t)
+ x_noisy = self.model_sampling_current.calculate_input(t, x_noisy)
+
+ control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(self.control_model.dtype), y=y)
return self.control_merge(None, control, control_prev, output_dtype)
def copy(self):
@@ -172,6 +176,14 @@ def get_models(self):
out.append(self.control_model_wrapped)
return out
+ def pre_run(self, model, percent_to_timestep_function):
+ super().pre_run(model, percent_to_timestep_function)
+ self.model_sampling_current = model.model_sampling
+
+ def cleanup(self):
+ self.model_sampling_current = None
+ super().cleanup()
+
class ControlLoraOps:
class Linear(torch.nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool = True,
@@ -292,8 +304,8 @@ def load_controlnet(ckpt_path, model=None):
controlnet_config = None
if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format
- use_fp16 = comfy.model_management.should_use_fp16()
- controlnet_config = comfy.model_detection.unet_config_from_diffusers_unet(controlnet_data, use_fp16)
+ unet_dtype = comfy.model_management.unet_dtype()
+ controlnet_config = comfy.model_detection.unet_config_from_diffusers_unet(controlnet_data, unet_dtype)
diffusers_keys = comfy.utils.unet_to_diffusers(controlnet_config)
diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"
@@ -353,8 +365,8 @@ def load_controlnet(ckpt_path, model=None):
return net
if controlnet_config is None:
- use_fp16 = comfy.model_management.should_use_fp16()
- controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16, True).unet_config
+ unet_dtype = comfy.model_management.unet_dtype()
+ controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, unet_dtype, True).unet_config
controlnet_config.pop("out_channels")
controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1]
control_model = comfy.cldm.cldm.ControlNet(**controlnet_config)
@@ -383,8 +395,7 @@ class WeightsLoader(torch.nn.Module):
missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
print(missing, unexpected)
- if use_fp16:
- control_model = control_model.half()
+ control_model = control_model.to(unet_dtype)
global_average_pooling = False
filename = os.path.splitext(ckpt_path)[0]
@@ -417,7 +428,7 @@ def get_control(self, x_noisy, t, cond, batched_number):
if control_prev is not None:
return control_prev
else:
- return {}
+ return None
if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
if self.cond_hint is not None:
diff --git a/comfy/diffusers_load.py b/comfy/diffusers_load.py
index a52e0102b73..c0b420e7966 100644
--- a/comfy/diffusers_load.py
+++ b/comfy/diffusers_load.py
@@ -31,6 +31,7 @@ def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_dire
vae = None
if output_vae:
- vae = comfy.sd.VAE(ckpt_path=vae_path)
+ sd = comfy.utils.load_torch_file(vae_path)
+ vae = comfy.sd.VAE(sd=sd)
return (unet, clip, vae)
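
For completeness, the VAE loading path after this change, sketched as a standalone snippet; the checkpoint path is a placeholder.

```python
import comfy.utils
import comfy.sd

sd = comfy.utils.load_torch_file("models/vae/example_vae.safetensors")  # placeholder path
vae = comfy.sd.VAE(sd=sd)   # the state dict is now passed in directly instead of a ckpt_path
```
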
diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py
index 7eaf6ff62b6..08bf0fc9e67 100644
--- a/comfy/extra_samplers/uni_pc.py
+++ b/comfy/extra_samplers/uni_pc.py
@@ -688,7 +688,7 @@ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order,
x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
else:
x_t_ = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dimss) * x
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
)
if x_t is None:
@@ -713,8 +713,8 @@ def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='tim
method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
atol=0.0078, rtol=0.05, corrector=False, callback=None, disable_pbar=False
):
- t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
- t_T = self.noise_schedule.T if t_start is None else t_start
+ # t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
+ # t_T = self.noise_schedule.T if t_start is None else t_start
device = x.device
steps = len(timesteps) - 1
if method == 'multistep':
@@ -769,8 +769,8 @@ def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='tim
callback(step_index, model_prev_list[-1], x, steps)
else:
raise NotImplementedError()
- if denoise_to_zero:
- x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
+ # if denoise_to_zero:
+ # x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
return x
@@ -833,21 +833,39 @@ def expand_dims(v, dims):
return v[(...,) + (None,)*(dims - 1)]
+class SigmaConvert:
+ schedule = ""
+ def marginal_log_mean_coeff(self, sigma):
+ return 0.5 * torch.log(1 / ((sigma * sigma) + 1))
-def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'):
- to_zero = False
- if sigmas[-1] == 0:
- timesteps = torch.nn.functional.interpolate(sigmas[None,None,:-1], size=(len(sigmas),), mode='linear')[0][0]
- to_zero = True
- else:
- timesteps = sigmas.clone()
+ def marginal_alpha(self, t):
+ return torch.exp(self.marginal_log_mean_coeff(t))
+
+ def marginal_std(self, t):
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
+
+ def marginal_lambda(self, t):
+ """
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
+ """
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
+ return log_mean_coeff - log_std
- alphas_cumprod = model.inner_model.alphas_cumprod
+def predict_eps_sigma(model, input, sigma_in, **kwargs):
+ sigma = sigma_in.view(sigma_in.shape[:1] + (1,) * (input.ndim - 1))
+ input = input * ((sigma ** 2 + 1.0) ** 0.5)
+ return (input - model(input, sigma_in, **kwargs)) / sigma
- for s in range(timesteps.shape[0]):
- timesteps[s] = (model.sigma_to_discrete_timestep(timesteps[s]) / 1000) + (1 / len(alphas_cumprod))
- ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+def sample_unipc(model, noise, image, sigmas, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'):
+ timesteps = sigmas.clone()
+ if sigmas[-1] == 0:
+ timesteps = sigmas[:]
+ timesteps[-1] = 0.001
+ else:
+ timesteps = sigmas.clone()
+ ns = SigmaConvert()
if image is not None:
img = image * ns.marginal_alpha(timesteps[0])
@@ -859,25 +877,18 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex
else:
img = noise
- if to_zero:
- timesteps[-1] = (1 / len(alphas_cumprod))
-
- device = noise.device
-
-
model_type = "noise"
model_fn = model_wrapper(
- model.predict_eps_discrete_timestep,
+ lambda input, sigma, **kwargs: predict_eps_sigma(model, input, sigma, **kwargs),
ns,
model_type=model_type,
guidance_type="uncond",
model_kwargs=extra_args,
)
- order = min(3, len(timesteps) - 1)
+ order = min(3, len(timesteps) - 2)
uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant)
x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable)
- if not to_zero:
- x /= ns.marginal_alpha(timesteps[-1])
+ x /= ns.marginal_alpha(timesteps[-1])
return x
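
A numeric sanity check of the `SigmaConvert` schedule introduced above: from `marginal_log_mean_coeff(sigma) = 0.5 * log(1 / (sigma^2 + 1))` it follows that `alpha(sigma) = 1 / sqrt(sigma^2 + 1)`, `std(sigma) = sigma / sqrt(sigma^2 + 1)` and `lambda(sigma) = -log(sigma)`, i.e. the VP-style coefficients for a k-diffusion sigma. The sigma values below are arbitrary.

```python
import torch
from comfy.extra_samplers.uni_pc import SigmaConvert

ns = SigmaConvert()
sigma = torch.tensor([0.5, 1.0, 14.6])

print(torch.allclose(ns.marginal_alpha(sigma), 1 / torch.sqrt(sigma ** 2 + 1)))    # True
print(torch.allclose(ns.marginal_std(sigma), sigma / torch.sqrt(sigma ** 2 + 1)))  # True
print(torch.allclose(ns.marginal_lambda(sigma), -torch.log(sigma), atol=1e-5))     # True
```
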
diff --git a/comfy/k_diffusion/external.py b/comfy/k_diffusion/external.py
deleted file mode 100644
index c1a137d9c0c..00000000000
--- a/comfy/k_diffusion/external.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import math
-
-import torch
-from torch import nn
-
-from . import sampling, utils
-
-
-class VDenoiser(nn.Module):
- """A v-diffusion-pytorch model wrapper for k-diffusion."""
-
- def __init__(self, inner_model):
- super().__init__()
- self.inner_model = inner_model
- self.sigma_data = 1.
-
- def get_scalings(self, sigma):
- c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
- c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
- c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
- return c_skip, c_out, c_in
-
- def sigma_to_t(self, sigma):
- return sigma.atan() / math.pi * 2
-
- def t_to_sigma(self, t):
- return (t * math.pi / 2).tan()
-
- def loss(self, input, noise, sigma, **kwargs):
- c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
- noised_input = input + noise * utils.append_dims(sigma, input.ndim)
- model_output = self.inner_model(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
- target = (input - c_skip * noised_input) / c_out
- return (model_output - target).pow(2).flatten(1).mean(1)
-
- def forward(self, input, sigma, **kwargs):
- c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
- return self.inner_model(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
-
-
-class DiscreteSchedule(nn.Module):
- """A mapping between continuous noise levels (sigmas) and a list of discrete noise
- levels."""
-
- def __init__(self, sigmas, quantize):
- super().__init__()
- self.register_buffer('sigmas', sigmas)
- self.register_buffer('log_sigmas', sigmas.log())
- self.quantize = quantize
-
- @property
- def sigma_min(self):
- return self.sigmas[0]
-
- @property
- def sigma_max(self):
- return self.sigmas[-1]
-
- def get_sigmas(self, n=None):
- if n is None:
- return sampling.append_zero(self.sigmas.flip(0))
- t_max = len(self.sigmas) - 1
- t = torch.linspace(t_max, 0, n, device=self.sigmas.device)
- return sampling.append_zero(self.t_to_sigma(t))
-
- def sigma_to_discrete_timestep(self, sigma):
- log_sigma = sigma.log()
- dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
- return dists.abs().argmin(dim=0).view(sigma.shape)
-
- def sigma_to_t(self, sigma, quantize=None):
- quantize = self.quantize if quantize is None else quantize
- if quantize:
- return self.sigma_to_discrete_timestep(sigma)
- log_sigma = sigma.log()
- dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
- low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
- high_idx = low_idx + 1
- low, high = self.log_sigmas[low_idx], self.log_sigmas[high_idx]
- w = (low - log_sigma) / (low - high)
- w = w.clamp(0, 1)
- t = (1 - w) * low_idx + w * high_idx
- return t.view(sigma.shape)
-
- def t_to_sigma(self, t):
- t = t.float()
- low_idx = t.floor().long()
- high_idx = t.ceil().long()
- w = t-low_idx if t.device.type == 'mps' else t.frac()
- log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
- return log_sigma.exp()
-
- def predict_eps_discrete_timestep(self, input, t, **kwargs):
- if t.dtype != torch.int64 and t.dtype != torch.int32:
- t = t.round()
- sigma = self.t_to_sigma(t)
- input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5)
- return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim)
-
-class DiscreteEpsDDPMDenoiser(DiscreteSchedule):
- """A wrapper for discrete schedule DDPM models that output eps (the predicted
- noise)."""
-
- def __init__(self, model, alphas_cumprod, quantize):
- super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
- self.inner_model = model
- self.sigma_data = 1.
-
- def get_scalings(self, sigma):
- c_out = -sigma
- c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
- return c_out, c_in
-
- def get_eps(self, *args, **kwargs):
- return self.inner_model(*args, **kwargs)
-
- def loss(self, input, noise, sigma, **kwargs):
- c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
- noised_input = input + noise * utils.append_dims(sigma, input.ndim)
- eps = self.get_eps(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
- return (eps - noise).pow(2).flatten(1).mean(1)
-
- def forward(self, input, sigma, **kwargs):
- c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
- eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
- return input + eps * c_out
-
-
-class OpenAIDenoiser(DiscreteEpsDDPMDenoiser):
- """A wrapper for OpenAI diffusion models."""
-
- def __init__(self, model, diffusion, quantize=False, has_learned_sigmas=True, device='cpu'):
- alphas_cumprod = torch.tensor(diffusion.alphas_cumprod, device=device, dtype=torch.float32)
- super().__init__(model, alphas_cumprod, quantize=quantize)
- self.has_learned_sigmas = has_learned_sigmas
-
- def get_eps(self, *args, **kwargs):
- model_output = self.inner_model(*args, **kwargs)
- if self.has_learned_sigmas:
- return model_output.chunk(2, dim=1)[0]
- return model_output
-
-
-class CompVisDenoiser(DiscreteEpsDDPMDenoiser):
- """A wrapper for CompVis diffusion models."""
-
- def __init__(self, model, quantize=False, device='cpu'):
- super().__init__(model, model.alphas_cumprod, quantize=quantize)
-
- def get_eps(self, *args, **kwargs):
- return self.inner_model.apply_model(*args, **kwargs)
-
-
-class DiscreteVDDPMDenoiser(DiscreteSchedule):
- """A wrapper for discrete schedule DDPM models that output v."""
-
- def __init__(self, model, alphas_cumprod, quantize):
- super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
- self.inner_model = model
- self.sigma_data = 1.
-
- def get_scalings(self, sigma):
- c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
- c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
- c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
- return c_skip, c_out, c_in
-
- def get_v(self, *args, **kwargs):
- return self.inner_model(*args, **kwargs)
-
- def loss(self, input, noise, sigma, **kwargs):
- c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
- noised_input = input + noise * utils.append_dims(sigma, input.ndim)
- model_output = self.get_v(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
- target = (input - c_skip * noised_input) / c_out
- return (model_output - target).pow(2).flatten(1).mean(1)
-
- def forward(self, input, sigma, **kwargs):
- c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
- return self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
-
-
-class CompVisVDenoiser(DiscreteVDDPMDenoiser):
- """A wrapper for CompVis diffusion models that output v."""
-
- def __init__(self, model, quantize=False, device='cpu'):
- super().__init__(model, model.alphas_cumprod, quantize=quantize)
-
- def get_v(self, x, t, cond, **kwargs):
- return self.inner_model.apply_model(x, t, cond)
diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py
index 937c5a3881d..761c2e0ef7c 100644
--- a/comfy/k_diffusion/sampling.py
+++ b/comfy/k_diffusion/sampling.py
@@ -717,7 +717,6 @@ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev)
return mu
-
def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
extra_args = {} if extra_args is None else extra_args
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
@@ -737,3 +736,75 @@ def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disab
def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)
+@torch.no_grad()
+def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
+ extra_args = {} if extra_args is None else extra_args
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+ s_in = x.new_ones([x.shape[0]])
+ for i in trange(len(sigmas) - 1, disable=disable):
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+ x = denoised
+ if sigmas[i + 1] > 0:
+ x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])
+ return x
+
+
+
+@torch.no_grad()
+def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+ # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ s_end = sigmas[-1]
+ for i in trange(len(sigmas) - 1, disable=disable):
+ gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
+ eps = torch.randn_like(x) * s_noise
+ sigma_hat = sigmas[i] * (gamma + 1)
+ if gamma > 0:
+ x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
+ denoised = model(x, sigma_hat * s_in, **extra_args)
+ d = to_d(x, sigma_hat, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+ dt = sigmas[i + 1] - sigma_hat
+ if sigmas[i + 1] == s_end:
+ # Euler method
+ x = x + d * dt
+ elif sigmas[i + 2] == s_end:
+
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
+
+ w = 2 * sigmas[0]
+ w2 = sigmas[i+1]/w
+ w1 = 1 - w2
+
+ d_prime = d * w1 + d_2 * w2
+
+
+ x = x + d_prime * dt
+
+ else:
+ # Heun++
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
+ dt_2 = sigmas[i + 2] - sigmas[i + 1]
+
+ x_3 = x_2 + d_2 * dt_2
+ denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args)
+ d_3 = to_d(x_3, sigmas[i + 2], denoised_3)
+
+ w = 3 * sigmas[0]
+ w2 = sigmas[i + 1] / w
+ w3 = sigmas[i + 2] / w
+ w1 = 1 - w2 - w3
+
+ d_prime = w1 * d + w2 * d_2 + w3 * d_3
+ x = x + d_prime * dt
+ return x
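
A hedged usage sketch of the new `sample_lcm()`: every step jumps straight to the model's denoised prediction and, while steps remain, re-noises it with fresh noise scaled by the next sigma. The denoiser below is a dummy that always predicts zeros, and the sigma schedule is made up; both are only there to show the call shape.

```python
import torch
from comfy.k_diffusion.sampling import sample_lcm

def dummy_denoiser(x, sigma, **kwargs):
    return torch.zeros_like(x)          # pretend the fully denoised latent is all zeros

sigmas = torch.tensor([14.6, 2.0, 0.5, 0.0])
x = torch.randn(1, 4, 8, 8) * sigmas[0]
out = sample_lcm(dummy_denoiser, x, sigmas, disable=True)
print(out.abs().max())                  # tensor(0.): the final sigma is 0, so no noise is re-added
```
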
diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py
index fadc0eec752..c209087e0cc 100644
--- a/comfy/latent_formats.py
+++ b/comfy/latent_formats.py
@@ -20,7 +20,7 @@ def __init__(self, scale_factor=0.18215):
[-0.2829, 0.1762, 0.2721],
[-0.2120, -0.2616, -0.7177]
]
- self.taesd_decoder_name = "taesd_decoder.pth"
+ self.taesd_decoder_name = "taesd_decoder"
class SDXL(LatentFormat):
def __init__(self):
@@ -32,4 +32,4 @@ def __init__(self):
[ 0.0568, 0.1687, -0.0755],
[-0.3112, -0.2359, -0.2076]
]
- self.taesd_decoder_name = "taesdxl_decoder.pth"
+ self.taesd_decoder_name = "taesdxl_decoder"
diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py
index 1fb7ed879fc..d2f1d74a938 100644
--- a/comfy/ldm/models/autoencoder.py
+++ b/comfy/ldm/models/autoencoder.py
@@ -2,67 +2,66 @@
# import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
+from typing import Any, Dict, List, Optional, Tuple, Union
-from comfy.ldm.modules.diffusionmodules.model import Encoder, Decoder
from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from comfy.ldm.util import instantiate_from_config
from comfy.ldm.modules.ema import LitEma
-# class AutoencoderKL(pl.LightningModule):
-class AutoencoderKL(torch.nn.Module):
- def __init__(self,
- ddconfig,
- lossconfig,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- ema_decay=None,
- learn_logvar=False
- ):
+class DiagonalGaussianRegularizer(torch.nn.Module):
+ def __init__(self, sample: bool = True):
super().__init__()
- self.learn_logvar = learn_logvar
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- assert ddconfig["double_z"]
- self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- self.embed_dim = embed_dim
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+ self.sample = sample
+
+ def get_trainable_parameters(self) -> Any:
+ yield from ()
+
+ def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
+ log = dict()
+ posterior = DiagonalGaussianDistribution(z)
+ if self.sample:
+ z = posterior.sample()
+ else:
+ z = posterior.mode()
+ kl_loss = posterior.kl()
+ kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
+ log["kl_loss"] = kl_loss
+ return z, log
+
+
+class AbstractAutoencoder(torch.nn.Module):
+ """
+ This is the base class for all autoencoders, including image autoencoders, image autoencoders with discriminators,
+ unCLIP models, etc. Hence, it is fairly general, and specific features
+ (e.g. discriminator training, encoding, decoding) must be implemented in subclasses.
+ """
+
+ def __init__(
+ self,
+ ema_decay: Union[None, float] = None,
+ monitor: Union[None, str] = None,
+ input_key: str = "jpg",
+ **kwargs,
+ ):
+ super().__init__()
+
+ self.input_key = input_key
+ self.use_ema = ema_decay is not None
if monitor is not None:
self.monitor = monitor
- self.use_ema = ema_decay is not None
if self.use_ema:
- self.ema_decay = ema_decay
- assert 0. < ema_decay < 1.
self.model_ema = LitEma(self, decay=ema_decay)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+ logpy.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+ def get_input(self, batch) -> Any:
+ raise NotImplementedError()
- def init_from_ckpt(self, path, ignore_keys=list()):
- if path.lower().endswith(".safetensors"):
- import safetensors.torch
- sd = safetensors.torch.load_file(path, device="cpu")
- else:
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- self.load_state_dict(sd, strict=False)
- print(f"Restored from {path}")
+ def on_train_batch_end(self, *args, **kwargs):
+ # for EMA computation
+ if self.use_ema:
+ self.model_ema(self)
@contextmanager
def ema_scope(self, context=None):
@@ -70,154 +69,159 @@ def ema_scope(self, context=None):
self.model_ema.store(self.parameters())
self.model_ema.copy_to(self)
if context is not None:
- print(f"{context}: Switched to EMA weights")
+ logpy.info(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
- print(f"{context}: Restored training weights")
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self)
-
- def encode(self, x):
- h = self.encoder(x)
- moments = self.quant_conv(h)
- posterior = DiagonalGaussianDistribution(moments)
- return posterior
-
- def decode(self, z):
- z = self.post_quant_conv(z)
- dec = self.decoder(z)
- return dec
-
- def forward(self, input, sample_posterior=True):
- posterior = self.encode(input)
- if sample_posterior:
- z = posterior.sample()
- else:
- z = posterior.mode()
- dec = self.decode(z)
- return dec, posterior
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
-
- if optimizer_idx == 0:
- # train encoder+decoder+logvar
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return aeloss
-
- if optimizer_idx == 1:
- # train the discriminator
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
-
- self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- log_dict = self._validation_step(batch, batch_idx)
- with self.ema_scope():
- log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
- return log_dict
-
- def _validation_step(self, batch, batch_idx, postfix=""):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
- last_layer=self.get_last_layer(), split="val"+postfix)
-
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
- last_layer=self.get_last_layer(), split="val"+postfix)
-
- self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr = self.learning_rate
- ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
- self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
- if self.learn_logvar:
- print(f"{self.__class__.__name__}: Learning logvar")
- ae_params_list.append(self.loss.logvar)
- opt_ae = torch.optim.Adam(ae_params_list,
- lr=lr, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr, betas=(0.5, 0.9))
- return [opt_ae, opt_disc], []
+ logpy.info(f"{context}: Restored training weights")
+
+ def encode(self, *args, **kwargs) -> torch.Tensor:
+ raise NotImplementedError("encode()-method of abstract base class called")
+
+ def decode(self, *args, **kwargs) -> torch.Tensor:
+ raise NotImplementedError("decode()-method of abstract base class called")
+
+ def instantiate_optimizer_from_config(self, params, lr, cfg):
+ logpy.info(f"loading >>> {cfg['target']} <<< optimizer from config")
+ return get_obj_from_str(cfg["target"])(
+ params, lr=lr, **cfg.get("params", dict())
+ )
+
+ def configure_optimizers(self) -> Any:
+ raise NotImplementedError()
+
+
+class AutoencodingEngine(AbstractAutoencoder):
+ """
+ Base class for all image autoencoders that we train, like VQGAN or AutoencoderKL
+ (we also restore them explicitly as special cases for legacy reasons).
+ Regularizations such as KL or VQ are moved to the regularizer class.
+ """
+
+ def __init__(
+ self,
+ *args,
+ encoder_config: Dict,
+ decoder_config: Dict,
+ regularizer_config: Dict,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+
+ self.encoder: torch.nn.Module = instantiate_from_config(encoder_config)
+ self.decoder: torch.nn.Module = instantiate_from_config(decoder_config)
+ self.regularization: AbstractRegularizer = instantiate_from_config(
+ regularizer_config
+ )
def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- @torch.no_grad()
- def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- if not only_inputs:
- xrec, posterior = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["samples"] = self.decode(torch.randn_like(posterior.sample()))
- log["reconstructions"] = xrec
- if log_ema or self.use_ema:
- with self.ema_scope():
- xrec_ema, posterior_ema = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec_ema.shape[1] > 3
- xrec_ema = self.to_rgb(xrec_ema)
- log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
- log["reconstructions_ema"] = xrec_ema
- log["inputs"] = x
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
+ return self.decoder.get_last_layer()
+
+ def encode(
+ self,
+ x: torch.Tensor,
+ return_reg_log: bool = False,
+ unregularized: bool = False,
+ ) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]:
+ z = self.encoder(x)
+ if unregularized:
+ return z, dict()
+ z, reg_log = self.regularization(z)
+ if return_reg_log:
+ return z, reg_log
+ return z
+
+ def decode(self, z: torch.Tensor, **kwargs) -> torch.Tensor:
+ x = self.decoder(z, **kwargs)
return x
+ def forward(
+ self, x: torch.Tensor, **additional_decode_kwargs
+ ) -> Tuple[torch.Tensor, torch.Tensor, dict]:
+ z, reg_log = self.encode(x, return_reg_log=True)
+ dec = self.decode(z, **additional_decode_kwargs)
+ return z, dec, reg_log
+
+
+class AutoencodingEngineLegacy(AutoencodingEngine):
+ def __init__(self, embed_dim: int, **kwargs):
+ self.max_batch_size = kwargs.pop("max_batch_size", None)
+ ddconfig = kwargs.pop("ddconfig")
+ super().__init__(
+ encoder_config={
+ "target": "comfy.ldm.modules.diffusionmodules.model.Encoder",
+ "params": ddconfig,
+ },
+ decoder_config={
+ "target": "comfy.ldm.modules.diffusionmodules.model.Decoder",
+ "params": ddconfig,
+ },
+ **kwargs,
+ )
+ self.quant_conv = torch.nn.Conv2d(
+ (1 + ddconfig["double_z"]) * ddconfig["z_channels"],
+ (1 + ddconfig["double_z"]) * embed_dim,
+ 1,
+ )
+ self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+ self.embed_dim = embed_dim
-class IdentityFirstStage(torch.nn.Module):
- def __init__(self, *args, vq_interface=False, **kwargs):
- self.vq_interface = vq_interface
- super().__init__()
-
- def encode(self, x, *args, **kwargs):
- return x
+ def get_autoencoder_params(self) -> list:
+ params = super().get_autoencoder_params()
+ return params
- def decode(self, x, *args, **kwargs):
- return x
+ def encode(
+ self, x: torch.Tensor, return_reg_log: bool = False
+ ) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]:
+ if self.max_batch_size is None:
+ z = self.encoder(x)
+ z = self.quant_conv(z)
+ else:
+ N = x.shape[0]
+ bs = self.max_batch_size
+ n_batches = int(math.ceil(N / bs))
+ z = list()
+ for i_batch in range(n_batches):
+ z_batch = self.encoder(x[i_batch * bs : (i_batch + 1) * bs])
+ z_batch = self.quant_conv(z_batch)
+ z.append(z_batch)
+ z = torch.cat(z, 0)
+
+ z, reg_log = self.regularization(z)
+ if return_reg_log:
+ return z, reg_log
+ return z
+
+ def decode(self, z: torch.Tensor, **decoder_kwargs) -> torch.Tensor:
+ if self.max_batch_size is None:
+ dec = self.post_quant_conv(z)
+ dec = self.decoder(dec, **decoder_kwargs)
+ else:
+ N = z.shape[0]
+ bs = self.max_batch_size
+ n_batches = int(math.ceil(N / bs))
+ dec = list()
+ for i_batch in range(n_batches):
+ dec_batch = self.post_quant_conv(z[i_batch * bs : (i_batch + 1) * bs])
+ dec_batch = self.decoder(dec_batch, **decoder_kwargs)
+ dec.append(dec_batch)
+ dec = torch.cat(dec, 0)
- def quantize(self, x, *args, **kwargs):
- if self.vq_interface:
- return x, None, [None, None, None]
- return x
+ return dec
- def forward(self, x, *args, **kwargs):
- return x
+class AutoencoderKL(AutoencodingEngineLegacy):
+ def __init__(self, **kwargs):
+ if "lossconfig" in kwargs:
+ kwargs["loss_config"] = kwargs.pop("lossconfig")
+ super().__init__(
+ regularizer_config={
+ "target": (
+ "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"
+ )
+ },
+ **kwargs,
+ )
diff --git a/comfy/ldm/models/diffusion/__init__.py b/comfy/ldm/models/diffusion/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/comfy/ldm/models/diffusion/ddim.py b/comfy/ldm/models/diffusion/ddim.py
deleted file mode 100644
index befab0075ca..00000000000
--- a/comfy/ldm/models/diffusion/ddim.py
+++ /dev/null
@@ -1,418 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-import numpy as np
-from tqdm import tqdm
-
-from comfy.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
-
-
-class DDIMSampler(object):
- def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwargs):
- super().__init__()
- self.model = model
- self.ddpm_num_timesteps = model.num_timesteps
- self.schedule = schedule
- self.device = device
- self.parameterization = kwargs.get("parameterization", "eps")
-
- def register_buffer(self, name, attr):
- if type(attr) == torch.Tensor:
- if attr.device != self.device:
- attr = attr.float().to(self.device)
- setattr(self, name, attr)
-
- def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
- ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
- num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
- self.make_schedule_timesteps(ddim_timesteps, ddim_eta=ddim_eta, verbose=verbose)
-
- def make_schedule_timesteps(self, ddim_timesteps, ddim_eta=0., verbose=True):
- self.ddim_timesteps = torch.tensor(ddim_timesteps)
- alphas_cumprod = self.model.alphas_cumprod
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
-
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
- self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
- # ddim sampling parameters
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
- ddim_timesteps=self.ddim_timesteps,
- eta=ddim_eta,verbose=verbose)
- self.register_buffer('ddim_sigmas', ddim_sigmas)
- self.register_buffer('ddim_alphas', ddim_alphas)
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
- self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
- 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
- self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
- @torch.no_grad()
- def sample_custom(self,
- ddim_timesteps,
- conditioning,
- callback=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- dynamic_threshold=None,
- ucg_schedule=None,
- denoise_function=None,
- extra_args=None,
- to_zero=True,
- end_step=None,
- disable_pbar=False,
- **kwargs
- ):
- self.make_schedule_timesteps(ddim_timesteps=ddim_timesteps, ddim_eta=eta, verbose=verbose)
- samples, intermediates = self.ddim_sampling(conditioning, x_T.shape,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- dynamic_threshold=dynamic_threshold,
- ucg_schedule=ucg_schedule,
- denoise_function=denoise_function,
- extra_args=extra_args,
- to_zero=to_zero,
- end_step=end_step,
- disable_pbar=disable_pbar
- )
- return samples, intermediates
-
-
- @torch.no_grad()
- def sample(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- dynamic_threshold=None,
- ucg_schedule=None,
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list): ctmp = ctmp[0]
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-
- elif isinstance(conditioning, list):
- for ctmp in conditioning:
- if ctmp.shape[0] != batch_size:
-                        print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")
-
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for DDIM sampling is {size}, eta {eta}')
-
- samples, intermediates = self.ddim_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- dynamic_threshold=dynamic_threshold,
- ucg_schedule=ucg_schedule,
- denoise_function=None,
- extra_args=None
- )
- return samples, intermediates
-
- def q_sample(self, x_start, t, noise=None):
- if noise is None:
- noise = torch.randn_like(x_start)
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
- @torch.no_grad()
- def ddim_sampling(self, cond, shape,
- x_T=None, ddim_use_original_steps=False,
- callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, log_every_t=100,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
- ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None, disable_pbar=False):
- device = self.model.alphas_cumprod.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- if timesteps is None:
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
- elif timesteps is not None and not ddim_use_original_steps:
- subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
- timesteps = self.ddim_timesteps[:subset_end]
-
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
- time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else timesteps.flip(0)
- total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
- # print(f"Running DDIM Sampling with {total_steps} timesteps")
-
- iterator = tqdm(time_range[:end_step], desc='DDIM Sampler', total=end_step, disable=disable_pbar)
-
- for i, step in enumerate(iterator):
- index = total_steps - i - 1
- ts = torch.full((b,), step, device=device, dtype=torch.long)
-
- if mask is not None:
- assert x0 is not None
- img_orig = self.q_sample(x0, ts) # TODO: deterministic forward pass?
- img = img_orig * mask + (1. - mask) * img
-
- if ucg_schedule is not None:
- assert len(ucg_schedule) == len(time_range)
- unconditional_guidance_scale = ucg_schedule[i]
-
- outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
- quantize_denoised=quantize_denoised, temperature=temperature,
- noise_dropout=noise_dropout, score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- dynamic_threshold=dynamic_threshold, denoise_function=denoise_function, extra_args=extra_args)
- img, pred_x0 = outs
- if callback: callback(i)
- if img_callback: img_callback(pred_x0, i)
-
- if index % log_every_t == 0 or index == total_steps - 1:
- intermediates['x_inter'].append(img)
- intermediates['pred_x0'].append(pred_x0)
-
- if to_zero:
- img = pred_x0
- else:
- if ddim_use_original_steps:
- sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
- else:
- sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
- img /= sqrt_alphas_cumprod[index - 1]
-
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None,
- dynamic_threshold=None, denoise_function=None, extra_args=None):
- b, *_, device = *x.shape, x.device
-
- if denoise_function is not None:
- model_output = denoise_function(x, t, **extra_args)
- elif unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- model_output = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
- if isinstance(c, dict):
- assert isinstance(unconditional_conditioning, dict)
- c_in = dict()
- for k in c:
- if isinstance(c[k], list):
- c_in[k] = [torch.cat([
- unconditional_conditioning[k][i],
- c[k][i]]) for i in range(len(c[k]))]
- else:
- c_in[k] = torch.cat([
- unconditional_conditioning[k],
- c[k]])
- elif isinstance(c, list):
- c_in = list()
- assert isinstance(unconditional_conditioning, list)
- for i in range(len(c)):
- c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
- else:
- c_in = torch.cat([unconditional_conditioning, c])
- model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
-
- if self.parameterization == "v":
- e_t = extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * model_output + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
- else:
- e_t = model_output
-
- if score_corrector is not None:
- assert self.parameterization == "eps", 'not implemented'
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- if self.parameterization != "v":
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- else:
- pred_x0 = extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * x - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * model_output
-
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-
- if dynamic_threshold is not None:
- raise NotImplementedError()
-
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
- @torch.no_grad()
- def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
- unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
- num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]
-
- assert t_enc <= num_reference_steps
- num_steps = t_enc
-
- if use_original_steps:
- alphas_next = self.alphas_cumprod[:num_steps]
- alphas = self.alphas_cumprod_prev[:num_steps]
- else:
- alphas_next = self.ddim_alphas[:num_steps]
- alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
-
- x_next = x0
- intermediates = []
- inter_steps = []
- for i in tqdm(range(num_steps), desc='Encoding Image'):
- t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
- if unconditional_guidance_scale == 1.:
- noise_pred = self.model.apply_model(x_next, t, c)
- else:
- assert unconditional_conditioning is not None
- e_t_uncond, noise_pred = torch.chunk(
- self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
- torch.cat((unconditional_conditioning, c))), 2)
- noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)
-
- xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
- weighted_noise_pred = alphas_next[i].sqrt() * (
- (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
- x_next = xt_weighted + weighted_noise_pred
- if return_intermediates and i % (
- num_steps // return_intermediates) == 0 and i < num_steps - 1:
- intermediates.append(x_next)
- inter_steps.append(i)
- elif return_intermediates and i >= num_steps - 2:
- intermediates.append(x_next)
- inter_steps.append(i)
- if callback: callback(i)
-
- out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
- if return_intermediates:
- out.update({'intermediates': intermediates})
- return x_next, out
-
- @torch.no_grad()
- def stochastic_encode(self, x0, t, use_original_steps=False, noise=None, max_denoise=False):
- # fast, but does not allow for exact reconstruction
- # t serves as an index to gather the correct alphas
- if use_original_steps:
- sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
- sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
- else:
- sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
- sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
-
- if noise is None:
- noise = torch.randn_like(x0)
- if max_denoise:
- noise_multiplier = 1.0
- else:
- noise_multiplier = extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)
-
- return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + noise_multiplier * noise)
-
- @torch.no_grad()
- def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
- use_original_steps=False, callback=None):
-
- timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
- timesteps = timesteps[:t_start]
-
- time_range = np.flip(timesteps)
- total_steps = timesteps.shape[0]
- print(f"Running DDIM Sampling with {total_steps} timesteps")
-
- iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
- x_dec = x_latent
- for i, step in enumerate(iterator):
- index = total_steps - i - 1
- ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
- x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning)
- if callback: callback(i)
- return x_dec
\ No newline at end of file
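Stripped of schedule bookkeeping and guidance handling, the update that the deleted DDIMSampler.p_sample_ddim performed is the standard DDIM step. A minimal restatement in plain PyTorch follows (eps parameterization only; the tensor values below are toy placeholders, not the sampler's registered schedule buffers).

import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t, temperature=1.0):
    # One DDIM update from x_t to x_{t-1}, mirroring p_sample_ddim in the removed file.
    #   x       : current latent x_t
    #   e_t     : predicted noise at timestep t (eps parameterization)
    #   a_t     : alphas_cumprod at the current DDIM timestep
    #   a_prev  : alphas_cumprod at the previous DDIM timestep
    #   sigma_t : DDIM sigma (0.0 gives deterministic sampling, eta > 0 adds noise)
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()      # current estimate of x_0
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t        # direction pointing back to x_t
    noise = sigma_t * torch.randn_like(x) * temperature
    return a_prev.sqrt() * pred_x0 + dir_xt + noise

# toy usage with made-up alphas; in the sampler these come from the registered schedule
x = torch.randn(1, 4, 8, 8)
e_t = torch.randn_like(x)
a_t, a_prev, sigma_t = torch.tensor(0.5), torch.tensor(0.6), torch.tensor(0.0)
x_prev = ddim_step(x, e_t, a_t, a_prev, sigma_t)
print(x_prev.shape)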
diff --git a/comfy/ldm/models/diffusion/dpm_solver/__init__.py b/comfy/ldm/models/diffusion/dpm_solver/__init__.py
deleted file mode 100644
index 7427f38c075..00000000000
--- a/comfy/ldm/models/diffusion/dpm_solver/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .sampler import DPMSolverSampler
\ No newline at end of file
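The package removed below is built around NoiseScheduleVP, which for the 'discrete' case converts a DDPM alphas_cumprod table into alpha_t = sqrt(alphas_cumprod), sigma_t = sqrt(1 - alpha_t^2) and the half-logSNR lambda_t = log(alpha_t) - log(sigma_t) that DPM-Solver steps over. A standalone sketch of that conversion, using made-up alphas_cumprod values rather than a real schedule:

import torch

def discrete_vp_schedule(alphas_cumprod):
    # Mirror of what NoiseScheduleVP('discrete', ...) precomputes per step:
    # log(alpha_t), sigma_t, and the half-logSNR lambda_t = log(alpha_t) - log(sigma_t).
    log_alpha = 0.5 * torch.log(alphas_cumprod)            # alpha_t = sqrt(alphas_cumprod)
    sigma = torch.sqrt(1.0 - torch.exp(2.0 * log_alpha))   # sigma_t^2 = 1 - alpha_t^2
    lam = log_alpha - torch.log(sigma)
    return log_alpha, sigma, lam

# toy decreasing alphas_cumprod for N = 10 discrete steps (illustrative values only)
alphas_cumprod = torch.linspace(0.999, 0.01, 10)
log_alpha, sigma, lam = discrete_vp_schedule(alphas_cumprod)
print(lam)  # lambda decreases as t grows, i.e. the signal-to-noise ratio drops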
diff --git a/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py b/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py
deleted file mode 100644
index da8d41f9c5e..00000000000
--- a/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py
+++ /dev/null
@@ -1,1163 +0,0 @@
-import torch
-import torch.nn.functional as F
-import math
-from tqdm import tqdm
-
-
-class NoiseScheduleVP:
- def __init__(
- self,
- schedule='discrete',
- betas=None,
- alphas_cumprod=None,
- continuous_beta_0=0.1,
- continuous_beta_1=20.,
- ):
- """Create a wrapper class for the forward SDE (VP type).
- ***
-        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
- We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
- ***
- The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
- We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
- Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
- log_alpha_t = self.marginal_log_mean_coeff(t)
- sigma_t = self.marginal_std(t)
- lambda_t = self.marginal_lambda(t)
- Moreover, as lambda(t) is an invertible function, we also support its inverse function:
- t = self.inverse_lambda(lambda_t)
- ===============================================================
- We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
- 1. For discrete-time DPMs:
- For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
- t_i = (i + 1) / N
- e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
- We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
- Args:
- betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
- alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
-            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
-            **Important**: Please pay special attention to the args for `alphas_cumprod`:
-            The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
- q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
- Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
- alpha_{t_n} = \sqrt{\hat{alpha_n}},
- and
- log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
- 2. For continuous-time DPMs:
- We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
- schedule are the default settings in DDPM and improved-DDPM:
- Args:
- beta_min: A `float` number. The smallest beta for the linear schedule.
- beta_max: A `float` number. The largest beta for the linear schedule.
- cosine_s: A `float` number. The hyperparameter in the cosine schedule.
- cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
- T: A `float` number. The ending time of the forward process.
- ===============================================================
- Args:
- schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
- 'linear' or 'cosine' for continuous-time DPMs.
- Returns:
- A wrapper object of the forward SDE (VP type).
-
- ===============================================================
- Example:
- # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
- >>> ns = NoiseScheduleVP('discrete', betas=betas)
- # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
- >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
- # For continuous-time DPMs (VPSDE), linear schedule:
- >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
- """
-
- if schedule not in ['discrete', 'linear', 'cosine']:
- raise ValueError(
- "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
- schedule))
-
- self.schedule = schedule
- if schedule == 'discrete':
- if betas is not None:
- log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
- else:
- assert alphas_cumprod is not None
- log_alphas = 0.5 * torch.log(alphas_cumprod)
- self.total_N = len(log_alphas)
- self.T = 1.
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
- self.log_alpha_array = log_alphas.reshape((1, -1,))
- else:
- self.total_N = 1000
- self.beta_0 = continuous_beta_0
- self.beta_1 = continuous_beta_1
- self.cosine_s = 0.008
- self.cosine_beta_max = 999.
- self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
- 1. + self.cosine_s) / math.pi - self.cosine_s
- self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
- self.schedule = schedule
- if schedule == 'cosine':
- # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
-                # Note that T = 0.9946 may not be the optimal setting. However, we find it works well.
- self.T = 0.9946
- else:
- self.T = 1.
-
- def marginal_log_mean_coeff(self, t):
- """
- Compute log(alpha_t) of a given continuous-time label t in [0, T].
- """
- if self.schedule == 'discrete':
- return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
- self.log_alpha_array.to(t.device)).reshape((-1))
- elif self.schedule == 'linear':
- return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
- elif self.schedule == 'cosine':
- log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
- log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
- return log_alpha_t
-
- def marginal_alpha(self, t):
- """
- Compute alpha_t of a given continuous-time label t in [0, T].
- """
- return torch.exp(self.marginal_log_mean_coeff(t))
-
- def marginal_std(self, t):
- """
- Compute sigma_t of a given continuous-time label t in [0, T].
- """
- return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
-
- def marginal_lambda(self, t):
- """
- Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
- """
- log_mean_coeff = self.marginal_log_mean_coeff(t)
- log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
- return log_mean_coeff - log_std
-
- def inverse_lambda(self, lamb):
- """
- Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
- """
- if self.schedule == 'linear':
- tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
- Delta = self.beta_0 ** 2 + tmp
- return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
- elif self.schedule == 'discrete':
- log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
- t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
- torch.flip(self.t_array.to(lamb.device), [1]))
- return t.reshape((-1,))
- else:
- log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
- t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
- 1. + self.cosine_s) / math.pi - self.cosine_s
- t = t_fn(log_alpha)
- return t
-
-
-def model_wrapper(
- model,
- noise_schedule,
- model_type="noise",
- model_kwargs={},
- guidance_type="uncond",
- condition=None,
- unconditional_condition=None,
- guidance_scale=1.,
- classifier_fn=None,
- classifier_kwargs={},
-):
- """Create a wrapper function for the noise prediction model.
- DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
-    first wrap the model function into a noise prediction model that accepts continuous time as input.
- We support four types of the diffusion model by setting `model_type`:
- 1. "noise": noise prediction model. (Trained by predicting noise).
- 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
- 3. "v": velocity prediction model. (Trained by predicting the velocity).
-            The "v" prediction derivation is detailed in Appendix D of [1], and is used in Imagen-Video [2].
- [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
- arXiv preprint arXiv:2202.00512 (2022).
- [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
- arXiv preprint arXiv:2210.02303 (2022).
-
- 4. "score": marginal score function. (Trained by denoising score matching).
-        Note that the score function and the noise prediction model follow a simple relationship:
- ```
- noise(x_t, t) = -sigma_t * score(x_t, t)
- ```
- We support three types of guided sampling by DPMs by setting `guidance_type`:
- 1. "uncond": unconditional sampling by DPMs.
- The input `model` has the following format:
- ``
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
- ``
- 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
- The input `model` has the following format:
- ``
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
- ``
- The input `classifier_fn` has the following format:
- ``
- classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
- ``
- [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
- in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
- 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
- The input `model` has the following format:
- ``
- model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
- ``
- And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
- [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
- arXiv preprint arXiv:2207.12598 (2022).
-
- The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
- or continuous-time labels (i.e. epsilon to T).
-    We wrap the model function so that it accepts only `x` and `t_continuous` as inputs and outputs the predicted noise:
- ``
- def model_fn(x, t_continuous) -> noise:
- t_input = get_model_input_time(t_continuous)
- return noise_pred(model, x, t_input, **model_kwargs)
- ``
- where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
- ===============================================================
- Args:
- model: A diffusion model with the corresponding format described above.
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
- model_type: A `str`. The parameterization type of the diffusion model.
- "noise" or "x_start" or "v" or "score".
- model_kwargs: A `dict`. A dict for the other inputs of the model function.
- guidance_type: A `str`. The type of the guidance for sampling.
- "uncond" or "classifier" or "classifier-free".
- condition: A pytorch tensor. The condition for the guided sampling.
- Only used for "classifier" or "classifier-free" guidance type.
- unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
- Only used for "classifier-free" guidance type.
- guidance_scale: A `float`. The scale for the guided sampling.
- classifier_fn: A classifier function. Only used for the classifier guidance.
- classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
- Returns:
- A noise prediction model that accepts the noised data and the continuous time as the inputs.
- """
-
- def get_model_input_time(t_continuous):
- """
- Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
- For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
- For continuous-time DPMs, we just use `t_continuous`.
- """
- if noise_schedule.schedule == 'discrete':
- return (t_continuous - 1. / noise_schedule.total_N) * 1000.
- else:
- return t_continuous
-
- def noise_pred_fn(x, t_continuous, cond=None):
- if t_continuous.reshape((-1,)).shape[0] == 1:
- t_continuous = t_continuous.expand((x.shape[0]))
- t_input = get_model_input_time(t_continuous)
- if cond is None:
- output = model(x, t_input, **model_kwargs)
- else:
- output = model(x, t_input, cond, **model_kwargs)
- if model_type == "noise":
- return output
- elif model_type == "x_start":
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
- dims = x.dim()
- return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
- elif model_type == "v":
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
- dims = x.dim()
- return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
- elif model_type == "score":
- sigma_t = noise_schedule.marginal_std(t_continuous)
- dims = x.dim()
- return -expand_dims(sigma_t, dims) * output
-
- def cond_grad_fn(x, t_input):
- """
- Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
- """
- with torch.enable_grad():
- x_in = x.detach().requires_grad_(True)
- log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
- return torch.autograd.grad(log_prob.sum(), x_in)[0]
-
- def model_fn(x, t_continuous):
- """
-        The noise prediction model function that is used for DPM-Solver.
- """
- if t_continuous.reshape((-1,)).shape[0] == 1:
- t_continuous = t_continuous.expand((x.shape[0]))
- if guidance_type == "uncond":
- return noise_pred_fn(x, t_continuous)
- elif guidance_type == "classifier":
- assert classifier_fn is not None
- t_input = get_model_input_time(t_continuous)
- cond_grad = cond_grad_fn(x, t_input)
- sigma_t = noise_schedule.marginal_std(t_continuous)
- noise = noise_pred_fn(x, t_continuous)
- return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
- elif guidance_type == "classifier-free":
- if guidance_scale == 1. or unconditional_condition is None:
- return noise_pred_fn(x, t_continuous, cond=condition)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t_continuous] * 2)
- if isinstance(condition, dict):
- assert isinstance(unconditional_condition, dict)
- c_in = dict()
- for k in condition:
- if isinstance(condition[k], list):
- c_in[k] = [torch.cat([unconditional_condition[k][i], condition[k][i]]) for i in range(len(condition[k]))]
- else:
- c_in[k] = torch.cat([unconditional_condition[k], condition[k]])
- else:
- c_in = torch.cat([unconditional_condition, condition])
- noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
- return noise_uncond + guidance_scale * (noise - noise_uncond)
-
- assert model_type in ["noise", "x_start", "v"]
- assert guidance_type in ["uncond", "classifier", "classifier-free"]
- return model_fn
-
-
-class DPM_Solver:
- def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
- """Construct a DPM-Solver.
- We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
- If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
- In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
- The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
- Args:
- model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
- ``
- def model_fn(x, t_continuous):
- return noise
- ``
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
- predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
- thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
- max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
-
- [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
- """
- self.model = model_fn
- self.noise_schedule = noise_schedule
- self.predict_x0 = predict_x0
- self.thresholding = thresholding
- self.max_val = max_val
-
- def noise_prediction_fn(self, x, t):
- """
- Return the noise prediction model.
- """
- return self.model(x, t)
-
- def data_prediction_fn(self, x, t):
- """
- Return the data prediction model (with thresholding).
- """
- noise = self.noise_prediction_fn(x, t)
- dims = x.dim()
- alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
- x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
- if self.thresholding:
- p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
- s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
- s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
- x0 = torch.clamp(x0, -s, s) / s
- return x0
-
- def model_fn(self, x, t):
- """
- Convert the model to the noise prediction model or the data prediction model.
- """
- if self.predict_x0:
- return self.data_prediction_fn(x, t)
- else:
- return self.noise_prediction_fn(x, t)
-
- def get_time_steps(self, skip_type, t_T, t_0, N, device):
- """Compute the intermediate time steps for sampling.
- Args:
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
- - 'logSNR': uniform logSNR for the time steps.
-            - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
-            - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
- t_T: A `float`. The starting time of the sampling (default is T).
- t_0: A `float`. The ending time of the sampling (default is epsilon).
- N: A `int`. The total number of the spacing of the time steps.
- device: A torch device.
- Returns:
- A pytorch tensor of the time steps, with the shape (N + 1,).
- """
- if skip_type == 'logSNR':
- lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
- lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
- logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
- return self.noise_schedule.inverse_lambda(logSNR_steps)
- elif skip_type == 'time_uniform':
- return torch.linspace(t_T, t_0, N + 1).to(device)
- elif skip_type == 'time_quadratic':
- t_order = 2
- t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
- return t
- else:
- raise ValueError(
- "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
-
- def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
- """
- Get the order of each step for sampling by the singlestep DPM-Solver.
- We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
- Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
- - If order == 1:
- We take `steps` of DPM-Solver-1 (i.e. DDIM).
- - If order == 2:
- - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
- - If steps % 2 == 0, we use K steps of DPM-Solver-2.
- - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If order == 3:
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
- - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
- - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
- ============================================
- Args:
- order: A `int`. The max order for the solver (2 or 3).
- steps: A `int`. The total number of function evaluations (NFE).
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
- - 'logSNR': uniform logSNR for the time steps.
-            - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
-            - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
- t_T: A `float`. The starting time of the sampling (default is T).
- t_0: A `float`. The ending time of the sampling (default is epsilon).
- device: A torch device.
- Returns:
- orders: A list of the solver order of each step.
- """
- if order == 3:
- K = steps // 3 + 1
- if steps % 3 == 0:
- orders = [3, ] * (K - 2) + [2, 1]
- elif steps % 3 == 1:
- orders = [3, ] * (K - 1) + [1]
- else:
- orders = [3, ] * (K - 1) + [2]
- elif order == 2:
- if steps % 2 == 0:
- K = steps // 2
- orders = [2, ] * K
- else:
- K = steps // 2 + 1
- orders = [2, ] * (K - 1) + [1]
- elif order == 1:
- K = 1
- orders = [1, ] * steps
- else:
- raise ValueError("'order' must be '1' or '2' or '3'.")
- if skip_type == 'logSNR':
- # To reproduce the results in DPM-Solver paper
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
- else:
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
-                torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
- return timesteps_outer, orders
-
- def denoise_to_zero_fn(self, x, s):
- """
-        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
- """
- return self.data_prediction_fn(x, s)
-
- def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
- """
- DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- model_s: A pytorch tensor. The model function evaluated at time `s`.
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
- return_intermediate: A `bool`. If true, also return the model value at time `s`.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- ns = self.noise_schedule
- dims = x.dim()
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
- h = lambda_t - lambda_s
- log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
- sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
- alpha_t = torch.exp(log_alpha_t)
-
- if self.predict_x0:
- phi_1 = torch.expm1(-h)
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- )
- if return_intermediate:
- return x_t, {'model_s': model_s}
- else:
- return x_t
- else:
- phi_1 = torch.expm1(h)
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- )
- if return_intermediate:
- return x_t, {'model_s': model_s}
- else:
- return x_t
-
- def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
- solver_type='dpm_solver'):
- """
- Singlestep solver DPM-Solver-2 from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- r1: A `float`. The hyperparameter of the second-order solver.
- model_s: A pytorch tensor. The model function evaluated at time `s`.
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
- return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if solver_type not in ['dpm_solver', 'taylor']:
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
- if r1 is None:
- r1 = 0.5
- ns = self.noise_schedule
- dims = x.dim()
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
- h = lambda_t - lambda_s
- lambda_s1 = lambda_s + r1 * h
- s1 = ns.inverse_lambda(lambda_s1)
- log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
- s1), ns.marginal_log_mean_coeff(t)
- sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
- alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
-
- if self.predict_x0:
- phi_11 = torch.expm1(-r1 * h)
- phi_1 = torch.expm1(-h)
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_s1 = (
- expand_dims(sigma_s1 / sigma_s, dims) * x
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
- model_s1 - model_s)
- )
- else:
- phi_11 = torch.expm1(r1 * h)
- phi_1 = torch.expm1(h)
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_s1 = (
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
- )
- if return_intermediate:
- return x_t, {'model_s': model_s, 'model_s1': model_s1}
- else:
- return x_t
-
- def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
- return_intermediate=False, solver_type='dpm_solver'):
- """
- Singlestep solver DPM-Solver-3 from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- r1: A `float`. The hyperparameter of the third-order solver.
- r2: A `float`. The hyperparameter of the third-order solver.
- model_s: A pytorch tensor. The model function evaluated at time `s`.
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
- model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
- If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if solver_type not in ['dpm_solver', 'taylor']:
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
- if r1 is None:
- r1 = 1. / 3.
- if r2 is None:
- r2 = 2. / 3.
- ns = self.noise_schedule
- dims = x.dim()
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
- h = lambda_t - lambda_s
- lambda_s1 = lambda_s + r1 * h
- lambda_s2 = lambda_s + r2 * h
- s1 = ns.inverse_lambda(lambda_s1)
- s2 = ns.inverse_lambda(lambda_s2)
- log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
- s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
- sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
- s2), ns.marginal_std(t)
- alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
-
- if self.predict_x0:
- phi_11 = torch.expm1(-r1 * h)
- phi_12 = torch.expm1(-r2 * h)
- phi_1 = torch.expm1(-h)
- phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
- phi_2 = phi_1 / h + 1.
- phi_3 = phi_2 / h - 0.5
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- if model_s1 is None:
- x_s1 = (
- expand_dims(sigma_s1 / sigma_s, dims) * x
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- x_s2 = (
- expand_dims(sigma_s2 / sigma_s, dims) * x
- - expand_dims(alpha_s2 * phi_12, dims) * model_s
- + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
- )
- model_s2 = self.model_fn(x_s2, s2)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
- )
- elif solver_type == 'taylor':
- D1_0 = (1. / r1) * (model_s1 - model_s)
- D1_1 = (1. / r2) * (model_s2 - model_s)
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- + expand_dims(alpha_t * phi_2, dims) * D1
- - expand_dims(alpha_t * phi_3, dims) * D2
- )
- else:
- phi_11 = torch.expm1(r1 * h)
- phi_12 = torch.expm1(r2 * h)
- phi_1 = torch.expm1(h)
- phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
- phi_2 = phi_1 / h - 1.
- phi_3 = phi_2 / h - 0.5
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- if model_s1 is None:
- x_s1 = (
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- x_s2 = (
- expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
- - expand_dims(sigma_s2 * phi_12, dims) * model_s
- - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
- )
- model_s2 = self.model_fn(x_s2, s2)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
- )
- elif solver_type == 'taylor':
- D1_0 = (1. / r1) * (model_s1 - model_s)
- D1_1 = (1. / r2) * (model_s2 - model_s)
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - expand_dims(sigma_t * phi_2, dims) * D1
- - expand_dims(sigma_t * phi_3, dims) * D2
- )
-
- if return_intermediate:
- return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
- else:
- return x_t
-
- def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
- """
- Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- model_prev_list: A list of pytorch tensor. The previous computed model values.
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if solver_type not in ['dpm_solver', 'taylor']:
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
- ns = self.noise_schedule
- dims = x.dim()
- model_prev_1, model_prev_0 = model_prev_list
- t_prev_1, t_prev_0 = t_prev_list
- lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
- t_prev_0), ns.marginal_lambda(t)
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
- alpha_t = torch.exp(log_alpha_t)
-
- h_0 = lambda_prev_0 - lambda_prev_1
- h = lambda_t - lambda_prev_0
- r0 = h_0 / h
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
- if self.predict_x0:
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(sigma_t / sigma_prev_0, dims) * x
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
- - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(sigma_t / sigma_prev_0, dims) * x
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
- )
- else:
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
- - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
- )
- return x_t
-
- def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
- """
- Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- model_prev_list: A list of pytorch tensor. The previous computed model values.
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- ns = self.noise_schedule
- dims = x.dim()
- model_prev_2, model_prev_1, model_prev_0 = model_prev_list
- t_prev_2, t_prev_1, t_prev_0 = t_prev_list
- lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
- t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
- alpha_t = torch.exp(log_alpha_t)
-
- h_1 = lambda_prev_1 - lambda_prev_2
- h_0 = lambda_prev_0 - lambda_prev_1
- h = lambda_t - lambda_prev_0
- r0, r1 = h_0 / h, h_1 / h
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
- D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
- D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
- D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
- if self.predict_x0:
- x_t = (
- expand_dims(sigma_t / sigma_prev_0, dims) * x
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
- - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
- )
- else:
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
- - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
- )
- return x_t
-
- def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
- r2=None):
- """
- Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- r1: A `float`. The hyperparameter of the second-order or third-order solver.
- r2: A `float`. The hyperparameter of the third-order solver.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if order == 1:
- return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
- elif order == 2:
- return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
- solver_type=solver_type, r1=r1)
- elif order == 3:
- return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
- solver_type=solver_type, r1=r1, r2=r2)
- else:
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
-
- def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
- """
- Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- model_prev_list: A list of pytorch tensor. The previous computed model values.
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
-            order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if order == 1:
- return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
- elif order == 2:
- return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
- elif order == 3:
- return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
- else:
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
-
- def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
- solver_type='dpm_solver'):
- """
- The adaptive step size solver based on singlestep DPM-Solver.
- Args:
- x: A pytorch tensor. The initial value at time `t_T`.
-            order: An `int`. The (higher) order of the solver. We only support order == 2 or 3.
- t_T: A `float`. The starting time of the sampling (default is T).
- t_0: A `float`. The ending time of the sampling (default is epsilon).
- h_init: A `float`. The initial step size (for logSNR).
- atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
- rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
- theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
- t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
- current time and `t_0` is less than `t_err`. The default setting is 1e-5.
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_0: A pytorch tensor. The approximated solution at time `t_0`.
- [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
- """
- ns = self.noise_schedule
- s = t_T * torch.ones((x.shape[0],)).to(x)
- lambda_s = ns.marginal_lambda(s)
- lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
- h = h_init * torch.ones_like(s).to(x)
- x_prev = x
- nfe = 0
- if order == 2:
- r1 = 0.5
- lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
- solver_type=solver_type,
- **kwargs)
- elif order == 3:
- r1, r2 = 1. / 3., 2. / 3.
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
- return_intermediate=True,
- solver_type=solver_type)
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
- solver_type=solver_type,
- **kwargs)
- else:
- raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
- while torch.abs((s - t_0)).mean() > t_err:
- t = ns.inverse_lambda(lambda_s + h)
- x_lower, lower_noise_kwargs = lower_update(x, s, t)
- x_higher = higher_update(x, s, t, **lower_noise_kwargs)
- delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
- norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
- E = norm_fn((x_higher - x_lower) / delta).max()
- if torch.all(E <= 1.):
- x = x_higher
- s = t
- x_prev = x_lower
- lambda_s = ns.marginal_lambda(s)
- h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
- nfe += order
- print('adaptive solver nfe', nfe)
- return x
-
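
The step-size rule in `dpm_solver_adaptive` follows the usual embedded-pair pattern: a lower- and a higher-order estimate are compared against a mixed absolute/relative tolerance, the step is accepted when the scaled error E <= 1, and the next logSNR step is shrunk or grown by theta * E**(-1/order). A minimal standalone sketch of just that update (illustrative only, not part of the codebase):

import torch

def next_logsnr_step(h, E, order, lambda_s, lambda_0, theta=0.9):
    # Same formula as above: shrink/grow by E^(-1/order), never step past lambda_0.
    return torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)

# E = 4 with order 2 halves the step; E < 1 would grow it.
print(next_logsnr_step(torch.tensor([0.05]), torch.tensor(4.0), 2,
                       torch.tensor([0.0]), torch.tensor([5.0])))   # tensor([0.0225])
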
- def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
- method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
- atol=0.0078, rtol=0.05,
- ):
- """
- Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
- =====================================================
- We support the following algorithms for both noise prediction model and data prediction model:
- - 'singlestep':
- Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
- We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
- The total number of function evaluations (NFE) == `steps`.
- Given a fixed NFE == `steps`, the sampling procedure is:
- - If `order` == 1:
- - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
- - If `order` == 2:
- - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
- - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
- - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If `order` == 3:
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
- - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
- - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
- - 'multistep':
- Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
- We initialize the first `order` values by lower order multistep solvers.
- Given a fixed NFE == `steps`, the sampling procedure is:
- Denote K = steps.
- - If `order` == 1:
- - We use K steps of DPM-Solver-1 (i.e. DDIM).
- - If `order` == 2:
- - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
- - If `order` == 3:
- - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
- - 'singlestep_fixed':
- Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
- - 'adaptive':
- Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
- We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
-                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
- (NFE) and the sample quality.
- - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
- - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
- =====================================================
-        Some advice on choosing the algorithm:
- - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
- Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
- e.g.
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
- skip_type='time_uniform', method='singlestep')
- - For **guided sampling with large guidance scale** by DPMs:
- Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
- e.g.
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
- skip_type='time_uniform', method='multistep')
- We support three types of `skip_type`:
-            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
-            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
- - 'time_quadratic': quadratic time for the time steps.
- =====================================================
- Args:
- x: A pytorch tensor. The initial value at time `t_start`
- e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
-            steps: An `int`. The total number of function evaluations (NFE).
- t_start: A `float`. The starting time of the sampling.
-                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
- t_end: A `float`. The ending time of the sampling.
- If `t_end` is None, we use 1. / self.noise_schedule.total_N.
- e.g. if total_N == 1000, we have `t_end` == 1e-3.
- For discrete-time DPMs:
- - We recommend `t_end` == 1. / self.noise_schedule.total_N.
- For continuous-time DPMs:
- - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
-            order: An `int`. The order of DPM-Solver.
- skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
- method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
- denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
- Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
-                This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
-                score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when
-                sampling diffusion models via diffusion SDEs on low-resolution images
-                (such as CIFAR-10). However, we observed that this trick does not matter for
-                high-resolution images. As it needs an additional NFE, we do not recommend
-                it for high-resolution images.
- lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
- Only valid for `method=multistep` and `steps < 15`. We empirically find that
- this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
-                (especially for steps <= 10). So we recommend setting it to `True`.
-            solver_type: A `str`. The Taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
- atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
- rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
- Returns:
- x_end: A pytorch tensor. The approximated solution at time `t_end`.
- """
- t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
- t_T = self.noise_schedule.T if t_start is None else t_start
- device = x.device
- if method == 'adaptive':
- with torch.no_grad():
- x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
- solver_type=solver_type)
- elif method == 'multistep':
- assert steps >= order
- timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
- assert timesteps.shape[0] - 1 == steps
- with torch.no_grad():
- vec_t = timesteps[0].expand((x.shape[0]))
- model_prev_list = [self.model_fn(x, vec_t)]
- t_prev_list = [vec_t]
- # Init the first `order` values by lower order multistep DPM-Solver.
- for init_order in tqdm(range(1, order), desc="DPM init order"):
- vec_t = timesteps[init_order].expand(x.shape[0])
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
- solver_type=solver_type)
- model_prev_list.append(self.model_fn(x, vec_t))
- t_prev_list.append(vec_t)
- # Compute the remaining values by `order`-th order multistep DPM-Solver.
- for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
- vec_t = timesteps[step].expand(x.shape[0])
- if lower_order_final and steps < 15:
- step_order = min(order, steps + 1 - step)
- else:
- step_order = order
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
- solver_type=solver_type)
- for i in range(order - 1):
- t_prev_list[i] = t_prev_list[i + 1]
- model_prev_list[i] = model_prev_list[i + 1]
- t_prev_list[-1] = vec_t
- # We do not need to evaluate the final model value.
- if step < steps:
- model_prev_list[-1] = self.model_fn(x, vec_t)
- elif method in ['singlestep', 'singlestep_fixed']:
- if method == 'singlestep':
- timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
- skip_type=skip_type,
- t_T=t_T, t_0=t_0,
- device=device)
- elif method == 'singlestep_fixed':
- K = steps // order
- orders = [order, ] * K
- timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
- for i, order in enumerate(orders):
- t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
- timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
- N=order, device=device)
- lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
- vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
- h = lambda_inner[-1] - lambda_inner[0]
- r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
- r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
- x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
- if denoise_to_zero:
- x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
- return x
-
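
For reference, the NFE allocation described in the docstring above (how `steps` is split across solver orders for `method='singlestep'`) can be reproduced with a few lines of arithmetic. This is only a sketch of the documented behaviour, not the library's own `get_orders_and_timesteps_for_singlestep_solver`:

def singlestep_order_schedule(steps, order):
    # Mirrors the split described in the sample() docstring above.
    if order == 3:
        K = steps // 3 + 1
        if steps % 3 == 0:
            orders = [3] * (K - 2) + [2, 1]
        elif steps % 3 == 1:
            orders = [3] * (K - 1) + [1]
        else:
            orders = [3] * (K - 1) + [2]
    elif order == 2:
        if steps % 2 == 0:
            orders = [2] * (steps // 2)
        else:
            orders = [2] * (steps // 2) + [1]
    else:
        orders = [1] * steps
    return orders

print(singlestep_order_schedule(20, 3))   # the orders always sum to `steps` function evaluations
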
-
-#############################################################
-# other utility functions
-#############################################################
-
-def interpolate_fn(x, xp, yp):
- """
- A piecewise linear function y = f(x), using xp and yp as keypoints.
- We implement f(x) in a differentiable way (i.e. applicable for autograd).
-    The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
- Args:
- x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
- xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
- yp: PyTorch tensor with shape [C, K].
- Returns:
- The function values f(x), with shape [N, C].
- """
- N, K = x.shape[0], xp.shape[1]
- all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
- sorted_all_x, x_indices = torch.sort(all_x, dim=2)
- x_idx = torch.argmin(x_indices, dim=2)
- cand_start_idx = x_idx - 1
- start_idx = torch.where(
- torch.eq(x_idx, 0),
- torch.tensor(1, device=x.device),
- torch.where(
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
- ),
- )
- end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
- start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
- end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
- start_idx2 = torch.where(
- torch.eq(x_idx, 0),
- torch.tensor(0, device=x.device),
- torch.where(
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
- ),
- )
- y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
- start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
- end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
- cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
- return cand
-
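
A tiny usage example of the piecewise-linear helper above; the second query lies beyond the keypoints and is extrapolated from the outermost segment (values are arbitrary):

import torch
x = torch.tensor([[0.5], [3.0]])          # [N, C] queries, one channel
xp = torch.tensor([[0.0, 1.0, 2.0]])      # [C, K] keypoint x-coordinates
yp = torch.tensor([[0.0, 10.0, 20.0]])    # [C, K] keypoint y-coordinates
print(interpolate_fn(x, xp, yp))          # tensor([[ 5.], [30.]])
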
-
-def expand_dims(v, dims):
- """
-    Expand the tensor `v` to have `dims` dimensions.
- Args:
- `v`: a PyTorch tensor with shape [N].
-        `dims`: an `int`.
- Returns:
- a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
- """
- return v[(...,) + (None,) * (dims - 1)]
\ No newline at end of file
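
And a one-line illustration of expand_dims: it turns a per-batch vector into a broadcastable [N, 1, ..., 1] tensor.

import torch
v = torch.tensor([1.0, 2.0])              # shape [N]
print(expand_dims(v, 4).shape)            # torch.Size([2, 1, 1, 1]), broadcasts against [N, C, H, W]
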
diff --git a/comfy/ldm/models/diffusion/dpm_solver/sampler.py b/comfy/ldm/models/diffusion/dpm_solver/sampler.py
deleted file mode 100644
index e4d0d0a3875..00000000000
--- a/comfy/ldm/models/diffusion/dpm_solver/sampler.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""SAMPLING ONLY."""
-import torch
-
-from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
-
-MODEL_TYPES = {
- "eps": "noise",
- "v": "v"
-}
-
-
-class DPMSolverSampler(object):
- def __init__(self, model, device=torch.device("cuda"), **kwargs):
- super().__init__()
- self.model = model
- self.device = device
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
- self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
-
- def register_buffer(self, name, attr):
- if type(attr) == torch.Tensor:
- if attr.device != self.device:
- attr = attr.to(self.device)
- setattr(self, name, attr)
-
- @torch.no_grad()
- def sample(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list): ctmp = ctmp[0]
- if isinstance(ctmp, torch.Tensor):
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- elif isinstance(conditioning, list):
- for ctmp in conditioning:
- if ctmp.shape[0] != batch_size:
- print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")
- else:
- if isinstance(conditioning, torch.Tensor):
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
-
- print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')
-
- device = self.model.betas.device
- if x_T is None:
- img = torch.randn(size, device=device)
- else:
- img = x_T
-
- ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
-
- model_fn = model_wrapper(
- lambda x, t, c: self.model.apply_model(x, t, c),
- ns,
- model_type=MODEL_TYPES[self.model.parameterization],
- guidance_type="classifier-free",
- condition=conditioning,
- unconditional_condition=unconditional_conditioning,
- guidance_scale=unconditional_guidance_scale,
- )
-
- dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
- x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2,
- lower_order_final=True)
-
- return x.to(device), None
diff --git a/comfy/ldm/models/diffusion/plms.py b/comfy/ldm/models/diffusion/plms.py
deleted file mode 100644
index 9d31b3994ed..00000000000
--- a/comfy/ldm/models/diffusion/plms.py
+++ /dev/null
@@ -1,245 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-import numpy as np
-from tqdm import tqdm
-from functools import partial
-
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
-from ldm.models.diffusion.sampling_util import norm_thresholding
-
-
-class PLMSSampler(object):
- def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwargs):
- super().__init__()
- self.model = model
- self.ddpm_num_timesteps = model.num_timesteps
- self.schedule = schedule
- self.device = device
-
- def register_buffer(self, name, attr):
- if type(attr) == torch.Tensor:
- if attr.device != self.device:
- attr = attr.to(self.device)
- setattr(self, name, attr)
-
- def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
- if ddim_eta != 0:
- raise ValueError('ddim_eta must be 0 for PLMS')
- self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
- num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
- alphas_cumprod = self.model.alphas_cumprod
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
-
- self.register_buffer('betas', to_torch(self.model.betas))
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
- self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
- # ddim sampling parameters
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
- ddim_timesteps=self.ddim_timesteps,
- eta=ddim_eta,verbose=verbose)
- self.register_buffer('ddim_sigmas', ddim_sigmas)
- self.register_buffer('ddim_alphas', ddim_alphas)
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
- self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
- 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
- self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
- @torch.no_grad()
- def sample(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- dynamic_threshold=None,
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- cbs = conditioning[list(conditioning.keys())[0]].shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for PLMS sampling is {size}')
-
- samples, intermediates = self.plms_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- dynamic_threshold=dynamic_threshold,
- )
- return samples, intermediates
-
- @torch.no_grad()
- def plms_sampling(self, cond, shape,
- x_T=None, ddim_use_original_steps=False,
- callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, log_every_t=100,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None,
- dynamic_threshold=None):
- device = self.model.betas.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- if timesteps is None:
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
- elif timesteps is not None and not ddim_use_original_steps:
- subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
- timesteps = self.ddim_timesteps[:subset_end]
-
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
- time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
- total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
- print(f"Running PLMS Sampling with {total_steps} timesteps")
-
- iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
- old_eps = []
-
- for i, step in enumerate(iterator):
- index = total_steps - i - 1
- ts = torch.full((b,), step, device=device, dtype=torch.long)
- ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
-
- if mask is not None:
- assert x0 is not None
- img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
- img = img_orig * mask + (1. - mask) * img
-
- outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
- quantize_denoised=quantize_denoised, temperature=temperature,
- noise_dropout=noise_dropout, score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- old_eps=old_eps, t_next=ts_next,
- dynamic_threshold=dynamic_threshold)
- img, pred_x0, e_t = outs
- old_eps.append(e_t)
- if len(old_eps) >= 4:
- old_eps.pop(0)
- if callback: callback(i)
- if img_callback: img_callback(pred_x0, i)
-
- if index % log_every_t == 0 or index == total_steps - 1:
- intermediates['x_inter'].append(img)
- intermediates['pred_x0'].append(pred_x0)
-
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
- dynamic_threshold=None):
- b, *_, device = *x.shape, x.device
-
- def get_model_output(x, t):
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- e_t = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
- c_in = torch.cat([unconditional_conditioning, c])
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
- if score_corrector is not None:
- assert self.model.parameterization == "eps"
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- return e_t
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
- def get_x_prev_and_pred_x0(e_t, index):
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
- if dynamic_threshold is not None:
- pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
- e_t = get_model_output(x, t)
- if len(old_eps) == 0:
- # Pseudo Improved Euler (2nd order)
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
- e_t_next = get_model_output(x_prev, t_next)
- e_t_prime = (e_t + e_t_next) / 2
- elif len(old_eps) == 1:
- # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (3 * e_t - old_eps[-1]) / 2
- elif len(old_eps) == 2:
-            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
- elif len(old_eps) >= 3:
-            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
- return x_prev, pred_x0, e_t
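
The four branches above are the standard Adams-Bashforth weights for orders 1 to 4; a compact equivalent, as an illustrative sketch only:

def plms_combine(e_t, old_eps):
    # Same coefficients as the branches above, selected by how much eps history exists.
    if len(old_eps) == 0:
        return e_t                                        # the code above instead bootstraps with a Heun-style average
    if len(old_eps) == 1:
        return (3 * e_t - old_eps[-1]) / 2
    if len(old_eps) == 2:
        return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
    return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
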
diff --git a/comfy/ldm/models/diffusion/sampling_util.py b/comfy/ldm/models/diffusion/sampling_util.py
deleted file mode 100644
index 7eff02be6d7..00000000000
--- a/comfy/ldm/models/diffusion/sampling_util.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import torch
-import numpy as np
-
-
-def append_dims(x, target_dims):
- """Appends dimensions to the end of a tensor until it has target_dims dimensions.
- From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
- dims_to_append = target_dims - x.ndim
- if dims_to_append < 0:
- raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
- return x[(...,) + (None,) * dims_to_append]
-
-
-def norm_thresholding(x0, value):
- s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
- return x0 * (value / s)
-
-
-def spatial_norm_thresholding(x0, value):
- # b c h w
- s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
- return x0 * (value / s)
\ No newline at end of file
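
A quick numeric check of `norm_thresholding`: rows whose per-sample RMS exceeds `value` are rescaled down to exactly that RMS, while smaller rows pass through unchanged.

import torch
x0 = torch.tensor([[4.0, 0.0], [0.5, 0.0]])   # per-row RMS of roughly 2.83 and 0.35
print(norm_thresholding(x0, 1.0))              # first row rescaled to RMS 1.0, second row unchanged
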
diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 34484b288b4..f684523823d 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -5,8 +5,10 @@
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any
+from functools import partial
-from .diffusionmodules.util import checkpoint
+
+from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention
from comfy import model_management
@@ -94,253 +96,259 @@ def zero_module(module):
def Normalize(in_channels, dtype=None, device=None):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
+def attention_basic(q, k, v, heads, mask=None):
+ b, _, dim_head = q.shape
+ dim_head //= heads
+ scale = dim_head ** -0.5
+
+ h = heads
+ q, k, v = map(
+ lambda t: t.unsqueeze(3)
+ .reshape(b, -1, heads, dim_head)
+ .permute(0, 2, 1, 3)
+ .reshape(b * heads, -1, dim_head)
+ .contiguous(),
+ (q, k, v),
+ )
+
+ # force cast to fp32 to avoid overflowing
+ if _ATTN_PRECISION =="fp32":
+ with torch.autocast(enabled=False, device_type = 'cuda'):
+ q, k = q.float(), k.float()
+ sim = einsum('b i d, b j d -> b i j', q, k) * scale
+ else:
+ sim = einsum('b i d, b j d -> b i j', q, k) * scale
-class SpatialSelfAttention(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.in_channels = in_channels
-
- self.norm = Normalize(in_channels)
- self.q = torch.nn.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.k = torch.nn.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.v = torch.nn.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.proj_out = torch.nn.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
-
- def forward(self, x):
- h_ = x
- h_ = self.norm(h_)
- q = self.q(h_)
- k = self.k(h_)
- v = self.v(h_)
-
- # compute attention
- b,c,h,w = q.shape
- q = rearrange(q, 'b c h w -> b (h w) c')
- k = rearrange(k, 'b c h w -> b c (h w)')
- w_ = torch.einsum('bij,bjk->bik', q, k)
-
- w_ = w_ * (int(c)**(-0.5))
- w_ = torch.nn.functional.softmax(w_, dim=2)
-
- # attend to values
- v = rearrange(v, 'b c h w -> b c (h w)')
- w_ = rearrange(w_, 'b i j -> b j i')
- h_ = torch.einsum('bij,bjk->bik', v, w_)
- h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
- h_ = self.proj_out(h_)
-
- return x+h_
+ del q, k
+ if exists(mask):
+ mask = rearrange(mask, 'b ... -> b (...)')
+ max_neg_value = -torch.finfo(sim.dtype).max
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
+ sim.masked_fill_(~mask, max_neg_value)
-class CrossAttentionBirchSan(nn.Module):
- def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
- super().__init__()
- inner_dim = dim_head * heads
- context_dim = default(context_dim, query_dim)
+ # attention, what we cannot get enough of
+ sim = sim.softmax(dim=-1)
- self.scale = dim_head ** -0.5
- self.heads = heads
+ out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
+ out = (
+ out.unsqueeze(0)
+ .reshape(b, heads, -1, dim_head)
+ .permute(0, 2, 1, 3)
+ .reshape(b, -1, heads * dim_head)
+ )
+ return out
- self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_out = nn.Sequential(
- operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
- nn.Dropout(dropout)
- )
+def attention_sub_quad(query, key, value, heads, mask=None):
+ b, _, dim_head = query.shape
+ dim_head //= heads
- def forward(self, x, context=None, value=None, mask=None):
- h = self.heads
+ scale = dim_head ** -0.5
+ query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
+ value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
- query = self.to_q(x)
- context = default(context, x)
- key = self.to_k(context)
- if value is not None:
- value = self.to_v(value)
- else:
- value = self.to_v(context)
+ key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)
- del context, x
-
- query = query.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)
- key_t = key.transpose(1,2).unflatten(1, (self.heads, -1)).flatten(end_dim=1)
- del key
- value = value.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)
-
- dtype = query.dtype
- upcast_attention = _ATTN_PRECISION =="fp32" and query.dtype != torch.float32
- if upcast_attention:
- bytes_per_token = torch.finfo(torch.float32).bits//8
- else:
- bytes_per_token = torch.finfo(query.dtype).bits//8
- batch_x_heads, q_tokens, _ = query.shape
- _, _, k_tokens = key_t.shape
- qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
-
- mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)
+ dtype = query.dtype
+ upcast_attention = _ATTN_PRECISION =="fp32" and query.dtype != torch.float32
+ if upcast_attention:
+ bytes_per_token = torch.finfo(torch.float32).bits//8
+ else:
+ bytes_per_token = torch.finfo(query.dtype).bits//8
+ batch_x_heads, q_tokens, _ = query.shape
+ _, _, k_tokens = key.shape
+ qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
- chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD
+ mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)
- kv_chunk_size_min = None
+ kv_chunk_size_min = None
+ kv_chunk_size = None
+ query_chunk_size = None
- #not sure at all about the math here
- #TODO: tweak this
- if mem_free_total > 8192 * 1024 * 1024 * 1.3:
- query_chunk_size_x = 1024 * 4
- elif mem_free_total > 4096 * 1024 * 1024 * 1.3:
- query_chunk_size_x = 1024 * 2
- else:
- query_chunk_size_x = 1024
- kv_chunk_size_min_x = None
- kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024
- if kv_chunk_size_x < 1024:
- kv_chunk_size_x = None
-
- if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
- # the big matmul fits into our memory limit; do everything in 1 chunk,
- # i.e. send it down the unchunked fast-path
- query_chunk_size = q_tokens
+ for x in [4096, 2048, 1024, 512, 256]:
+ count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
+ if count >= k_tokens:
kv_chunk_size = k_tokens
- else:
- query_chunk_size = query_chunk_size_x
- kv_chunk_size = kv_chunk_size_x
- kv_chunk_size_min = kv_chunk_size_min_x
-
- hidden_states = efficient_dot_product_attention(
- query,
- key_t,
- value,
- query_chunk_size=query_chunk_size,
- kv_chunk_size=kv_chunk_size,
- kv_chunk_size_min=kv_chunk_size_min,
- use_checkpoint=self.training,
- upcast_attention=upcast_attention,
- )
-
- hidden_states = hidden_states.to(dtype)
-
- hidden_states = hidden_states.unflatten(0, (-1, self.heads)).transpose(1,2).flatten(start_dim=2)
-
- out_proj, dropout = self.to_out
- hidden_states = out_proj(hidden_states)
- hidden_states = dropout(hidden_states)
-
- return hidden_states
-
-
-class CrossAttentionDoggettx(nn.Module):
- def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
- super().__init__()
- inner_dim = dim_head * heads
- context_dim = default(context_dim, query_dim)
-
- self.scale = dim_head ** -0.5
- self.heads = heads
-
- self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
-
- self.to_out = nn.Sequential(
- operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
- nn.Dropout(dropout)
- )
-
- def forward(self, x, context=None, value=None, mask=None):
- h = self.heads
-
- q_in = self.to_q(x)
- context = default(context, x)
- k_in = self.to_k(context)
- if value is not None:
- v_in = self.to_v(value)
- del value
- else:
- v_in = self.to_v(context)
- del context, x
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
- del q_in, k_in, v_in
-
- r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-
- mem_free_total = model_management.get_free_memory(q.device)
-
- gb = 1024 ** 3
- tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
- modifier = 3 if q.element_size() == 2 else 2.5
- mem_required = tensor_size * modifier
- steps = 1
-
-
- if mem_required > mem_free_total:
- steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
- # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
- # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
-
- if steps > 64:
- max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
- raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
- f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')
-
- # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
- first_op_done = False
- cleared_cache = False
- while True:
- try:
- slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
- for i in range(0, q.shape[1], slice_size):
- end = i + slice_size
- if _ATTN_PRECISION =="fp32":
- with torch.autocast(enabled=False, device_type = 'cuda'):
- s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * self.scale
- else:
- s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale
- first_op_done = True
-
- s2 = s1.softmax(dim=-1).to(v.dtype)
- del s1
-
- r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
- del s2
- break
- except model_management.OOM_EXCEPTION as e:
- if first_op_done == False:
- model_management.soft_empty_cache(True)
- if cleared_cache == False:
- cleared_cache = True
- print("out of memory error, emptying cache and trying again")
- continue
- steps *= 2
- if steps > 64:
- raise e
- print("out of memory error, increasing steps and trying again", steps)
+ query_chunk_size = x
+ break
+
+ if query_chunk_size is None:
+ query_chunk_size = 512
+
+ hidden_states = efficient_dot_product_attention(
+ query,
+ key,
+ value,
+ query_chunk_size=query_chunk_size,
+ kv_chunk_size=kv_chunk_size,
+ kv_chunk_size_min=kv_chunk_size_min,
+ use_checkpoint=False,
+ upcast_attention=upcast_attention,
+ )
+
+ hidden_states = hidden_states.to(dtype)
+
+ hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2)
+ return hidden_states
+
+def attention_split(q, k, v, heads, mask=None):
+ b, _, dim_head = q.shape
+ dim_head //= heads
+ scale = dim_head ** -0.5
+
+ h = heads
+ q, k, v = map(
+ lambda t: t.unsqueeze(3)
+ .reshape(b, -1, heads, dim_head)
+ .permute(0, 2, 1, 3)
+ .reshape(b * heads, -1, dim_head)
+ .contiguous(),
+ (q, k, v),
+ )
+
+ r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
+
+ mem_free_total = model_management.get_free_memory(q.device)
+
+ if _ATTN_PRECISION =="fp32":
+ element_size = 4
+ else:
+ element_size = q.element_size()
+
+ gb = 1024 ** 3
+ tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
+ modifier = 3
+ mem_required = tensor_size * modifier
+ steps = 1
+
+
+ if mem_required > mem_free_total:
+ steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
+ # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
+ # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
+
+ if steps > 64:
+ max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
+ raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
+ f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')
+
+ # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
+ first_op_done = False
+ cleared_cache = False
+ while True:
+ try:
+ slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
+ for i in range(0, q.shape[1], slice_size):
+ end = i + slice_size
+ if _ATTN_PRECISION =="fp32":
+ with torch.autocast(enabled=False, device_type = 'cuda'):
+ s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
else:
+ s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale
+
+ s2 = s1.softmax(dim=-1).to(v.dtype)
+ del s1
+ first_op_done = True
+
+ r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
+ del s2
+ break
+ except model_management.OOM_EXCEPTION as e:
+ if first_op_done == False:
+ model_management.soft_empty_cache(True)
+ if cleared_cache == False:
+ cleared_cache = True
+ print("out of memory error, emptying cache and trying again")
+ continue
+ steps *= 2
+ if steps > 64:
raise e
+ print("out of memory error, increasing steps and trying again", steps)
+ else:
+ raise e
+
+ del q, k, v
+
+ r1 = (
+ r1.unsqueeze(0)
+ .reshape(b, heads, -1, dim_head)
+ .permute(0, 2, 1, 3)
+ .reshape(b, -1, heads * dim_head)
+ )
+ return r1
+
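
For intuition, the slicing arithmetic in attention_split above works out as follows (the numbers are invented for illustration):

import math
batch_x_heads, q_tokens, k_tokens, element_size = 16, 4096, 4096, 4   # fp32 upcast
tensor_size = batch_x_heads * q_tokens * k_tokens * element_size       # 1 GiB attention matrix
mem_required = tensor_size * 3                                          # same modifier as above
mem_free_total = 2 * 1024**3                                            # pretend 2 GiB are free
steps = 2 ** math.ceil(math.log(mem_required / mem_free_total, 2))
print(steps)   # 2, so the query tokens are processed in two slices
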
+BROKEN_XFORMERS = False
+try:
+ x_vers = xformers.__version__
+ #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
+ BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
+except:
+ pass
+
+def attention_xformers(q, k, v, heads, mask=None):
+ b, _, dim_head = q.shape
+ dim_head //= heads
+ if BROKEN_XFORMERS:
+ if b * heads > 65535:
+ return attention_pytorch(q, k, v, heads, mask)
+
+ q, k, v = map(
+ lambda t: t.unsqueeze(3)
+ .reshape(b, -1, heads, dim_head)
+ .permute(0, 2, 1, 3)
+ .reshape(b * heads, -1, dim_head)
+ .contiguous(),
+ (q, k, v),
+ )
+
+ # actually compute the attention, what we cannot get enough of
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
+
+ if exists(mask):
+ raise NotImplementedError
+ out = (
+ out.unsqueeze(0)
+ .reshape(b, heads, -1, dim_head)
+ .permute(0, 2, 1, 3)
+ .reshape(b, -1, heads * dim_head)
+ )
+ return out
+
+def attention_pytorch(q, k, v, heads, mask=None):
+ b, _, dim_head = q.shape
+ dim_head //= heads
+ q, k, v = map(
+ lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
+ (q, k, v),
+ )
+
+ out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
+ out = (
+ out.transpose(1, 2).reshape(b, -1, heads * dim_head)
+ )
+ return out
+
+
+optimized_attention = attention_basic
+optimized_attention_masked = attention_basic
- del q, k, v
-
- r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
- del r1
+if model_management.xformers_enabled():
+ print("Using xformers cross attention")
+ optimized_attention = attention_xformers
+elif model_management.pytorch_attention_enabled():
+ print("Using pytorch cross attention")
+ optimized_attention = attention_pytorch
+else:
+ if args.use_split_cross_attention:
+ print("Using split optimization for cross attention")
+ optimized_attention = attention_split
+ else:
+ print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
+ optimized_attention = attention_sub_quad
- return self.to_out(r2)
+if model_management.pytorch_attention_enabled():
+ optimized_attention_masked = attention_pytorch
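
All of the attention_* backends above share one calling convention: q, k and v arrive as [batch, tokens, heads * dim_head] and the result comes back in the same layout, which is what lets optimized_attention be swapped freely. A minimal shape check against the PyTorch backend (arbitrary sizes):

import torch
b, t_q, t_kv, heads, dim_head = 2, 77, 77, 8, 64
q = torch.randn(b, t_q, heads * dim_head)
k = torch.randn(b, t_kv, heads * dim_head)
v = torch.randn(b, t_kv, heads * dim_head)
out = attention_pytorch(q, k, v, heads)   # same layout in and out
print(out.shape)                          # torch.Size([2, 77, 512])
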
class CrossAttention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
@@ -348,62 +356,6 @@ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
- self.scale = dim_head ** -0.5
- self.heads = heads
-
- self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
-
- self.to_out = nn.Sequential(
- operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
- nn.Dropout(dropout)
- )
-
- def forward(self, x, context=None, value=None, mask=None):
- h = self.heads
-
- q = self.to_q(x)
- context = default(context, x)
- k = self.to_k(context)
- if value is not None:
- v = self.to_v(value)
- del value
- else:
- v = self.to_v(context)
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-
- # force cast to fp32 to avoid overflowing
- if _ATTN_PRECISION =="fp32":
- with torch.autocast(enabled=False, device_type = 'cuda'):
- q, k = q.float(), k.float()
- sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
- else:
- sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
-
- del q, k
-
- if exists(mask):
- mask = rearrange(mask, 'b ... -> b (...)')
- max_neg_value = -torch.finfo(sim.dtype).max
- mask = repeat(mask, 'b j -> (b h) () j', h=h)
- sim.masked_fill_(~mask, max_neg_value)
-
- # attention, what we cannot get enough of
- sim = sim.softmax(dim=-1)
-
- out = einsum('b i j, b j d -> b i d', sim, v)
- out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
- return self.to_out(out)
-
-class MemoryEfficientCrossAttention(nn.Module):
- # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
- def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=comfy.ops):
- super().__init__()
- inner_dim = dim_head * heads
- context_dim = default(context_dim, query_dim)
-
self.heads = heads
self.dim_head = dim_head
@@ -412,7 +364,6 @@ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.
self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
- self.attention_op: Optional[Any] = None
def forward(self, x, context=None, value=None, mask=None):
q = self.to_q(x)
@@ -424,132 +375,80 @@ def forward(self, x, context=None, value=None, mask=None):
else:
v = self.to_v(context)
- b, _, _ = q.shape
- q, k, v = map(
- lambda t: t.unsqueeze(3)
- .reshape(b, t.shape[1], self.heads, self.dim_head)
- .permute(0, 2, 1, 3)
- .reshape(b * self.heads, t.shape[1], self.dim_head)
- .contiguous(),
- (q, k, v),
- )
-
- # actually compute the attention, what we cannot get enough of
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
-
- if exists(mask):
- raise NotImplementedError
- out = (
- out.unsqueeze(0)
- .reshape(b, self.heads, out.shape[1], self.dim_head)
- .permute(0, 2, 1, 3)
- .reshape(b, out.shape[1], self.heads * self.dim_head)
- )
+ if mask is None:
+ out = optimized_attention(q, k, v, self.heads)
+ else:
+ out = optimized_attention_masked(q, k, v, self.heads, mask)
return self.to_out(out)
-class CrossAttentionPytorch(nn.Module):
- def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
- super().__init__()
- inner_dim = dim_head * heads
- context_dim = default(context_dim, query_dim)
-
- self.heads = heads
- self.dim_head = dim_head
-
- self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
- self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
-
- self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
- self.attention_op: Optional[Any] = None
- def forward(self, x, context=None, value=None, mask=None):
- q = self.to_q(x)
- context = default(context, x)
- k = self.to_k(context)
- if value is not None:
- v = self.to_v(value)
- del value
- else:
- v = self.to_v(context)
+class BasicTransformerBlock(nn.Module):
+ def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None,
+ disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=comfy.ops):
+ super().__init__()
- b, _, _ = q.shape
- q, k, v = map(
- lambda t: t.view(b, -1, self.heads, self.dim_head).transpose(1, 2),
- (q, k, v),
- )
+ self.ff_in = ff_in or inner_dim is not None
+ if inner_dim is None:
+ inner_dim = dim
- out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
+ self.is_res = inner_dim == dim
- if exists(mask):
- raise NotImplementedError
- out = (
- out.transpose(1, 2).reshape(b, -1, self.heads * self.dim_head)
- )
+ if self.ff_in:
+ self.norm_in = nn.LayerNorm(dim, dtype=dtype, device=device)
+ self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)
- return self.to_out(out)
+ self.disable_self_attn = disable_self_attn
+ self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,
+ context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations) # is a self-attention if not self.disable_self_attn
+ self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)
-if model_management.xformers_enabled():
- print("Using xformers cross attention")
- CrossAttention = MemoryEfficientCrossAttention
-elif model_management.pytorch_attention_enabled():
- print("Using pytorch cross attention")
- CrossAttention = CrossAttentionPytorch
-else:
- if args.use_split_cross_attention:
- print("Using split optimization for cross attention")
- CrossAttention = CrossAttentionDoggettx
- else:
- print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
- CrossAttention = CrossAttentionBirchSan
+ if disable_temporal_crossattention:
+ if switch_temporal_ca_to_sa:
+ raise ValueError
+ else:
+ self.attn2 = None
+ else:
+ context_dim_attn2 = None
+ if not switch_temporal_ca_to_sa:
+ context_dim_attn2 = context_dim
+ self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2,
+ heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none
+ self.norm2 = nn.LayerNorm(inner_dim, dtype=dtype, device=device)
-class BasicTransformerBlock(nn.Module):
- def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
- disable_self_attn=False, dtype=None, device=None, operations=comfy.ops):
- super().__init__()
- self.disable_self_attn = disable_self_attn
- self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
- context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations) # is a self-attention if not self.disable_self_attn
- self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)
- self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
- heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none
- self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device)
- self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device)
- self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device)
+ self.norm1 = nn.LayerNorm(inner_dim, dtype=dtype, device=device)
+ self.norm3 = nn.LayerNorm(inner_dim, dtype=dtype, device=device)
self.checkpoint = checkpoint
self.n_heads = n_heads
self.d_head = d_head
+ self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa
def forward(self, x, context=None, transformer_options={}):
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
def _forward(self, x, context=None, transformer_options={}):
extra_options = {}
- block = None
- block_index = 0
- if "current_index" in transformer_options:
- extra_options["transformer_index"] = transformer_options["current_index"]
- if "block_index" in transformer_options:
- block_index = transformer_options["block_index"]
- extra_options["block_index"] = block_index
- if "original_shape" in transformer_options:
- extra_options["original_shape"] = transformer_options["original_shape"]
- if "block" in transformer_options:
- block = transformer_options["block"]
- extra_options["block"] = block
- if "patches" in transformer_options:
- transformer_patches = transformer_options["patches"]
- else:
- transformer_patches = {}
+ block = transformer_options.get("block", None)
+ block_index = transformer_options.get("block_index", 0)
+ transformer_patches = {}
+ transformer_patches_replace = {}
+
+ for k in transformer_options:
+ if k == "patches":
+ transformer_patches = transformer_options[k]
+ elif k == "patches_replace":
+ transformer_patches_replace = transformer_options[k]
+ else:
+ extra_options[k] = transformer_options[k]
extra_options["n_heads"] = self.n_heads
extra_options["dim_head"] = self.d_head
- if "patches_replace" in transformer_options:
- transformer_patches_replace = transformer_options["patches_replace"]
- else:
- transformer_patches_replace = {}
+ if self.ff_in:
+ x_skip = x
+ x = self.ff_in(self.norm_in(x))
+ if self.is_res:
+ x += x_skip
n = self.norm1(x)
if self.disable_self_attn:
@@ -598,31 +497,34 @@ def _forward(self, x, context=None, transformer_options={}):
for p in patch:
x = p(x, extra_options)
- n = self.norm2(x)
-
- context_attn2 = context
- value_attn2 = None
- if "attn2_patch" in transformer_patches:
- patch = transformer_patches["attn2_patch"]
- value_attn2 = context_attn2
- for p in patch:
- n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)
-
- attn2_replace_patch = transformer_patches_replace.get("attn2", {})
- block_attn2 = transformer_block
- if block_attn2 not in attn2_replace_patch:
- block_attn2 = block
-
- if block_attn2 in attn2_replace_patch:
- if value_attn2 is None:
+ if self.attn2 is not None:
+ n = self.norm2(x)
+ if self.switch_temporal_ca_to_sa:
+ context_attn2 = n
+ else:
+ context_attn2 = context
+ value_attn2 = None
+ if "attn2_patch" in transformer_patches:
+ patch = transformer_patches["attn2_patch"]
value_attn2 = context_attn2
- n = self.attn2.to_q(n)
- context_attn2 = self.attn2.to_k(context_attn2)
- value_attn2 = self.attn2.to_v(value_attn2)
- n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
- n = self.attn2.to_out(n)
- else:
- n = self.attn2(n, context=context_attn2, value=value_attn2)
+ for p in patch:
+ n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)
+
+ attn2_replace_patch = transformer_patches_replace.get("attn2", {})
+ block_attn2 = transformer_block
+ if block_attn2 not in attn2_replace_patch:
+ block_attn2 = block
+
+ if block_attn2 in attn2_replace_patch:
+ if value_attn2 is None:
+ value_attn2 = context_attn2
+ n = self.attn2.to_q(n)
+ context_attn2 = self.attn2.to_k(context_attn2)
+ value_attn2 = self.attn2.to_v(value_attn2)
+ n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
+ n = self.attn2.to_out(n)
+ else:
+ n = self.attn2(n, context=context_attn2, value=value_attn2)
if "attn2_output_patch" in transformer_patches:
patch = transformer_patches["attn2_output_patch"]
@@ -630,7 +532,12 @@ def _forward(self, x, context=None, transformer_options={}):
n = p(n, extra_options)
x += n
- x = self.ff(self.norm3(x)) + x
+ if self.is_res:
+ x_skip = x
+ x = self.ff(self.norm3(x))
+ if self.is_res:
+ x += x_skip
+
return x
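# Illustrative sketch (not part of this patch): how an "attn2" replace patch plugs into the
# branch above. The callback receives q/k/v already projected by attn2.to_q/to_k/to_v and its
# return value is fed into attn2.to_out; the block key ("input", 4) and the plain SDPA body
# below are placeholder assumptions, not comfy internals.
import torch

def my_attn2_replace(q, k, v, extra_options):
    heads = extra_options["n_heads"]      # set a few lines above in _forward
    dim_head = extra_options["dim_head"]
    def split(t):                         # (b, tokens, heads*dim_head) -> (b, heads, tokens, dim_head)
        b, n, _ = t.shape
        return t.view(b, n, heads, dim_head).transpose(1, 2)
    out = torch.nn.functional.scaled_dot_product_attention(split(q), split(k), split(v))
    b, h, n, d = out.shape
    return out.transpose(1, 2).reshape(b, n, h * d)

# Registered under "patches_replace", keyed by a block id so the block_attn2 lookup can find it:
transformer_options = {"patches_replace": {"attn2": {("input", 4): my_attn2_replace}}}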
@@ -698,3 +605,164 @@ def forward(self, x, context=None, transformer_options={}):
x = self.proj_out(x)
return x + x_in
+
+class SpatialVideoTransformer(SpatialTransformer):
+ def __init__(
+ self,
+ in_channels,
+ n_heads,
+ d_head,
+ depth=1,
+ dropout=0.0,
+ use_linear=False,
+ context_dim=None,
+ use_spatial_context=False,
+ timesteps=None,
+ merge_strategy: str = "fixed",
+ merge_factor: float = 0.5,
+ time_context_dim=None,
+ ff_in=False,
+ checkpoint=False,
+ time_depth=1,
+ disable_self_attn=False,
+ disable_temporal_crossattention=False,
+ max_time_embed_period: int = 10000,
+ dtype=None, device=None, operations=comfy.ops
+ ):
+ super().__init__(
+ in_channels,
+ n_heads,
+ d_head,
+ depth=depth,
+ dropout=dropout,
+ use_checkpoint=checkpoint,
+ context_dim=context_dim,
+ use_linear=use_linear,
+ disable_self_attn=disable_self_attn,
+ dtype=dtype, device=device, operations=operations
+ )
+ self.time_depth = time_depth
+ self.depth = depth
+ self.max_time_embed_period = max_time_embed_period
+
+ time_mix_d_head = d_head
+ n_time_mix_heads = n_heads
+
+ time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads)
+
+ inner_dim = n_heads * d_head
+ if use_spatial_context:
+ time_context_dim = context_dim
+
+ self.time_stack = nn.ModuleList(
+ [
+ BasicTransformerBlock(
+ inner_dim,
+ n_time_mix_heads,
+ time_mix_d_head,
+ dropout=dropout,
+ context_dim=time_context_dim,
+ # timesteps=timesteps,
+ checkpoint=checkpoint,
+ ff_in=ff_in,
+ inner_dim=time_mix_inner_dim,
+ disable_self_attn=disable_self_attn,
+ disable_temporal_crossattention=disable_temporal_crossattention,
+ dtype=dtype, device=device, operations=operations
+ )
+ for _ in range(self.depth)
+ ]
+ )
+
+ assert len(self.time_stack) == len(self.transformer_blocks)
+
+ self.use_spatial_context = use_spatial_context
+ self.in_channels = in_channels
+
+ time_embed_dim = self.in_channels * 4
+ self.time_pos_embed = nn.Sequential(
+ operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device),
+ nn.SiLU(),
+ operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device),
+ )
+
+ self.time_mixer = AlphaBlender(
+ alpha=merge_factor, merge_strategy=merge_strategy
+ )
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ context: Optional[torch.Tensor] = None,
+ time_context: Optional[torch.Tensor] = None,
+ timesteps: Optional[int] = None,
+ image_only_indicator: Optional[torch.Tensor] = None,
+ transformer_options={}
+ ) -> torch.Tensor:
+ _, _, h, w = x.shape
+ x_in = x
+ spatial_context = None
+ if exists(context):
+ spatial_context = context
+
+ if self.use_spatial_context:
+ assert (
+ context.ndim == 3
+ ), f"n dims of spatial context should be 3 but are {context.ndim}"
+
+ if time_context is None:
+ time_context = context
+ time_context_first_timestep = time_context[::timesteps]
+ time_context = repeat(
+ time_context_first_timestep, "b ... -> (b n) ...", n=h * w
+ )
+ elif time_context is not None and not self.use_spatial_context:
+ time_context = repeat(time_context, "b ... -> (b n) ...", n=h * w)
+ if time_context.ndim == 2:
+ time_context = rearrange(time_context, "b c -> b 1 c")
+
+ x = self.norm(x)
+ if not self.use_linear:
+ x = self.proj_in(x)
+ x = rearrange(x, "b c h w -> b (h w) c")
+ if self.use_linear:
+ x = self.proj_in(x)
+
+ num_frames = torch.arange(timesteps, device=x.device)
+ num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
+ num_frames = rearrange(num_frames, "b t -> (b t)")
+ t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype)
+ emb = self.time_pos_embed(t_emb)
+ emb = emb[:, None, :]
+
+ for it_, (block, mix_block) in enumerate(
+ zip(self.transformer_blocks, self.time_stack)
+ ):
+ transformer_options["block_index"] = it_
+ x = block(
+ x,
+ context=spatial_context,
+ transformer_options=transformer_options,
+ )
+
+ x_mix = x
+ x_mix = x_mix + emb
+
+ B, S, C = x_mix.shape
+ x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps)
+ x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options
+ x_mix = rearrange(
+ x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps
+ )
+
+ x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator)
+
+ if self.use_linear:
+ x = self.proj_out(x)
+ x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
+ if not self.use_linear:
+ x = self.proj_out(x)
+ out = x + x_in
+ return out
+
+
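# Shape sketch (toy sizes, not from this patch) of the folding the time_stack above relies on:
# spatial blocks see video frames folded into the batch axis, while the temporal mix blocks
# regroup the tokens so attention runs along the frame axis t for each spatial position.
import torch
from einops import rearrange

b, t, s, c = 2, 4, 16, 8                                  # batch, frames, spatial tokens, channels
x = torch.randn(b * t, s, c)                              # layout used by the spatial blocks
x_mix = rearrange(x, "(b t) s c -> (b s) t c", t=t)       # layout used by the temporal blocks
assert x_mix.shape == (b * s, t, c)
x_back = rearrange(x_mix, "(b s) t c -> (b t) s c", s=s, t=t)
assert x_back.shape == x.shape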
diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 5f38640c3d8..f23417fd216 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -6,7 +6,6 @@
from einops import rearrange
from typing import Optional, Any
-from ..attention import MemoryEfficientCrossAttention
from comfy import model_management
import comfy.ops
@@ -194,62 +193,53 @@ def slice_attention(q, k, v):
return r1
-class AttnBlock(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.in_channels = in_channels
+def normal_attention(q, k, v):
+ # compute attention
+ b,c,h,w = q.shape
+
+ q = q.reshape(b,c,h*w)
+ q = q.permute(0,2,1) # b,hw,c
+ k = k.reshape(b,c,h*w) # b,c,hw
+ v = v.reshape(b,c,h*w)
+
+ r1 = slice_attention(q, k, v)
+ h_ = r1.reshape(b,c,h,w)
+ del r1
+ return h_
+
+def xformers_attention(q, k, v):
+ # compute attention
+ B, C, H, W = q.shape
+ q, k, v = map(
+ lambda t: t.view(B, C, -1).transpose(1, 2).contiguous(),
+ (q, k, v),
+ )
+
+ try:
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
+ out = out.transpose(1, 2).reshape(B, C, H, W)
+ except NotImplementedError as e:
+ out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
+ return out
+
+def pytorch_attention(q, k, v):
+ # compute attention
+ B, C, H, W = q.shape
+ q, k, v = map(
+ lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
+ (q, k, v),
+ )
+
+ try:
+ out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
+ out = out.transpose(2, 3).reshape(B, C, H, W)
+ except model_management.OOM_EXCEPTION as e:
+ print("scaled_dot_product_attention OOMed: switched to slice attention")
+ out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
+ return out
- self.norm = Normalize(in_channels)
- self.q = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.k = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.v = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.proj_out = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
-
- def forward(self, x):
- h_ = x
- h_ = self.norm(h_)
- q = self.q(h_)
- k = self.k(h_)
- v = self.v(h_)
-
- # compute attention
- b,c,h,w = q.shape
-
- q = q.reshape(b,c,h*w)
- q = q.permute(0,2,1) # b,hw,c
- k = k.reshape(b,c,h*w) # b,c,hw
- v = v.reshape(b,c,h*w)
-
- r1 = slice_attention(q, k, v)
- h_ = r1.reshape(b,c,h,w)
- del r1
- h_ = self.proj_out(h_)
- return x+h_
-
-class MemoryEfficientAttnBlock(nn.Module):
- """
- Uses xformers efficient implementation,
- see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
- Note: this is a single-head self-attention operation
- """
- #
+class AttnBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
@@ -275,58 +265,16 @@ def __init__(self, in_channels):
kernel_size=1,
stride=1,
padding=0)
- self.attention_op: Optional[Any] = None
-
- def forward(self, x):
- h_ = x
- h_ = self.norm(h_)
- q = self.q(h_)
- k = self.k(h_)
- v = self.v(h_)
-
- # compute attention
- B, C, H, W = q.shape
- q, k, v = map(
- lambda t: t.view(B, C, -1).transpose(1, 2).contiguous(),
- (q, k, v),
- )
-
- try:
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
- out = out.transpose(1, 2).reshape(B, C, H, W)
- except NotImplementedError as e:
- out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
-
- out = self.proj_out(out)
- return x+out
-
-class MemoryEfficientAttnBlockPytorch(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.in_channels = in_channels
- self.norm = Normalize(in_channels)
- self.q = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.k = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.v = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.proj_out = comfy.ops.Conv2d(in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0)
- self.attention_op: Optional[Any] = None
+ if model_management.xformers_enabled_vae():
+ print("Using xformers attention in VAE")
+ self.optimized_attention = xformers_attention
+ elif model_management.pytorch_attention_enabled():
+ print("Using pytorch attention in VAE")
+ self.optimized_attention = pytorch_attention
+ else:
+ print("Using split attention in VAE")
+ self.optimized_attention = normal_attention
def forward(self, x):
h_ = x
@@ -335,54 +283,15 @@ def forward(self, x):
k = self.k(h_)
v = self.v(h_)
- # compute attention
- B, C, H, W = q.shape
- q, k, v = map(
- lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
- (q, k, v),
- )
-
- try:
- out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
- out = out.transpose(2, 3).reshape(B, C, H, W)
- except model_management.OOM_EXCEPTION as e:
- print("scaled_dot_product_attention OOMed: switched to slice attention")
- out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
+ h_ = self.optimized_attention(q, k, v)
- out = self.proj_out(out)
- return x+out
+ h_ = self.proj_out(h_)
-class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
- def forward(self, x, context=None, mask=None):
- b, c, h, w = x.shape
- x = rearrange(x, 'b c h w -> b (h w) c')
- out = super().forward(x, context=context, mask=mask)
- out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c)
- return x + out
+ return x+h_
def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
- assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
- if model_management.xformers_enabled_vae() and attn_type == "vanilla":
- attn_type = "vanilla-xformers"
- if model_management.pytorch_attention_enabled() and attn_type == "vanilla":
- attn_type = "vanilla-pytorch"
- print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
- if attn_type == "vanilla":
- assert attn_kwargs is None
- return AttnBlock(in_channels)
- elif attn_type == "vanilla-xformers":
- print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
- return MemoryEfficientAttnBlock(in_channels)
- elif attn_type == "vanilla-pytorch":
- return MemoryEfficientAttnBlockPytorch(in_channels)
- elif type == "memory-efficient-cross-attn":
- attn_kwargs["query_dim"] = in_channels
- return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
- elif attn_type == "none":
- return nn.Identity(in_channels)
- else:
- raise NotImplementedError()
+ return AttnBlock(in_channels)
class Model(nn.Module):
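# Shape walk-through (toy sizes; illustrative only) of the reshape shared by the VAE attention
# helpers above: single-head self-attention over the H*W spatial positions with channel dim C,
# which is exactly what pytorch_attention hands to scaled_dot_product_attention.
import torch

B, C, H, W = 1, 8, 4, 4
q, k, v = (torch.randn(B, C, H, W) for _ in range(3))
q_, k_, v_ = (t.view(B, 1, C, -1).transpose(2, 3).contiguous() for t in (q, k, v))
out = torch.nn.functional.scaled_dot_product_attention(q_, k_, v_)
out = out.transpose(2, 3).reshape(B, C, H, W)
assert out.shape == (B, C, H, W)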
@@ -632,7 +541,10 @@ class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
- attn_type="vanilla", **ignorekwargs):
+ conv_out_op=comfy.ops.Conv2d,
+ resnet_op=ResnetBlock,
+ attn_op=AttnBlock,
+ **ignorekwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
@@ -661,12 +573,12 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
# middle
self.mid = nn.Module()
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
+ self.mid.block_1 = resnet_op(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
+ self.mid.attn_1 = attn_op(block_in)
+ self.mid.block_2 = resnet_op(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
@@ -678,13 +590,13 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
- block.append(ResnetBlock(in_channels=block_in,
+ block.append(resnet_op(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
- attn.append(make_attn(block_in, attn_type=attn_type))
+ attn.append(attn_op(block_in))
up = nn.Module()
up.block = block
up.attn = attn
@@ -695,13 +607,13 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
# end
self.norm_out = Normalize(block_in)
- self.conv_out = comfy.ops.Conv2d(block_in,
+ self.conv_out = conv_out_op(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
- def forward(self, z):
+ def forward(self, z, **kwargs):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
@@ -712,16 +624,16 @@ def forward(self, z):
h = self.conv_in(z)
# middle
- h = self.mid.block_1(h, temb)
- h = self.mid.attn_1(h)
- h = self.mid.block_2(h, temb)
+ h = self.mid.block_1(h, temb, **kwargs)
+ h = self.mid.attn_1(h, **kwargs)
+ h = self.mid.block_2(h, temb, **kwargs)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
- h = self.up[i_level].block[i_block](h, temb)
+ h = self.up[i_level].block[i_block](h, temb, **kwargs)
if len(self.up[i_level].attn) > 0:
- h = self.up[i_level].attn[i_block](h)
+ h = self.up[i_level].attn[i_block](h, **kwargs)
if i_level != 0:
h = self.up[i_level].upsample(h)
@@ -731,7 +643,7 @@ def forward(self, z):
h = self.norm_out(h)
h = nonlinearity(h)
- h = self.conv_out(h)
+ h = self.conv_out(h, **kwargs)
if self.tanh_out:
h = torch.tanh(h)
return h
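# Rough construction sketch (sizes mirror a typical SD VAE decoder and are placeholders, not
# taken from this patch): with the new hooks the Decoder no longer picks an attention
# implementation by string; callers inject block classes, and extra kwargs passed to forward()
# are threaded through every resnet/attention block and conv_out.
import torch
from comfy.ldm.modules.diffusionmodules.model import Decoder, AttnBlock, ResnetBlock

decoder = Decoder(
    ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2,
    attn_resolutions=(), in_channels=3, resolution=256, z_channels=4,
    resnet_op=ResnetBlock, attn_op=AttnBlock,   # defaults; a video decoder swaps in temporal variants
)
img = decoder(torch.randn(1, 4, 32, 32))
assert img.shape == (1, 3, 256, 256)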
diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py
index b42637c821a..48264892c26 100644
--- a/comfy/ldm/modules/diffusionmodules/openaimodel.py
+++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py
@@ -5,6 +5,8 @@
import torch as th
import torch.nn as nn
import torch.nn.functional as F
+from einops import rearrange
+from functools import partial
from .util import (
checkpoint,
@@ -12,8 +14,9 @@
zero_module,
normalization,
timestep_embedding,
+ AlphaBlender,
)
-from ..attention import SpatialTransformer
+from ..attention import SpatialTransformer, SpatialVideoTransformer, default
from comfy.ldm.util import exists
import comfy.ops
@@ -28,39 +31,36 @@ def forward(self, x, emb):
Apply the module to `x` given `emb` timestep embeddings.
"""
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb, context=None, transformer_options={}, output_shape=None):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- elif isinstance(layer, SpatialTransformer):
- x = layer(x, context, transformer_options)
- elif isinstance(layer, Upsample):
- x = layer(x, output_shape=output_shape)
- else:
- x = layer(x)
- return x
-
-#This is needed because accelerate makes a copy of transformer_options which breaks "current_index"
-def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None):
+# This is needed because accelerate makes a copy of transformer_options, which breaks "transformer_index"
+def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None, time_context=None, num_video_frames=None, image_only_indicator=None):
for layer in ts:
- if isinstance(layer, TimestepBlock):
+ if isinstance(layer, VideoResBlock):
+ x = layer(x, emb, num_video_frames, image_only_indicator)
+ elif isinstance(layer, TimestepBlock):
x = layer(x, emb)
+ elif isinstance(layer, SpatialVideoTransformer):
+ x = layer(x, context, time_context, num_video_frames, image_only_indicator, transformer_options)
+ if "transformer_index" in transformer_options:
+ transformer_options["transformer_index"] += 1
elif isinstance(layer, SpatialTransformer):
x = layer(x, context, transformer_options)
- transformer_options["current_index"] += 1
+ if "transformer_index" in transformer_options:
+ transformer_options["transformer_index"] += 1
elif isinstance(layer, Upsample):
x = layer(x, output_shape=output_shape)
else:
x = layer(x)
return x
+class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
+ """
+ A sequential module that passes timestep embeddings to the children that
+ support it as an extra input.
+ """
+
+ def forward(self, *args, **kwargs):
+ return forward_timestep_embed(self, *args, **kwargs)
+
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
@@ -154,6 +154,9 @@ def __init__(
use_checkpoint=False,
up=False,
down=False,
+ kernel_size=3,
+ exchange_temb_dims=False,
+ skip_t_emb=False,
dtype=None,
device=None,
operations=comfy.ops
@@ -166,11 +169,17 @@ def __init__(
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
+ self.exchange_temb_dims = exchange_temb_dims
+
+ if isinstance(kernel_size, list):
+ padding = [k // 2 for k in kernel_size]
+ else:
+ padding = kernel_size // 2
self.in_layers = nn.Sequential(
nn.GroupNorm(32, channels, dtype=dtype, device=device),
nn.SiLU(),
- operations.conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device),
+ operations.conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device),
)
self.updown = up or down
@@ -184,19 +193,24 @@ def __init__(
else:
self.h_upd = self.x_upd = nn.Identity()
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- operations.Linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device
- ),
- )
+ self.skip_t_emb = skip_t_emb
+ if self.skip_t_emb:
+ self.emb_layers = None
+ self.exchange_temb_dims = False
+ else:
+ self.emb_layers = nn.Sequential(
+ nn.SiLU(),
+ operations.Linear(
+ emb_channels,
+ 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device
+ ),
+ )
self.out_layers = nn.Sequential(
nn.GroupNorm(32, self.out_channels, dtype=dtype, device=device),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
- operations.conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype, device=device)
+ operations.conv_nd(dims, self.out_channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device)
),
)
@@ -204,7 +218,7 @@ def __init__(
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = operations.conv_nd(
- dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device
+ dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device
)
else:
self.skip_connection = operations.conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device)
@@ -230,19 +244,110 @@ def _forward(self, x, emb):
h = in_conv(h)
else:
h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
+
+ emb_out = None
+ if not self.skip_t_emb:
+ emb_out = self.emb_layers(emb).type(h.dtype)
+ while len(emb_out.shape) < len(h.shape):
+ emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
+ h = out_norm(h)
+ if emb_out is not None:
+ scale, shift = th.chunk(emb_out, 2, dim=1)
+ h *= (1 + scale)
+ h += shift
h = out_rest(h)
else:
- h = h + emb_out
+ if emb_out is not None:
+ if self.exchange_temb_dims:
+ emb_out = rearrange(emb_out, "b t c ... -> b c t ...")
+ h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
+
+class VideoResBlock(ResBlock):
+ def __init__(
+ self,
+ channels: int,
+ emb_channels: int,
+ dropout: float,
+ video_kernel_size=3,
+ merge_strategy: str = "fixed",
+ merge_factor: float = 0.5,
+ out_channels=None,
+ use_conv: bool = False,
+ use_scale_shift_norm: bool = False,
+ dims: int = 2,
+ use_checkpoint: bool = False,
+ up: bool = False,
+ down: bool = False,
+ dtype=None,
+ device=None,
+ operations=comfy.ops
+ ):
+ super().__init__(
+ channels,
+ emb_channels,
+ dropout,
+ out_channels=out_channels,
+ use_conv=use_conv,
+ use_scale_shift_norm=use_scale_shift_norm,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ up=up,
+ down=down,
+ dtype=dtype,
+ device=device,
+ operations=operations
+ )
+
+ self.time_stack = ResBlock(
+ default(out_channels, channels),
+ emb_channels,
+ dropout=dropout,
+ dims=3,
+ out_channels=default(out_channels, channels),
+ use_scale_shift_norm=False,
+ use_conv=False,
+ up=False,
+ down=False,
+ kernel_size=video_kernel_size,
+ use_checkpoint=use_checkpoint,
+ exchange_temb_dims=True,
+ dtype=dtype,
+ device=device,
+ operations=operations
+ )
+ self.time_mixer = AlphaBlender(
+ alpha=merge_factor,
+ merge_strategy=merge_strategy,
+ rearrange_pattern="b t -> b 1 t 1 1",
+ )
+
+ def forward(
+ self,
+ x: th.Tensor,
+ emb: th.Tensor,
+ num_video_frames: int,
+ image_only_indicator = None,
+ ) -> th.Tensor:
+ x = super().forward(x, emb)
+
+ x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames)
+ x = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames)
+
+ x = self.time_stack(
+ x, rearrange(emb, "(b t) ... -> b t ...", t=num_video_frames)
+ )
+ x = self.time_mixer(
+ x_spatial=x_mix, x_temporal=x, image_only_indicator=image_only_indicator
+ )
+ x = rearrange(x, "b c t h w -> (b t) c h w")
+ return x
+
+
class Timestep(nn.Module):
def __init__(self, dim):
super().__init__()
@@ -251,6 +356,15 @@ def __init__(self, dim):
def forward(self, t):
return timestep_embedding(t, self.dim)
+def apply_control(h, control, name):
+ if control is not None and name in control and len(control[name]) > 0:
+ ctrl = control[name].pop()
+ if ctrl is not None:
+ try:
+ h += ctrl
+ except Exception:
+ print("warning: control could not be applied", h.shape, ctrl.shape)
+ return h
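# Toy illustration (shapes and dict layout are placeholders) of the contract apply_control
# expects: each entry is a stack of residuals popped once per block, and a shape mismatch only
# prints a warning instead of raising.
import torch
from comfy.ldm.modules.diffusionmodules.openaimodel import apply_control

h = torch.zeros(1, 4, 8, 8)
control = {"input": [torch.ones(1, 4, 8, 8), torch.ones(1, 4, 8, 8)]}
h = apply_control(h, control, "input")   # adds the last stacked residual
assert len(control["input"]) == 1        # one residual left for the next input block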
class UNetModel(nn.Module):
"""
@@ -259,10 +373,6 @@ class UNetModel(nn.Module):
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
@@ -289,15 +399,13 @@ def __init__(
model_channels,
out_channels,
num_res_blocks,
- attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
- use_fp16=False,
- use_bf16=False,
+ dtype=th.float32,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
@@ -315,6 +423,17 @@ def __init__(
use_linear_in_transformer=False,
adm_in_channels=None,
transformer_depth_middle=None,
+ transformer_depth_output=None,
+ use_temporal_resblock=False,
+ use_temporal_attention=False,
+ time_context_dim=None,
+ extra_ff_mix_layer=False,
+ use_spatial_context=False,
+ merge_strategy=None,
+ merge_factor=0.0,
+ video_kernel_size=None,
+ disable_temporal_crossattention=False,
+ max_ddpm_temb_period=10000,
device=None,
operations=comfy.ops,
):
@@ -342,10 +461,7 @@ def __init__(
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
- if isinstance(transformer_depth, int):
- transformer_depth = len(channel_mult) * [transformer_depth]
- if transformer_depth_middle is None:
- transformer_depth_middle = transformer_depth[-1]
+
if isinstance(num_res_blocks, int):
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
else:
@@ -353,30 +469,31 @@ def __init__(
raise ValueError("provide num_res_blocks either as an int (globally constant) or "
"as a list/tuple (per-level) with the same length as channel_mult")
self.num_res_blocks = num_res_blocks
+
if disable_self_attentions is not None:
# should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
assert len(disable_self_attentions) == len(channel_mult)
if num_attention_blocks is not None:
assert len(num_attention_blocks) == len(self.num_res_blocks)
- assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
- print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
- f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
- f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
- f"attention will still not be set.")
- self.attention_resolutions = attention_resolutions
+ transformer_depth = transformer_depth[:]
+ transformer_depth_output = transformer_depth_output[:]
+
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.dtype = th.bfloat16 if use_bf16 else self.dtype
+ self.dtype = dtype
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
+ self.use_temporal_resblocks = use_temporal_resblock
self.predict_codebook_ids = n_embed is not None
+ self.default_num_video_frames = None
+ self.default_image_only_indicator = None
+
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device),
@@ -413,13 +530,104 @@ def __init__(
input_block_chans = [model_channels]
ch = model_channels
ds = 1
+
+ def get_attention_layer(
+ ch,
+ num_heads,
+ dim_head,
+ depth=1,
+ context_dim=None,
+ use_checkpoint=False,
+ disable_self_attn=False,
+ ):
+ if use_temporal_attention:
+ return SpatialVideoTransformer(
+ ch,
+ num_heads,
+ dim_head,
+ depth=depth,
+ context_dim=context_dim,
+ time_context_dim=time_context_dim,
+ dropout=dropout,
+ ff_in=extra_ff_mix_layer,
+ use_spatial_context=use_spatial_context,
+ merge_strategy=merge_strategy,
+ merge_factor=merge_factor,
+ checkpoint=use_checkpoint,
+ use_linear=use_linear_in_transformer,
+ disable_self_attn=disable_self_attn,
+ disable_temporal_crossattention=disable_temporal_crossattention,
+ max_time_embed_period=max_ddpm_temb_period,
+ dtype=self.dtype, device=device, operations=operations
+ )
+ else:
+ return SpatialTransformer(
+ ch, num_heads, dim_head, depth=depth, context_dim=context_dim,
+ disable_self_attn=disable_self_attn, use_linear=use_linear_in_transformer,
+ use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations
+ )
+
+ def get_resblock(
+ merge_factor,
+ merge_strategy,
+ video_kernel_size,
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels,
+ dims,
+ use_checkpoint,
+ use_scale_shift_norm,
+ down=False,
+ up=False,
+ dtype=None,
+ device=None,
+ operations=comfy.ops
+ ):
+ if self.use_temporal_resblocks:
+ return VideoResBlock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ channels=ch,
+ emb_channels=time_embed_dim,
+ dropout=dropout,
+ out_channels=out_channels,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ down=down,
+ up=up,
+ dtype=dtype,
+ device=device,
+ operations=operations
+ )
+ else:
+ return ResBlock(
+ channels=ch,
+ emb_channels=time_embed_dim,
+ dropout=dropout,
+ out_channels=out_channels,
+ use_checkpoint=use_checkpoint,
+ dims=dims,
+ use_scale_shift_norm=use_scale_shift_norm,
+ down=down,
+ up=up,
+ dtype=dtype,
+ device=device,
+ operations=operations
+ )
+
for level, mult in enumerate(channel_mult):
for nr in range(self.num_res_blocks[level]):
layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
+ get_resblock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ ch=ch,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
@@ -430,7 +638,8 @@ def __init__(
)
]
ch = mult * model_channels
- if ds in attention_resolutions:
+ num_transformers = transformer_depth.pop(0)
+ if num_transformers > 0:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
@@ -445,11 +654,9 @@ def __init__(
disabled_sa = False
if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
- layers.append(SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations
- )
+ layers.append(get_attention_layer(
+ ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim,
+ disable_self_attn=disabled_sa, use_checkpoint=use_checkpoint)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
@@ -458,10 +665,13 @@ def __init__(
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
+ get_resblock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ ch=ch,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
@@ -490,35 +700,43 @@ def __init__(
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
+ mid_block = [
+ get_resblock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ ch=ch,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
+ out_channels=None,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
dtype=self.dtype,
device=device,
operations=operations
- ),
- SpatialTransformer( # always uses a self-attn
+ )]
+ if transformer_depth_middle >= 0:
+ mid_block += [get_attention_layer( # always uses a self-attn
ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
- disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations
+ disable_self_attn=disable_middle_self_attn, use_checkpoint=use_checkpoint
),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
+ get_resblock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ ch=ch,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
+ out_channels=None,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
dtype=self.dtype,
device=device,
operations=operations
- ),
- )
+ )]
+ self.middle_block = TimestepEmbedSequential(*mid_block)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
@@ -526,10 +744,13 @@ def __init__(
for i in range(self.num_res_blocks[level] + 1):
ich = input_block_chans.pop()
layers = [
- ResBlock(
- ch + ich,
- time_embed_dim,
- dropout,
+ get_resblock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ ch=ch + ich,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
@@ -540,7 +761,8 @@ def __init__(
)
]
ch = model_channels * mult
- if ds in attention_resolutions:
+ num_transformers = transformer_depth_output.pop()
+ if num_transformers > 0:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
@@ -556,19 +778,21 @@ def __init__(
if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
layers.append(
- SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations
+ get_attention_layer(
+ ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim,
+ disable_self_attn=disabled_sa, use_checkpoint=use_checkpoint
)
)
if level and i == self.num_res_blocks[level]:
out_ch = ch
layers.append(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
+ get_resblock(
+ merge_factor=merge_factor,
+ merge_strategy=merge_strategy,
+ video_kernel_size=video_kernel_size,
+ ch=ch,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
@@ -607,9 +831,13 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo
:return: an [N x C x ...] Tensor of outputs.
"""
transformer_options["original_shape"] = list(x.shape)
- transformer_options["current_index"] = 0
+ transformer_options["transformer_index"] = 0
transformer_patches = transformer_options.get("patches", {})
+ num_video_frames = kwargs.get("num_video_frames", self.default_num_video_frames)
+ image_only_indicator = kwargs.get("image_only_indicator", self.default_image_only_indicator)
+ time_context = kwargs.get("time_context", None)
+
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
@@ -624,26 +852,28 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo
h = x.type(self.dtype)
for id, module in enumerate(self.input_blocks):
transformer_options["block"] = ("input", id)
- h = forward_timestep_embed(module, h, emb, context, transformer_options)
- if control is not None and 'input' in control and len(control['input']) > 0:
- ctrl = control['input'].pop()
- if ctrl is not None:
- h += ctrl
+ h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
+ h = apply_control(h, control, 'input')
+ if "input_block_patch" in transformer_patches:
+ patch = transformer_patches["input_block_patch"]
+ for p in patch:
+ h = p(h, transformer_options)
+
hs.append(h)
+ if "input_block_patch_after_skip" in transformer_patches:
+ patch = transformer_patches["input_block_patch_after_skip"]
+ for p in patch:
+ h = p(h, transformer_options)
+
transformer_options["block"] = ("middle", 0)
- h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options)
- if control is not None and 'middle' in control and len(control['middle']) > 0:
- ctrl = control['middle'].pop()
- if ctrl is not None:
- h += ctrl
+ h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
+ h = apply_control(h, control, 'middle')
+
for id, module in enumerate(self.output_blocks):
transformer_options["block"] = ("output", id)
hsp = hs.pop()
- if control is not None and 'output' in control and len(control['output']) > 0:
- ctrl = control['output'].pop()
- if ctrl is not None:
- hsp += ctrl
+ hsp = apply_control(hsp, control, 'output')
if "output_block_patch" in transformer_patches:
patch = transformer_patches["output_block_patch"]
@@ -656,7 +886,7 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo
output_shape = hs[-1].shape
else:
output_shape = None
- h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape)
+ h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
h = h.type(x.dtype)
if self.predict_codebook_ids:
return self.id_predictor(h)
diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py
index d890c8044aa..704bbe57450 100644
--- a/comfy/ldm/modules/diffusionmodules/util.py
+++ b/comfy/ldm/modules/diffusionmodules/util.py
@@ -13,11 +13,78 @@
import torch
import torch.nn as nn
import numpy as np
-from einops import repeat
+from einops import repeat, rearrange
from comfy.ldm.util import instantiate_from_config
import comfy.ops
+class AlphaBlender(nn.Module):
+ strategies = ["learned", "fixed", "learned_with_images"]
+
+ def __init__(
+ self,
+ alpha: float,
+ merge_strategy: str = "learned_with_images",
+ rearrange_pattern: str = "b t -> (b t) 1 1",
+ ):
+ super().__init__()
+ self.merge_strategy = merge_strategy
+ self.rearrange_pattern = rearrange_pattern
+
+ assert (
+ merge_strategy in self.strategies
+ ), f"merge_strategy needs to be in {self.strategies}"
+
+ if self.merge_strategy == "fixed":
+ self.register_buffer("mix_factor", torch.Tensor([alpha]))
+ elif (
+ self.merge_strategy == "learned"
+ or self.merge_strategy == "learned_with_images"
+ ):
+ self.register_parameter(
+ "mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
+ )
+ else:
+ raise ValueError(f"unknown merge strategy {self.merge_strategy}")
+
+ def get_alpha(self, image_only_indicator: torch.Tensor) -> torch.Tensor:
+ # skip_time_mix = rearrange(repeat(skip_time_mix, 'b -> (b t) () () ()', t=t), '(b t) 1 ... -> b 1 t ...', t=t)
+ if self.merge_strategy == "fixed":
+ # make shape compatible
+ # alpha = repeat(self.mix_factor, '1 -> b () t () ()', t=t, b=bs)
+ alpha = self.mix_factor
+ elif self.merge_strategy == "learned":
+ alpha = torch.sigmoid(self.mix_factor)
+ # make shape compatible
+ # alpha = repeat(alpha, '1 -> s () ()', s = t * bs)
+ elif self.merge_strategy == "learned_with_images":
+ assert image_only_indicator is not None, "need image_only_indicator ..."
+ alpha = torch.where(
+ image_only_indicator.bool(),
+ torch.ones(1, 1, device=image_only_indicator.device),
+ rearrange(torch.sigmoid(self.mix_factor), "... -> ... 1"),
+ )
+ alpha = rearrange(alpha, self.rearrange_pattern)
+ # make shape compatible
+ # alpha = repeat(alpha, '1 -> s () ()', s = t * bs)
+ else:
+ raise NotImplementedError()
+ return alpha
+
+ def forward(
+ self,
+ x_spatial,
+ x_temporal,
+ image_only_indicator=None,
+ ) -> torch.Tensor:
+ alpha = self.get_alpha(image_only_indicator)
+ x = (
+ alpha.to(x_spatial.dtype) * x_spatial
+ + (1.0 - alpha).to(x_spatial.dtype) * x_temporal
+ )
+ return x
+
+
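# Usage sketch for AlphaBlender (toy shapes, not from this patch). With the "fixed" strategy the
# mix is a plain lerp; "learned" passes the parameter through a sigmoid; "learned_with_images"
# additionally needs image_only_indicator, a (batch, frames) tensor that forces alpha to 1
# (pure spatial signal) for image-only entries.
import torch
from comfy.ldm.modules.diffusionmodules.util import AlphaBlender

blender = AlphaBlender(alpha=0.3, merge_strategy="fixed")
x_spatial = torch.randn(4, 16, 8)
x_temporal = torch.randn(4, 16, 8)
mixed = blender(x_spatial=x_spatial, x_temporal=x_temporal)
assert torch.allclose(mixed, 0.3 * x_spatial + 0.7 * x_temporal)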
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if schedule == "linear":
betas = (
@@ -170,8 +237,8 @@ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
if not repeat_only:
half = dim // 2
freqs = torch.exp(
- -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
- ).to(device=timesteps.device)
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
+ )
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py
index 4d42059b5a8..8e8e8054dfd 100644
--- a/comfy/ldm/modules/sub_quadratic_attention.py
+++ b/comfy/ldm/modules/sub_quadratic_attention.py
@@ -83,7 +83,8 @@ def _summarize_chunk(
)
max_score, _ = torch.max(attn_weights, -1, keepdim=True)
max_score = max_score.detach()
- torch.exp(attn_weights - max_score, out=attn_weights)
+ attn_weights -= max_score
+ torch.exp(attn_weights, out=attn_weights)
exp_weights = attn_weights.to(value.dtype)
exp_values = torch.bmm(exp_weights, value)
max_score = max_score.squeeze(-1)
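# Standalone reminder (toy values, not from this patch) of why the max-subtraction above is
# safe: shifting the logits by their row max leaves the softmax unchanged but keeps exp()
# finite; the rewrite simply does the shift in place instead of allocating a temporary.
import torch

scores = torch.tensor([[1000.0, 999.0, 998.0]])          # would overflow exp() in float32
m = scores.max(dim=-1, keepdim=True).values
stable = torch.exp(scores - m)
assert torch.isinf(torch.exp(scores)).all()
assert torch.isfinite(stable).all()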
diff --git a/comfy/ldm/modules/temporal_ae.py b/comfy/ldm/modules/temporal_ae.py
new file mode 100644
index 00000000000..11ae049f3be
--- /dev/null
+++ b/comfy/ldm/modules/temporal_ae.py
@@ -0,0 +1,244 @@
+import functools
+from typing import Callable, Iterable, Union
+
+import torch
+from einops import rearrange, repeat
+
+import comfy.ops
+
+from .diffusionmodules.model import (
+ AttnBlock,
+ Decoder,
+ ResnetBlock,
+)
+from .diffusionmodules.openaimodel import ResBlock, timestep_embedding
+from .attention import BasicTransformerBlock
+
+def partialclass(cls, *args, **kwargs):
+ class NewCls(cls):
+ __init__ = functools.partialmethod(cls.__init__, *args, **kwargs)
+
+ return NewCls
+
+
+class VideoResBlock(ResnetBlock):
+ def __init__(
+ self,
+ out_channels,
+ *args,
+ dropout=0.0,
+ video_kernel_size=3,
+ alpha=0.0,
+ merge_strategy="learned",
+ **kwargs,
+ ):
+ super().__init__(out_channels=out_channels, dropout=dropout, *args, **kwargs)
+ if video_kernel_size is None:
+ video_kernel_size = [3, 1, 1]
+ self.time_stack = ResBlock(
+ channels=out_channels,
+ emb_channels=0,
+ dropout=dropout,
+ dims=3,
+ use_scale_shift_norm=False,
+ use_conv=False,
+ up=False,
+ down=False,
+ kernel_size=video_kernel_size,
+ use_checkpoint=False,
+ skip_t_emb=True,
+ )
+
+ self.merge_strategy = merge_strategy
+ if self.merge_strategy == "fixed":
+ self.register_buffer("mix_factor", torch.Tensor([alpha]))
+ elif self.merge_strategy == "learned":
+ self.register_parameter(
+ "mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
+ )
+ else:
+ raise ValueError(f"unknown merge strategy {self.merge_strategy}")
+
+ def get_alpha(self, bs):
+ if self.merge_strategy == "fixed":
+ return self.mix_factor
+ elif self.merge_strategy == "learned":
+ return torch.sigmoid(self.mix_factor)
+ else:
+ raise NotImplementedError()
+
+ def forward(self, x, temb, skip_video=False, timesteps=None):
+ b, c, h, w = x.shape
+ if timesteps is None:
+ timesteps = b
+
+ x = super().forward(x, temb)
+
+ if not skip_video:
+ x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
+
+ x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
+
+ x = self.time_stack(x, temb)
+
+ alpha = self.get_alpha(bs=b // timesteps)
+ x = alpha * x + (1.0 - alpha) * x_mix
+
+ x = rearrange(x, "b c t h w -> (b t) c h w")
+ return x
+
+
+class AE3DConv(torch.nn.Conv2d):
+ def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwargs):
+ super().__init__(in_channels, out_channels, *args, **kwargs)
+ if isinstance(video_kernel_size, Iterable):
+ padding = [int(k // 2) for k in video_kernel_size]
+ else:
+ padding = int(video_kernel_size // 2)
+
+ self.time_mix_conv = torch.nn.Conv3d(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=video_kernel_size,
+ padding=padding,
+ )
+
+ def forward(self, input, timesteps=None, skip_video=False):
+ if timesteps is None:
+ timesteps = input.shape[0]
+ x = super().forward(input)
+ if skip_video:
+ return x
+ x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
+ x = self.time_mix_conv(x)
+ return rearrange(x, "b c t h w -> (b t) c h w")
+
+
+class AttnVideoBlock(AttnBlock):
+ def __init__(
+ self, in_channels: int, alpha: float = 0, merge_strategy: str = "learned"
+ ):
+ super().__init__(in_channels)
+ # no context, single headed, as in base class
+ self.time_mix_block = BasicTransformerBlock(
+ dim=in_channels,
+ n_heads=1,
+ d_head=in_channels,
+ checkpoint=False,
+ ff_in=True,
+ )
+
+ time_embed_dim = self.in_channels * 4
+ self.video_time_embed = torch.nn.Sequential(
+ comfy.ops.Linear(self.in_channels, time_embed_dim),
+ torch.nn.SiLU(),
+ comfy.ops.Linear(time_embed_dim, self.in_channels),
+ )
+
+ self.merge_strategy = merge_strategy
+ if self.merge_strategy == "fixed":
+ self.register_buffer("mix_factor", torch.Tensor([alpha]))
+ elif self.merge_strategy == "learned":
+ self.register_parameter(
+ "mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
+ )
+ else:
+ raise ValueError(f"unknown merge strategy {self.merge_strategy}")
+
+ def forward(self, x, timesteps=None, skip_time_block=False):
+ if skip_time_block:
+ return super().forward(x)
+
+ if timesteps is None:
+ timesteps = x.shape[0]
+
+ x_in = x
+ x = self.attention(x)
+ h, w = x.shape[2:]
+ x = rearrange(x, "b c h w -> b (h w) c")
+
+ x_mix = x
+ num_frames = torch.arange(timesteps, device=x.device)
+ num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
+ num_frames = rearrange(num_frames, "b t -> (b t)")
+ t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False)
+ emb = self.video_time_embed(t_emb) # b, n_channels
+ emb = emb[:, None, :]
+ x_mix = x_mix + emb
+
+ alpha = self.get_alpha()
+ x_mix = self.time_mix_block(x_mix, timesteps=timesteps)
+ x = alpha * x + (1.0 - alpha) * x_mix # alpha merge
+
+ x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
+ x = self.proj_out(x)
+
+ return x_in + x
+
+ def get_alpha(
+ self,
+ ):
+ if self.merge_strategy == "fixed":
+ return self.mix_factor
+ elif self.merge_strategy == "learned":
+ return torch.sigmoid(self.mix_factor)
+ else:
+ raise NotImplementedError(f"unknown merge strategy {self.merge_strategy}")
+
+
+
+def make_time_attn(
+ in_channels,
+ attn_type="vanilla",
+ attn_kwargs=None,
+ alpha: float = 0,
+ merge_strategy: str = "learned",
+):
+ return partialclass(
+ AttnVideoBlock, in_channels, alpha=alpha, merge_strategy=merge_strategy
+ )
+
+
+class Conv2DWrapper(torch.nn.Conv2d):
+ def forward(self, input: torch.Tensor, **kwargs) -> torch.Tensor:
+ return super().forward(input)
+
+
+class VideoDecoder(Decoder):
+ available_time_modes = ["all", "conv-only", "attn-only"]
+
+ def __init__(
+ self,
+ *args,
+ video_kernel_size: Union[int, list] = 3,
+ alpha: float = 0.0,
+ merge_strategy: str = "learned",
+ time_mode: str = "conv-only",
+ **kwargs,
+ ):
+ self.video_kernel_size = video_kernel_size
+ self.alpha = alpha
+ self.merge_strategy = merge_strategy
+ self.time_mode = time_mode
+ assert (
+ self.time_mode in self.available_time_modes
+ ), f"time_mode parameter has to be in {self.available_time_modes}"
+
+ if self.time_mode != "attn-only":
+ kwargs["conv_out_op"] = partialclass(AE3DConv, video_kernel_size=self.video_kernel_size)
+ if self.time_mode not in ["conv-only", "only-last-conv"]:
+ kwargs["attn_op"] = partialclass(make_time_attn, alpha=self.alpha, merge_strategy=self.merge_strategy)
+ if self.time_mode not in ["attn-only", "only-last-conv"]:
+ kwargs["resnet_op"] = partialclass(VideoResBlock, video_kernel_size=self.video_kernel_size, alpha=self.alpha, merge_strategy=self.merge_strategy)
+
+ super().__init__(*args, **kwargs)
+
+ def get_last_layer(self, skip_time_mix=False, **kwargs):
+ if self.time_mode == "attn-only":
+ raise NotImplementedError("TODO")
+ else:
+ return (
+ self.conv_out.time_mix_conv.weight
+ if not skip_time_mix
+ else self.conv_out.weight
+ )
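# Self-contained toy (the Conv stand-in is not a class from this patch) of what partialclass
# produces: a subclass whose __init__ has the given kwargs pre-bound, which is how VideoDecoder
# wires video_kernel_size / alpha / merge_strategy into the Decoder hooks.
import functools

def partialclass(cls, *args, **kwargs):
    class NewCls(cls):
        __init__ = functools.partialmethod(cls.__init__, *args, **kwargs)
    return NewCls

class Conv:
    def __init__(self, channels, video_kernel_size=3):
        self.channels = channels
        self.video_kernel_size = video_kernel_size

VideoConv = partialclass(Conv, video_kernel_size=(3, 1, 1))
layer = VideoConv(64)                    # same as Conv(64, video_kernel_size=(3, 1, 1))
assert layer.video_kernel_size == (3, 1, 1)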
diff --git a/comfy/lora.py b/comfy/lora.py
index 3009a1c9e0c..29c59d89307 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -131,6 +131,18 @@ def load_lora(lora, to_load):
loaded_keys.add(b_norm_name)
patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (b_norm,)
+ diff_name = "{}.diff".format(x)
+ diff_weight = lora.get(diff_name, None)
+ if diff_weight is not None:
+ patch_dict[to_load[x]] = (diff_weight,)
+ loaded_keys.add(diff_name)
+
+ diff_bias_name = "{}.diff_b".format(x)
+ diff_bias = lora.get(diff_bias_name, None)
+ if diff_bias is not None:
+ patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (diff_bias,)
+ loaded_keys.add(diff_bias_name)
+
for x in lora.keys():
if x not in loaded_keys:
print("lora key not loaded", x)
@@ -141,9 +153,9 @@ def model_lora_keys_clip(model, key_map={}):
text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
clip_l_present = False
- for b in range(32):
+ for b in range(32): #TODO: clean up
for c in LORA_CLIP_MAP:
- k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
+ k = "clip_h.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
if k in sdk:
lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
key_map[lora_key] = k
@@ -154,6 +166,8 @@ def model_lora_keys_clip(model, key_map={}):
k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
if k in sdk:
+ lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
+ key_map[lora_key] = k
lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
key_map[lora_key] = k
clip_l_present = True
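# Minimal sketch (key names are hypothetical) of the new "diff" entries: unlike a low-rank
# up/down pair, a .diff tensor is a full weight offset stored as-is, and .diff_b patches the
# matching bias key.
import torch
from comfy.lora import load_lora

delta_w, delta_b = torch.zeros(4, 4), torch.zeros(4)
lora = {"some_key.diff": delta_w, "some_key.diff_b": delta_b}
patches = load_lora(lora, {"some_key": "model.block.weight"})
assert patches["model.block.weight"][0] is delta_w
assert patches["model.block.bias"][0] is delta_b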
diff --git a/comfy/model_base.py b/comfy/model_base.py
index ed2dc83e4e0..786c9cf47ba 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -1,16 +1,37 @@
import torch
from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel
from comfy.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation
-from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule
from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep
import comfy.model_management
-import numpy as np
+import comfy.conds
from enum import Enum
from . import utils
class ModelType(Enum):
EPS = 1
V_PREDICTION = 2
+ V_PREDICTION_EDM = 3
+
+
+from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete, ModelSamplingContinuousEDM
+
+
+def model_sampling(model_config, model_type):
+ s = ModelSamplingDiscrete
+
+ if model_type == ModelType.EPS:
+ c = EPS
+ elif model_type == ModelType.V_PREDICTION:
+ c = V_PREDICTION
+ elif model_type == ModelType.V_PREDICTION_EDM:
+ c = V_PREDICTION
+ s = ModelSamplingContinuousEDM
+
+ class ModelSampling(s, c):
+ pass
+
+ return ModelSampling(model_config)
+
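# Stripped-down toy (class names here are placeholders, not the comfy classes) of the pattern
# model_sampling uses above: compose a prediction type and a sigma schedule into one object by
# creating a class on the fly from two mixins.
class Eps:
    def predict(self):
        return "eps"

class DiscreteSchedule:
    def sigmas(self):
        return [1.0, 0.5, 0.1]

def make_sampling(prediction_cls, schedule_cls):
    class ModelSampling(schedule_cls, prediction_cls):
        pass
    return ModelSampling()

ms = make_sampling(Eps, DiscreteSchedule)
assert (ms.predict(), ms.sigmas()) == ("eps", [1.0, 0.5, 0.1])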
class BaseModel(torch.nn.Module):
def __init__(self, model_config, model_type=ModelType.EPS, device=None):
@@ -19,48 +40,38 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None):
unet_config = model_config.unet_config
self.latent_format = model_config.latent_format
self.model_config = model_config
- self.register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3)
+
if not unet_config.get("disable_unet_model_creation", False):
self.diffusion_model = UNetModel(**unet_config, device=device)
self.model_type = model_type
+ self.model_sampling = model_sampling(model_config, model_type)
+
self.adm_channels = unet_config.get("adm_in_channels", None)
if self.adm_channels is None:
self.adm_channels = 0
+ self.inpaint_model = False
print("model_type", model_type.name)
print("adm", self.adm_channels)
- def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if given_betas is not None:
- betas = given_betas
- else:
- betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
- alphas = 1. - betas
- alphas_cumprod = np.cumprod(alphas, axis=0)
- alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
- timesteps, = betas.shape
- self.num_timesteps = int(timesteps)
- self.linear_start = linear_start
- self.linear_end = linear_end
-
- self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
- self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
- self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
-
- def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}):
+ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs):
+ sigma = t
+ xc = self.model_sampling.calculate_input(sigma, x)
if c_concat is not None:
- xc = torch.cat([x] + [c_concat], dim=1)
- else:
- xc = x
+ xc = torch.cat([xc] + [c_concat], dim=1)
+
context = c_crossattn
dtype = self.get_dtype()
xc = xc.to(dtype)
- t = t.to(dtype)
+ t = self.model_sampling.timestep(t).float()
context = context.to(dtype)
- if c_adm is not None:
- c_adm = c_adm.to(dtype)
- return self.diffusion_model(xc, t, context=context, y=c_adm, control=control, transformer_options=transformer_options).float()
+ extra_conds = {}
+ for o in kwargs:
+ extra = kwargs[o]
+ if hasattr(extra, "to"):
+ extra = extra.to(dtype)
+ extra_conds[o] = extra
+ model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
+ return self.model_sampling.calculate_denoised(sigma, model_output, x)
def get_dtype(self):
return self.diffusion_model.dtype
@@ -71,6 +82,43 @@ def is_adm(self):
def encode_adm(self, **kwargs):
return None
+ def extra_conds(self, **kwargs):
+ out = {}
+ if self.inpaint_model:
+ concat_keys = ("mask", "masked_image")
+ cond_concat = []
+ denoise_mask = kwargs.get("denoise_mask", None)
+ latent_image = kwargs.get("latent_image", None)
+ noise = kwargs.get("noise", None)
+ device = kwargs["device"]
+
+ def blank_inpaint_image_like(latent_image):
+ blank_image = torch.ones_like(latent_image)
+ # these are the values for "zero" in pixel space translated to latent space
+ blank_image[:,0] *= 0.8223
+ blank_image[:,1] *= -0.6876
+ blank_image[:,2] *= 0.6364
+ blank_image[:,3] *= 0.1380
+ return blank_image
+
+ for ck in concat_keys:
+ if denoise_mask is not None:
+ if ck == "mask":
+ cond_concat.append(denoise_mask[:,:1].to(device))
+ elif ck == "masked_image":
+ cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space
+ else:
+ if ck == "mask":
+ cond_concat.append(torch.ones_like(noise)[:,:1])
+ elif ck == "masked_image":
+ cond_concat.append(blank_inpaint_image_like(noise))
+ data = torch.cat(cond_concat, dim=1)
+ out['c_concat'] = comfy.conds.CONDNoiseShape(data)
+ adm = self.encode_adm(**kwargs)
+ if adm is not None:
+ out['y'] = comfy.conds.CONDRegular(adm)
+ return out
+
def load_model_weights(self, sd, unet_prefix=""):
to_load = {}
keys = list(sd.keys())
@@ -78,6 +126,7 @@ def load_model_weights(self, sd, unet_prefix=""):
if k.startswith(unet_prefix):
to_load[k[len(unet_prefix):]] = sd.pop(k)
+ to_load = self.model_config.process_unet_state_dict(to_load)
m, u = self.diffusion_model.load_state_dict(to_load, strict=False)
if len(m) > 0:
print("unet missing:", m)
@@ -112,7 +161,18 @@ def state_dict_for_saving(self, clip_state_dict, vae_state_dict):
return {**unet_state_dict, **vae_state_dict, **clip_state_dict}
def set_inpaint(self):
- self.concat_keys = ("mask", "masked_image")
+ self.inpaint_model = True
+
+ def memory_required(self, input_shape):
+ if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention():
+ #TODO: this needs to be tweaked
+ area = input_shape[0] * input_shape[2] * input_shape[3]
+ return (area * comfy.model_management.dtype_size(self.get_dtype()) / 50) * (1024 * 1024)
+ else:
+ #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory.
+ area = input_shape[0] * input_shape[2] * input_shape[3]
+ return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
+
def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0):
adm_inputs = []
@@ -208,3 +268,48 @@ def encode_adm(self, **kwargs):
out.append(self.embedder(torch.Tensor([target_width])))
flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1)
return torch.cat((clip_pooled.to(flat.device), flat), dim=1)
+
+class SVD_img2vid(BaseModel):
+ def __init__(self, model_config, model_type=ModelType.V_PREDICTION_EDM, device=None):
+ super().__init__(model_config, model_type, device=device)
+ self.embedder = Timestep(256)
+
+ def encode_adm(self, **kwargs):
+ fps_id = kwargs.get("fps", 6) - 1
+ motion_bucket_id = kwargs.get("motion_bucket_id", 127)
+ augmentation = kwargs.get("augmentation_level", 0)
+
+ out = []
+ out.append(self.embedder(torch.Tensor([fps_id])))
+ out.append(self.embedder(torch.Tensor([motion_bucket_id])))
+ out.append(self.embedder(torch.Tensor([augmentation])))
+
+ flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0)
+ return flat
+
+ def extra_conds(self, **kwargs):
+ out = {}
+ adm = self.encode_adm(**kwargs)
+ if adm is not None:
+ out['y'] = comfy.conds.CONDRegular(adm)
+
+ latent_image = kwargs.get("concat_latent_image", None)
+ noise = kwargs.get("noise", None)
+ device = kwargs["device"]
+
+ if latent_image is None:
+ latent_image = torch.zeros_like(noise)
+
+ if latent_image.shape[1:] != noise.shape[1:]:
+ latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center")
+
+ latent_image = utils.repeat_to_batch_size(latent_image, noise.shape[0])
+
+ out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image)
+
+ if "time_conditioning" in kwargs:
+ out["time_context"] = comfy.conds.CONDCrossAttn(kwargs["time_conditioning"])
+
+ out['image_only_indicator'] = comfy.conds.CONDConstant(torch.zeros((1,), device=device))
+ out['num_video_frames'] = comfy.conds.CONDConstant(noise.shape[0])
+ return out
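A minimal sketch (not from the patch itself) of the new apply_model contract for the EPS case: the input latent is scaled by calculate_input, sigma is mapped to a discrete timestep, and the raw UNet prediction is turned into a denoised latent by calculate_denoised. It assumes sigma_data = 1.0 as set in ModelSamplingDiscrete, and the log_sigmas lookup mirrors ModelSamplingDiscrete.timestep below.

import torch

def eps_denoise_sketch(unet, x, sigma, context, log_sigmas):
    # EPS.calculate_input with sigma_data = 1.0
    sigma_b = sigma.view(sigma.shape[:1] + (1,) * (x.ndim - 1))
    xc = x / (sigma_b ** 2 + 1.0) ** 0.5
    # ModelSamplingDiscrete.timestep: nearest discrete step in log-sigma space
    dists = sigma.log().to(log_sigmas.device) - log_sigmas[:, None]
    t = dists.abs().argmin(dim=0).view(sigma.shape).float()
    # the UNet predicts noise (eps); EPS.calculate_denoised recovers the denoised latent
    eps = unet(xc, t, context=context)
    return x - eps * sigma_b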
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index 787c78575ae..c682c3e1a18 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -14,7 +14,21 @@ def count_blocks(state_dict_keys, prefix_string):
count += 1
return count
-def detect_unet_config(state_dict, key_prefix, use_fp16):
+def calculate_transformer_depth(prefix, state_dict_keys, state_dict):
+ context_dim = None
+ use_linear_in_transformer = False
+
+ transformer_prefix = prefix + "1.transformer_blocks."
+ transformer_keys = sorted(list(filter(lambda a: a.startswith(transformer_prefix), state_dict_keys)))
+ if len(transformer_keys) > 0:
+ last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}')
+ context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1]
+ use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2
+ time_stack = '{}1.time_stack.0.attn1.to_q.weight'.format(prefix) in state_dict or '{}1.time_mix_blocks.0.attn1.to_q.weight'.format(prefix) in state_dict
+ return last_transformer_depth, context_dim, use_linear_in_transformer, time_stack
+ return None
+
+def detect_unet_config(state_dict, key_prefix, dtype):
state_dict_keys = list(state_dict.keys())
unet_config = {
@@ -32,7 +46,7 @@ def detect_unet_config(state_dict, key_prefix, use_fp16):
else:
unet_config["adm_in_channels"] = None
- unet_config["use_fp16"] = use_fp16
+ unet_config["dtype"] = dtype
model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0]
in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1]
@@ -40,72 +54,95 @@ def detect_unet_config(state_dict, key_prefix, use_fp16):
channel_mult = []
attention_resolutions = []
transformer_depth = []
+ transformer_depth_output = []
context_dim = None
use_linear_in_transformer = False
+ video_model = False
current_res = 1
count = 0
last_res_blocks = 0
- last_transformer_depth = 0
last_channel_mult = 0
- while True:
+ input_block_count = count_blocks(state_dict_keys, '{}input_blocks'.format(key_prefix) + '.{}.')
+ for count in range(input_block_count):
prefix = '{}input_blocks.{}.'.format(key_prefix, count)
+ prefix_output = '{}output_blocks.{}.'.format(key_prefix, input_block_count - count - 1)
+
block_keys = sorted(list(filter(lambda a: a.startswith(prefix), state_dict_keys)))
if len(block_keys) == 0:
break
+ block_keys_output = sorted(list(filter(lambda a: a.startswith(prefix_output), state_dict_keys)))
+
if "{}0.op.weight".format(prefix) in block_keys: #new layer
- if last_transformer_depth > 0:
- attention_resolutions.append(current_res)
- transformer_depth.append(last_transformer_depth)
num_res_blocks.append(last_res_blocks)
channel_mult.append(last_channel_mult)
current_res *= 2
last_res_blocks = 0
- last_transformer_depth = 0
last_channel_mult = 0
+ out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict)
+ if out is not None:
+ transformer_depth_output.append(out[0])
+ else:
+ transformer_depth_output.append(0)
else:
res_block_prefix = "{}0.in_layers.0.weight".format(prefix)
if res_block_prefix in block_keys:
last_res_blocks += 1
last_channel_mult = state_dict["{}0.out_layers.3.weight".format(prefix)].shape[0] // model_channels
- transformer_prefix = prefix + "1.transformer_blocks."
- transformer_keys = sorted(list(filter(lambda a: a.startswith(transformer_prefix), state_dict_keys)))
- if len(transformer_keys) > 0:
- last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}')
- if context_dim is None:
- context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1]
- use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2
+ out = calculate_transformer_depth(prefix, state_dict_keys, state_dict)
+ if out is not None:
+ transformer_depth.append(out[0])
+ if context_dim is None:
+ context_dim = out[1]
+ use_linear_in_transformer = out[2]
+ video_model = out[3]
+ else:
+ transformer_depth.append(0)
+
+ res_block_prefix = "{}0.in_layers.0.weight".format(prefix_output)
+ if res_block_prefix in block_keys_output:
+ out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict)
+ if out is not None:
+ transformer_depth_output.append(out[0])
+ else:
+ transformer_depth_output.append(0)
- count += 1
- if last_transformer_depth > 0:
- attention_resolutions.append(current_res)
- transformer_depth.append(last_transformer_depth)
num_res_blocks.append(last_res_blocks)
channel_mult.append(last_channel_mult)
- transformer_depth_middle = count_blocks(state_dict_keys, '{}middle_block.1.transformer_blocks.'.format(key_prefix) + '{}')
-
- if len(set(num_res_blocks)) == 1:
- num_res_blocks = num_res_blocks[0]
-
- if len(set(transformer_depth)) == 1:
- transformer_depth = transformer_depth[0]
+ if "{}middle_block.1.proj_in.weight".format(key_prefix) in state_dict_keys:
+ transformer_depth_middle = count_blocks(state_dict_keys, '{}middle_block.1.transformer_blocks.'.format(key_prefix) + '{}')
+ else:
+ transformer_depth_middle = -1
unet_config["in_channels"] = in_channels
unet_config["model_channels"] = model_channels
unet_config["num_res_blocks"] = num_res_blocks
- unet_config["attention_resolutions"] = attention_resolutions
unet_config["transformer_depth"] = transformer_depth
+ unet_config["transformer_depth_output"] = transformer_depth_output
unet_config["channel_mult"] = channel_mult
unet_config["transformer_depth_middle"] = transformer_depth_middle
unet_config['use_linear_in_transformer'] = use_linear_in_transformer
unet_config["context_dim"] = context_dim
+
+ if video_model:
+ unet_config["extra_ff_mix_layer"] = True
+ unet_config["use_spatial_context"] = True
+ unet_config["merge_strategy"] = "learned_with_images"
+ unet_config["merge_factor"] = 0.0
+ unet_config["video_kernel_size"] = [3, 1, 1]
+ unet_config["use_temporal_resblock"] = True
+ unet_config["use_temporal_attention"] = True
+ else:
+ unet_config["use_temporal_resblock"] = False
+ unet_config["use_temporal_attention"] = False
+
return unet_config
def model_config_from_unet_config(unet_config):
@@ -116,27 +153,73 @@ def model_config_from_unet_config(unet_config):
print("no match", unet_config)
return None
-def model_config_from_unet(state_dict, unet_key_prefix, use_fp16, use_base_if_no_match=False):
- unet_config = detect_unet_config(state_dict, unet_key_prefix, use_fp16)
+def model_config_from_unet(state_dict, unet_key_prefix, dtype, use_base_if_no_match=False):
+ unet_config = detect_unet_config(state_dict, unet_key_prefix, dtype)
model_config = model_config_from_unet_config(unet_config)
if model_config is None and use_base_if_no_match:
return comfy.supported_models_base.BASE(unet_config)
else:
return model_config
-def unet_config_from_diffusers_unet(state_dict, use_fp16):
+def convert_config(unet_config):
+ new_config = unet_config.copy()
+ num_res_blocks = new_config.get("num_res_blocks", None)
+ channel_mult = new_config.get("channel_mult", None)
+
+ if isinstance(num_res_blocks, int):
+ num_res_blocks = len(channel_mult) * [num_res_blocks]
+
+ if "attention_resolutions" in new_config:
+ attention_resolutions = new_config.pop("attention_resolutions")
+ transformer_depth = new_config.get("transformer_depth", None)
+ transformer_depth_middle = new_config.get("transformer_depth_middle", None)
+
+ if isinstance(transformer_depth, int):
+ transformer_depth = len(channel_mult) * [transformer_depth]
+ if transformer_depth_middle is None:
+ transformer_depth_middle = transformer_depth[-1]
+ t_in = []
+ t_out = []
+ s = 1
+ for i in range(len(num_res_blocks)):
+ res = num_res_blocks[i]
+ d = 0
+ if s in attention_resolutions:
+ d = transformer_depth[i]
+
+ t_in += [d] * res
+ t_out += [d] * (res + 1)
+ s *= 2
+ transformer_depth = t_in
+ transformer_depth_output = t_out
+ new_config["transformer_depth"] = t_in
+ new_config["transformer_depth_output"] = t_out
+ new_config["transformer_depth_middle"] = transformer_depth_middle
+
+ new_config["num_res_blocks"] = num_res_blocks
+ return new_config
+
+
+def unet_config_from_diffusers_unet(state_dict, dtype):
match = {}
- attention_resolutions = []
+ transformer_depth = []
attn_res = 1
- for i in range(5):
- k = "down_blocks.{}.attentions.1.transformer_blocks.0.attn2.to_k.weight".format(i)
- if k in state_dict:
- match["context_dim"] = state_dict[k].shape[1]
- attention_resolutions.append(attn_res)
+ down_blocks = count_blocks(state_dict, "down_blocks.{}")
+ for i in range(down_blocks):
+ attn_blocks = count_blocks(state_dict, "down_blocks.{}.attentions.".format(i) + '{}')
+ for ab in range(attn_blocks):
+ transformer_count = count_blocks(state_dict, "down_blocks.{}.attentions.{}.transformer_blocks.".format(i, ab) + '{}')
+ transformer_depth.append(transformer_count)
+ if transformer_count > 0:
+ match["context_dim"] = state_dict["down_blocks.{}.attentions.{}.transformer_blocks.0.attn2.to_k.weight".format(i, ab)].shape[1]
+
attn_res *= 2
+ if attn_blocks == 0:
+ transformer_depth.append(0)
+ transformer_depth.append(0)
- match["attention_resolutions"] = attention_resolutions
+ match["transformer_depth"] = transformer_depth
match["model_channels"] = state_dict["conv_in.weight"].shape[0]
match["in_channels"] = state_dict["conv_in.weight"].shape[1]
@@ -147,51 +230,66 @@ def unet_config_from_diffusers_unet(state_dict, use_fp16):
match["adm_in_channels"] = state_dict["add_embedding.linear_1.weight"].shape[1]
SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320,
- 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4],
- 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64}
+ 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10,
+ 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 2560, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 384,
- 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 4, 4, 0], 'channel_mult': [1, 2, 4, 4],
- 'transformer_depth_middle': 4, 'use_linear_in_transformer': True, 'context_dim': 1280, "num_head_channels": 64}
+ 'num_classes': 'sequential', 'adm_in_channels': 2560, 'dtype': dtype, 'in_channels': 4, 'model_channels': 384,
+ 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [0, 0, 4, 4, 4, 4, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 4,
+ 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'adm_in_channels': None, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2,
- 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
- 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64}
+ 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2],
+ 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True,
+ 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 2048, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320,
- 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
- 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64}
+ 'num_classes': 'sequential', 'adm_in_channels': 2048, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1,
+ 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 1536, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320,
- 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
- 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}
+ 'num_classes': 'sequential', 'adm_in_channels': 1536, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1,
+ 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
- SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'adm_in_channels': None, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2,
- 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
- 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, "num_heads": 8}
+ SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None,
+ 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0],
+ 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8,
+ 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SDXL_mid_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320,
- 'num_res_blocks': 2, 'attention_resolutions': [4], 'transformer_depth': [0, 0, 1], 'channel_mult': [1, 2, 4],
- 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64}
+ 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 1,
+ 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SDXL_small_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320,
- 'num_res_blocks': 2, 'attention_resolutions': [], 'transformer_depth': [0, 0, 0], 'channel_mult': [1, 2, 4],
- 'transformer_depth_middle': 0, 'use_linear_in_transformer': True, "num_head_channels": 64, 'context_dim': 1}
+ 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 0, 0], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 0,
+ 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
SDXL_diffusers_inpaint = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
- 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 9, 'model_channels': 320,
- 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4],
- 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64}
+ 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10,
+ 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10],
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
+
+ SSD_1B = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
+ 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+ 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 4, 4], 'transformer_depth_output': [0, 0, 0, 1, 1, 2, 10, 4, 4],
+ 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64,
+ 'use_temporal_attention': False, 'use_temporal_resblock': False}
- supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint]
+ supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B]
for unet_config in supported_models:
matches = True
@@ -200,11 +298,11 @@ def unet_config_from_diffusers_unet(state_dict, use_fp16):
matches = False
break
if matches:
- return unet_config
+ return convert_config(unet_config)
return None
-def model_config_from_diffusers_unet(state_dict, use_fp16):
- unet_config = unet_config_from_diffusers_unet(state_dict, use_fp16)
+def model_config_from_diffusers_unet(state_dict, dtype):
+ unet_config = unet_config_from_diffusers_unet(state_dict, dtype)
if unet_config is not None:
return model_config_from_unet_config(unet_config)
return None
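A small illustration, with assumed values mirroring the SD15 template above, of what convert_config does to a legacy config: a scalar num_res_blocks/transformer_depth plus attention_resolutions is expanded into the new per-block lists.

from comfy.model_detection import convert_config

legacy = {
    "channel_mult": [1, 2, 4, 4],
    "num_res_blocks": 2,
    "transformer_depth": 1,
    "attention_resolutions": [1, 2, 4],
}
new = convert_config(legacy)
# new["num_res_blocks"]           == [2, 2, 2, 2]
# new["transformer_depth"]        == [1, 1, 1, 1, 1, 1, 0, 0]
# new["transformer_depth_output"] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
# new["transformer_depth_middle"] == 1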
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 8b896372687..d4acd8950ca 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -133,6 +133,10 @@ def get_total_memory(dev=None, torch_total_too=False):
import xformers
import xformers.ops
XFORMERS_IS_AVAILABLE = True
+ try:
+ XFORMERS_IS_AVAILABLE = xformers._has_cpp_library
+ except:
+ pass
try:
XFORMERS_VERSION = xformers.version.__version__
print("xformers version:", XFORMERS_VERSION)
@@ -154,14 +158,18 @@ def is_nvidia():
return True
return False
-ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
+ENABLE_PYTORCH_ATTENTION = False
+if args.use_pytorch_cross_attention:
+ ENABLE_PYTORCH_ATTENTION = True
+ XFORMERS_IS_AVAILABLE = False
+
VAE_DTYPE = torch.float32
try:
if is_nvidia():
torch_version = torch.version.__version__
if int(torch_version[0]) >= 2:
- if ENABLE_PYTORCH_ATTENTION == False and XFORMERS_IS_AVAILABLE == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
+ if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
ENABLE_PYTORCH_ATTENTION = True
if torch.cuda.is_bf16_supported():
VAE_DTYPE = torch.bfloat16
@@ -186,7 +194,6 @@ def is_nvidia():
torch.backends.cuda.enable_math_sdp(True)
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(True)
- XFORMERS_IS_AVAILABLE = False
if args.lowvram:
set_vram_to = VRAMState.LOW_VRAM
@@ -336,7 +343,11 @@ def free_memory(memory_required, device, keep_loaded=[]):
if unloaded_model:
soft_empty_cache()
-
+ else:
+ if vram_state != VRAMState.HIGH_VRAM:
+ mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True)
+ if mem_free_torch > mem_free_total * 0.25:
+ soft_empty_cache()
def load_models_gpu(models, memory_required=0):
global vram_state
@@ -354,6 +365,8 @@ def load_models_gpu(models, memory_required=0):
current_loaded_models.insert(0, current_loaded_models.pop(index))
models_already_loaded.append(loaded_model)
else:
+ if hasattr(x, "model"):
+ print(f"Requested to load {x.model.__class__.__name__}")
models_to_load.append(loaded_model)
if len(models_to_load) == 0:
@@ -363,7 +376,7 @@ def load_models_gpu(models, memory_required=0):
free_memory(extra_mem, d, models_already_loaded)
return
- print("loading new")
+ print(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
total_memory_required = {}
for loaded_model in models_to_load:
@@ -405,7 +418,6 @@ def load_model_gpu(model):
def cleanup_models():
to_delete = []
for i in range(len(current_loaded_models)):
- print(sys.getrefcount(current_loaded_models[i].model))
if sys.getrefcount(current_loaded_models[i].model) <= 2:
to_delete = [i] + to_delete
@@ -444,6 +456,13 @@ def unet_inital_load_device(parameters, dtype):
else:
return cpu_dev
+def unet_dtype(device=None, model_params=0):
+ if args.bf16_unet:
+ return torch.bfloat16
+ if should_use_fp16(device=device, model_params=model_params):
+ return torch.float16
+ return torch.float32
+
def text_encoder_offload_device():
if args.gpu_only:
return get_torch_device()
@@ -463,6 +482,21 @@ def text_encoder_device():
else:
return torch.device("cpu")
+def text_encoder_dtype(device=None):
+ if args.fp8_e4m3fn_text_enc:
+ return torch.float8_e4m3fn
+ elif args.fp8_e5m2_text_enc:
+ return torch.float8_e5m2
+ elif args.fp16_text_enc:
+ return torch.float16
+ elif args.fp32_text_enc:
+ return torch.float32
+
+ if should_use_fp16(device, prioritize_performance=False):
+ return torch.float16
+ else:
+ return torch.float32
+
def vae_device():
return get_torch_device()
@@ -564,27 +598,6 @@ def get_free_memory(dev=None, torch_free_too=False):
else:
return mem_free_total
-def batch_area_memory(area):
- if xformers_enabled() or pytorch_attention_flash_attention():
- #TODO: these formulas are copied from maximum_batch_area below
- return (area / 20) * (1024 * 1024)
- else:
- return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
-
-def maximum_batch_area():
- global vram_state
- if vram_state == VRAMState.NO_VRAM:
- return 0
-
- memory_free = get_free_memory() / (1024 * 1024)
- if xformers_enabled() or pytorch_attention_flash_attention():
- #TODO: this needs to be tweaked
- area = 20 * memory_free
- else:
- #TODO: this formula is because AMD sucks and has memory management issues which might be fixed in the future
- area = ((memory_free - 1024) * 0.9) / (0.6)
- return int(max(area, 0))
-
def cpu_mode():
global cpu_state
return cpu_state == CPUState.CPU
@@ -656,7 +669,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
return False
#FP16 is just broken on these cards
- nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX"]
+ nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX", "T2000", "T1000", "T1200"]
for x in nvidia_16_series:
if x in props.name:
return False
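A hedged usage sketch of the new dtype helpers added above; the bf16/fp8/fp16 behavior follows the command-line flags referenced in the code, and the parameter count here is an arbitrary example.

import comfy.model_management as mm

# bfloat16 when --bf16-unet is set, otherwise fp16/fp32 via should_use_fp16()
unet_dt = mm.unet_dtype(model_params=860 * 1000 * 1000)
# float8_e4m3fn / float8_e5m2 / fp16 / fp32 depending on the text encoder flags
clip_dt = mm.text_encoder_dtype(mm.text_encoder_device())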
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index ba505221e77..a3cffc3be9d 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -6,11 +6,13 @@
import comfy.model_management
class ModelPatcher:
- def __init__(self, model, load_device, offload_device, size=0, current_device=None):
+ def __init__(self, model, load_device, offload_device, size=0, current_device=None, weight_inplace_update=False):
self.size = size
self.model = model
self.patches = {}
self.backup = {}
+ self.object_patches = {}
+ self.object_patches_backup = {}
self.model_options = {"transformer_options":{}}
self.model_size()
self.load_device = load_device
@@ -20,6 +22,8 @@ def __init__(self, model, load_device, offload_device, size=0, current_device=No
else:
self.current_device = current_device
+ self.weight_inplace_update = weight_inplace_update
+
def model_size(self):
if self.size > 0:
return self.size
@@ -33,11 +37,12 @@ def model_size(self):
return size
def clone(self):
- n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device)
+ n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device, weight_inplace_update=self.weight_inplace_update)
n.patches = {}
for k in self.patches:
n.patches[k] = self.patches[k][:]
+ n.object_patches = self.object_patches.copy()
n.model_options = copy.deepcopy(self.model_options)
n.model_keys = self.model_keys
return n
@@ -47,6 +52,9 @@ def is_clone(self, other):
return True
return False
+ def memory_required(self, input_shape):
+ return self.model.memory_required(input_shape=input_shape)
+
def set_model_sampler_cfg_function(self, sampler_cfg_function):
if len(inspect.signature(sampler_cfg_function).parameters) == 3:
self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
@@ -88,9 +96,18 @@ def set_model_attn1_output_patch(self, patch):
def set_model_attn2_output_patch(self, patch):
self.set_model_patch(patch, "attn2_output_patch")
+ def set_model_input_block_patch(self, patch):
+ self.set_model_patch(patch, "input_block_patch")
+
+ def set_model_input_block_patch_after_skip(self, patch):
+ self.set_model_patch(patch, "input_block_patch_after_skip")
+
def set_model_output_block_patch(self, patch):
self.set_model_patch(patch, "output_block_patch")
+ def add_object_patch(self, name, obj):
+ self.object_patches[name] = obj
+
def model_patches_to(self, device):
to = self.model_options["transformer_options"]
if "patches" in to:
@@ -107,6 +124,10 @@ def model_patches_to(self, device):
for k in patch_list:
if hasattr(patch_list[k], "to"):
patch_list[k] = patch_list[k].to(device)
+ if "model_function_wrapper" in self.model_options:
+ wrap_func = self.model_options["model_function_wrapper"]
+ if hasattr(wrap_func, "to"):
+ self.model_options["model_function_wrapper"] = wrap_func.to(device)
def model_dtype(self):
if hasattr(self.model, "get_dtype"):
@@ -124,6 +145,7 @@ def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
return list(p)
def get_key_patches(self, filter_prefix=None):
+ comfy.model_management.unload_model_clones(self)
model_sd = self.model_state_dict()
p = {}
for k in model_sd:
@@ -146,6 +168,12 @@ def model_state_dict(self, filter_prefix=None):
return sd
def patch_model(self, device_to=None):
+ for k in self.object_patches:
+ old = getattr(self.model, k)
+ if k not in self.object_patches_backup:
+ self.object_patches_backup[k] = old
+ setattr(self.model, k, self.object_patches[k])
+
model_sd = self.model_state_dict()
for key in self.patches:
if key not in model_sd:
@@ -154,15 +182,20 @@ def patch_model(self, device_to=None):
weight = model_sd[key]
+ inplace_update = self.weight_inplace_update
+
if key not in self.backup:
- self.backup[key] = weight.to(self.offload_device)
+ self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update)
if device_to is not None:
temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
else:
temp_weight = weight.to(torch.float32, copy=True)
out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
- comfy.utils.set_attr(self.model, key, out_weight)
+ if inplace_update:
+ comfy.utils.copy_to_param(self.model, key, out_weight)
+ else:
+ comfy.utils.set_attr(self.model, key, out_weight)
del temp_weight
if device_to is not None:
@@ -278,11 +311,21 @@ def calculate_weight(self, patches, weight, key):
def unpatch_model(self, device_to=None):
keys = list(self.backup.keys())
- for k in keys:
- comfy.utils.set_attr(self.model, k, self.backup[k])
+ if self.weight_inplace_update:
+ for k in keys:
+ comfy.utils.copy_to_param(self.model, k, self.backup[k])
+ else:
+ for k in keys:
+ comfy.utils.set_attr(self.model, k, self.backup[k])
self.backup = {}
if device_to is not None:
self.model.to(device_to)
self.current_device = device_to
+
+ keys = list(self.object_patches_backup.keys())
+ for k in keys:
+ setattr(self.model, k, self.object_patches_backup[k])
+
+ self.object_patches_backup = {}
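A hedged usage sketch of the new object patch mechanism (the helper name is made up): a cloned ModelPatcher can swap a sub-module such as model_sampling without touching the original; patch_model() installs the replacement and unpatch_model() restores the backup.

import comfy.model_sampling

def clone_with_edm_sampling(model_patcher):
    # hypothetical helper: swap the sampling object on a clone only
    m = model_patcher.clone()
    # assumes the model config carries sampling_settings as in comfy.supported_models_base
    sampling = comfy.model_sampling.ModelSamplingContinuousEDM(m.model.model_config)
    m.add_object_patch("model_sampling", sampling)
    return m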
diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py
new file mode 100644
index 00000000000..69c8b1f01fc
--- /dev/null
+++ b/comfy/model_sampling.py
@@ -0,0 +1,129 @@
+import torch
+import numpy as np
+from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule
+import math
+
+class EPS:
+ def calculate_input(self, sigma, noise):
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
+ return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+
+ def calculate_denoised(self, sigma, model_output, model_input):
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+ return model_input - model_output * sigma
+
+
+class V_PREDICTION(EPS):
+ def calculate_denoised(self, sigma, model_output, model_input):
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+ return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+
+
+class ModelSamplingDiscrete(torch.nn.Module):
+ def __init__(self, model_config=None):
+ super().__init__()
+ beta_schedule = "linear"
+ if model_config is not None:
+ beta_schedule = model_config.sampling_settings.get("beta_schedule", beta_schedule)
+ self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3)
+ self.sigma_data = 1.0
+
+ def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+ if given_betas is not None:
+ betas = given_betas
+ else:
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
+ alphas = 1. - betas
+ alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
+ # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+
+ timesteps, = betas.shape
+ self.num_timesteps = int(timesteps)
+ self.linear_start = linear_start
+ self.linear_end = linear_end
+
+ # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
+ # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
+ # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
+
+ sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
+ self.set_sigmas(sigmas)
+
+ def set_sigmas(self, sigmas):
+ self.register_buffer('sigmas', sigmas)
+ self.register_buffer('log_sigmas', sigmas.log())
+
+ @property
+ def sigma_min(self):
+ return self.sigmas[0]
+
+ @property
+ def sigma_max(self):
+ return self.sigmas[-1]
+
+ def timestep(self, sigma):
+ log_sigma = sigma.log()
+ dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
+ return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device)
+
+ def sigma(self, timestep):
+ t = torch.clamp(timestep.float().to(self.log_sigmas.device), min=0, max=(len(self.sigmas) - 1))
+ low_idx = t.floor().long()
+ high_idx = t.ceil().long()
+ w = t.frac()
+ log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
+ return log_sigma.exp().to(timestep.device)
+
+ def percent_to_sigma(self, percent):
+ if percent <= 0.0:
+ return 999999999.9
+ if percent >= 1.0:
+ return 0.0
+ percent = 1.0 - percent
+ return self.sigma(torch.tensor(percent * 999.0)).item()
+
+
+class ModelSamplingContinuousEDM(torch.nn.Module):
+ def __init__(self, model_config=None):
+ super().__init__()
+ self.sigma_data = 1.0
+
+ if model_config is not None:
+ sampling_settings = model_config.sampling_settings
+ else:
+ sampling_settings = {}
+
+ sigma_min = sampling_settings.get("sigma_min", 0.002)
+ sigma_max = sampling_settings.get("sigma_max", 120.0)
+ self.set_sigma_range(sigma_min, sigma_max)
+
+ def set_sigma_range(self, sigma_min, sigma_max):
+ sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()
+
+ self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers
+ self.register_buffer('log_sigmas', sigmas.log())
+
+ @property
+ def sigma_min(self):
+ return self.sigmas[0]
+
+ @property
+ def sigma_max(self):
+ return self.sigmas[-1]
+
+ def timestep(self, sigma):
+ return 0.25 * sigma.log()
+
+ def sigma(self, timestep):
+ return (timestep / 0.25).exp()
+
+ def percent_to_sigma(self, percent):
+ if percent <= 0.0:
+ return 999999999.9
+ if percent >= 1.0:
+ return 0.0
+ percent = 1.0 - percent
+
+ log_sigma_min = math.log(self.sigma_min)
+ return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)
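A small numeric check, not part of the patch, of the continuous EDM mapping defined above: timestep(sigma) = 0.25 * log(sigma) and sigma(t) = exp(t / 0.25) are exact inverses, and percent_to_sigma interpolates in log space between sigma_min and sigma_max.

import math

sigma = 14.6
t = 0.25 * math.log(sigma)                       # ModelSamplingContinuousEDM.timestep
assert abs(math.exp(t / 0.25) - sigma) < 1e-9    # sigma() inverts timestep()

# percent_to_sigma(0.5) with the default 0.002..120.0 range is the log-space midpoint,
# i.e. sqrt(0.002 * 120.0) ~= 0.49
mid = math.exp((math.log(120.0) - math.log(0.002)) * 0.5 + math.log(0.002))
print(round(mid, 3))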
diff --git a/comfy/ops.py b/comfy/ops.py
index 610d54584fa..0bfb698aa7f 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -1,29 +1,23 @@
import torch
from contextlib import contextmanager
-class Linear(torch.nn.Module):
- def __init__(self, in_features: int, out_features: int, bias: bool = True,
- device=None, dtype=None) -> None:
- factory_kwargs = {'device': device, 'dtype': dtype}
- super().__init__()
- self.in_features = in_features
- self.out_features = out_features
- self.weight = torch.nn.Parameter(torch.empty((out_features, in_features), **factory_kwargs))
- if bias:
- self.bias = torch.nn.Parameter(torch.empty(out_features, **factory_kwargs))
- else:
- self.register_parameter('bias', None)
-
- def forward(self, input):
- return torch.nn.functional.linear(input, self.weight, self.bias)
+class Linear(torch.nn.Linear):
+ def reset_parameters(self):
+ return None
class Conv2d(torch.nn.Conv2d):
def reset_parameters(self):
return None
+class Conv3d(torch.nn.Conv3d):
+ def reset_parameters(self):
+ return None
+
def conv_nd(dims, *args, **kwargs):
if dims == 2:
return Conv2d(*args, **kwargs)
+ elif dims == 3:
+ return Conv3d(*args, **kwargs)
else:
raise ValueError(f"unsupported dimensions: {dims}")
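A short sketch of why reset_parameters() is overridden to return None in these layers: the torch.empty weights are never randomly initialized, since real values are loaded from a checkpoint state dict immediately afterwards, which avoids wasted init work on large models.

import torch
import comfy.ops

layer = comfy.ops.Linear(320, 1280)                  # weights stay uninitialized (torch.empty)
sd = {"weight": torch.zeros(1280, 320), "bias": torch.zeros(1280)}
layer.load_state_dict(sd)                            # checkpoint values overwrite them anyway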
diff --git a/comfy/sample.py b/comfy/sample.py
index e4730b189ad..034db97ee88 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -1,6 +1,7 @@
import torch
import comfy.model_management
import comfy.samplers
+import comfy.conds
import comfy.utils
import math
import numpy as np
@@ -33,22 +34,24 @@ def prepare_mask(noise_mask, shape, device):
noise_mask = noise_mask.to(device)
return noise_mask
-def broadcast_cond(cond, batch, device):
- """broadcasts conditioning to the batch size"""
- copy = []
- for p in cond:
- t = comfy.utils.repeat_to_batch_size(p[0], batch)
- t = t.to(device)
- copy += [[t] + p[1:]]
- return copy
-
def get_models_from_cond(cond, model_type):
models = []
for c in cond:
- if model_type in c[1]:
- models += [c[1][model_type]]
+ if model_type in c:
+ models += [c[model_type]]
return models
+def convert_cond(cond):
+ out = []
+ for c in cond:
+ temp = c[1].copy()
+ model_conds = temp.get("model_conds", {})
+ if c[0] is not None:
+ model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0])
+ temp["model_conds"] = model_conds
+ out.append(temp)
+ return out
+
def get_additional_models(positive, negative, dtype):
"""loads additional models in positive and negative conditioning"""
control_nets = set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))
@@ -70,28 +73,46 @@ def cleanup_additional_models(models):
if hasattr(m, 'cleanup'):
m.cleanup()
-def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None):
- device = comfy.model_management.get_torch_device()
+def prepare_sampling(model, noise_shape, positive, negative, noise_mask):
+ device = model.load_device
+ positive = convert_cond(positive)
+ negative = convert_cond(negative)
if noise_mask is not None:
- noise_mask = prepare_mask(noise_mask, noise.shape, device)
+ noise_mask = prepare_mask(noise_mask, noise_shape, device)
real_model = None
models, inference_memory = get_additional_models(positive, negative, model.model_dtype())
- comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise.shape[0] * noise.shape[2] * noise.shape[3]) + inference_memory)
+ comfy.model_management.load_models_gpu([model] + models, model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory)
real_model = model.model
- noise = noise.to(device)
- latent_image = latent_image.to(device)
+ return real_model, positive, negative, noise_mask, models
+
- positive_copy = broadcast_cond(positive, noise.shape[0], device)
- negative_copy = broadcast_cond(negative, noise.shape[0], device)
+def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None):
+ real_model, positive_copy, negative_copy, noise_mask, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask)
+ noise = noise.to(model.load_device)
+ latent_image = latent_image.to(model.load_device)
- sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
+ sampler = comfy.samplers.KSampler(real_model, steps=steps, device=model.load_device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
samples = samples.cpu()
cleanup_additional_models(models)
+ cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")))
return samples
+
+def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None):
+ real_model, positive_copy, negative_copy, noise_mask, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask)
+ noise = noise.to(model.load_device)
+ latent_image = latent_image.to(model.load_device)
+ sigmas = sigmas.to(model.load_device)
+
+ samples = comfy.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
+ samples = samples.cpu()
+ cleanup_additional_models(models)
+ cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")))
+ return samples
+
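A hedged sketch of the conditioning format change that convert_cond() handles: the old [cross_attn_tensor, options_dict] pairs become flat dicts whose tensors live under "model_conds" as comfy.conds wrappers, which is the shape the reworked samplers.py below consumes.

import torch
from comfy.sample import convert_cond

old_style = [[torch.randn(1, 77, 768), {"strength": 0.8}]]
new_style = convert_cond(old_style)
# new_style[0] == {"strength": 0.8,
#                  "model_conds": {"c_crossattn": comfy.conds.CONDCrossAttn(...)}}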
diff --git a/comfy/samplers.py b/comfy/samplers.py
index e3192ca58f4..1d012a514a7 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -1,48 +1,42 @@
from .k_diffusion import sampling as k_diffusion_sampling
-from .k_diffusion import external as k_diffusion_external
from .extra_samplers import uni_pc
import torch
+import enum
from comfy import model_management
-from .ldm.models.diffusion.ddim import DDIMSampler
-from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
import math
from comfy import model_base
import comfy.utils
+import comfy.conds
-def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
- return abs(a*b) // math.gcd(a, b)
#The main sampling function shared by all the samplers
-#Returns predicted noise
-def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}, seed=None):
- def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in):
+#Returns denoised
+def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None):
+ def get_area_and_mult(conds, x_in, timestep_in):
area = (x_in.shape[2], x_in.shape[3], 0, 0)
strength = 1.0
- if 'timestep_start' in cond[1]:
- timestep_start = cond[1]['timestep_start']
+
+ if 'timestep_start' in conds:
+ timestep_start = conds['timestep_start']
if timestep_in[0] > timestep_start:
return None
- if 'timestep_end' in cond[1]:
- timestep_end = cond[1]['timestep_end']
+ if 'timestep_end' in conds:
+ timestep_end = conds['timestep_end']
if timestep_in[0] < timestep_end:
return None
- if 'area' in cond[1]:
- area = cond[1]['area']
- if 'strength' in cond[1]:
- strength = cond[1]['strength']
-
- adm_cond = None
- if 'adm_encoded' in cond[1]:
- adm_cond = cond[1]['adm_encoded']
+ if 'area' in conds:
+ area = conds['area']
+ if 'strength' in conds:
+ strength = conds['strength']
input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
- if 'mask' in cond[1]:
+ if 'mask' in conds:
# Scale the mask to the size of the input
# The mask should have been resized as we began the sampling process
mask_strength = 1.0
- if "mask_strength" in cond[1]:
- mask_strength = cond[1]["mask_strength"]
- mask = cond[1]['mask']
+ if "mask_strength" in conds:
+ mask_strength = conds["mask_strength"]
+ mask = conds['mask']
assert(mask.shape[1] == x_in.shape[2])
assert(mask.shape[2] == x_in.shape[3])
mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength
@@ -51,7 +45,7 @@ def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in):
mask = torch.ones_like(input_x)
mult = mask * strength
- if 'mask' not in cond[1]:
+ if 'mask' not in conds:
rr = 8
if area[2] != 0:
for t in range(rr):
@@ -67,24 +61,17 @@ def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in):
mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1))
conditionning = {}
- conditionning['c_crossattn'] = cond[0]
- if cond_concat_in is not None and len(cond_concat_in) > 0:
- cropped = []
- for x in cond_concat_in:
- cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
- cropped.append(cr)
- conditionning['c_concat'] = torch.cat(cropped, dim=1)
-
- if adm_cond is not None:
- conditionning['c_adm'] = adm_cond
+ model_conds = conds["model_conds"]
+ for c in model_conds:
+ conditionning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area)
control = None
- if 'control' in cond[1]:
- control = cond[1]['control']
+ if 'control' in conds:
+ control = conds['control']
patches = None
- if 'gligen' in cond[1]:
- gligen = cond[1]['gligen']
+ if 'gligen' in conds:
+ gligen = conds['gligen']
patches = {}
gligen_type = gligen[0]
gligen_model = gligen[1]
@@ -102,22 +89,8 @@ def cond_equal_size(c1, c2):
return True
if c1.keys() != c2.keys():
return False
- if 'c_crossattn' in c1:
- s1 = c1['c_crossattn'].shape
- s2 = c2['c_crossattn'].shape
- if s1 != s2:
- if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
- return False
-
- mult_min = lcm(s1[1], s2[1])
- diff = mult_min // min(s1[1], s2[1])
- if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
- return False
- if 'c_concat' in c1:
- if c1['c_concat'].shape != c2['c_concat'].shape:
- return False
- if 'c_adm' in c1:
- if c1['c_adm'].shape != c2['c_adm'].shape:
+ for k in c1:
+ if not c1[k].can_concat(c2[k]):
return False
return True
@@ -146,53 +119,41 @@ def cond_cat(c_list):
c_concat = []
c_adm = []
crossattn_max_len = 0
+
+ temp = {}
for x in c_list:
- if 'c_crossattn' in x:
- c = x['c_crossattn']
- if crossattn_max_len == 0:
- crossattn_max_len = c.shape[1]
- else:
- crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
- c_crossattn.append(c)
- if 'c_concat' in x:
- c_concat.append(x['c_concat'])
- if 'c_adm' in x:
- c_adm.append(x['c_adm'])
+ for k in x:
+ cur = temp.get(k, [])
+ cur.append(x[k])
+ temp[k] = cur
+
out = {}
- c_crossattn_out = []
- for c in c_crossattn:
- if c.shape[1] < crossattn_max_len:
- c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result
- c_crossattn_out.append(c)
-
- if len(c_crossattn_out) > 0:
- out['c_crossattn'] = torch.cat(c_crossattn_out)
- if len(c_concat) > 0:
- out['c_concat'] = torch.cat(c_concat)
- if len(c_adm) > 0:
- out['c_adm'] = torch.cat(c_adm)
+ for k in temp:
+ conds = temp[k]
+ out[k] = conds[0].concat(conds[1:])
+
return out
- def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, cond_concat_in, model_options):
+ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options):
out_cond = torch.zeros_like(x_in)
- out_count = torch.ones_like(x_in)/100000.0
+ out_count = torch.ones_like(x_in) * 1e-37
out_uncond = torch.zeros_like(x_in)
- out_uncond_count = torch.ones_like(x_in)/100000.0
+ out_uncond_count = torch.ones_like(x_in) * 1e-37
COND = 0
UNCOND = 1
to_run = []
for x in cond:
- p = get_area_and_mult(x, x_in, cond_concat_in, timestep)
+ p = get_area_and_mult(x, x_in, timestep)
if p is None:
continue
to_run += [(p, COND)]
if uncond is not None:
for x in uncond:
- p = get_area_and_mult(x, x_in, cond_concat_in, timestep)
+ p = get_area_and_mult(x, x_in, timestep)
if p is None:
continue
@@ -209,9 +170,11 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot
to_batch_temp.reverse()
to_batch = to_batch_temp[:1]
+ free_memory = model_management.get_free_memory(x_in.device)
for i in range(1, len(to_batch_temp) + 1):
batch_amount = to_batch_temp[:len(to_batch_temp)//i]
- if (len(batch_amount) * first_shape[0] * first_shape[2] * first_shape[3] < max_total_area):
+ input_shape = [len(batch_amount) * first_shape[0]] + list(first_shape)[1:]
+ if model.memory_required(input_shape) < free_memory:
to_batch = batch_amount
break
@@ -257,12 +220,14 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot
transformer_options["patches"] = patches
transformer_options["cond_or_uncond"] = cond_or_uncond[:]
+ transformer_options["sigmas"] = timestep
+
c['transformer_options'] = transformer_options
if 'model_function_wrapper' in model_options:
- output = model_options['model_function_wrapper'](model_function, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks)
+ output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks)
else:
- output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
+ output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
del input_x
for o in range(batch_chunks):
@@ -278,49 +243,38 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot
del out_count
out_uncond /= out_uncond_count
del out_uncond_count
-
return out_cond, out_uncond
- max_total_area = model_management.maximum_batch_area()
if math.isclose(cond_scale, 1.0):
uncond = None
- cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
+ cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options)
if "sampler_cfg_function" in model_options:
- args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep}
- return model_options["sampler_cfg_function"](args)
+ args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep}
+ return x - model_options["sampler_cfg_function"](args)
else:
return uncond + (cond - uncond) * cond_scale
-
-class CompVisVDenoiser(k_diffusion_external.DiscreteVDDPMDenoiser):
- def __init__(self, model, quantize=False, device='cpu'):
- super().__init__(model, model.alphas_cumprod, quantize=quantize)
-
- def get_v(self, x, t, cond, **kwargs):
- return self.inner_model.apply_model(x, t, cond, **kwargs)
-
-
class CFGNoisePredictor(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
- self.alphas_cumprod = model.alphas_cumprod
- def apply_model(self, x, timestep, cond, uncond, cond_scale, cond_concat=None, model_options={}, seed=None):
- out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed)
+ def apply_model(self, x, timestep, cond, uncond, cond_scale, model_options={}, seed=None):
+ out = sampling_function(self.inner_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed)
return out
-
+ def forward(self, *args, **kwargs):
+ return self.apply_model(*args, **kwargs)
class KSamplerX0Inpaint(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
- def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, cond_concat=None, model_options={}, seed=None):
+ def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, model_options={}, seed=None):
if denoise_mask is not None:
latent_mask = 1. - denoise_mask
x = x * denoise_mask + (self.latent_image + self.noise * sigma.reshape([sigma.shape[0]] + [1] * (len(self.noise.shape) - 1))) * latent_mask
- out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed)
+ out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed)
if denoise_mask is not None:
out *= denoise_mask
@@ -329,44 +283,43 @@ def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, cond_concat=
return out
def simple_scheduler(model, steps):
+ s = model.model_sampling
sigs = []
- ss = len(model.sigmas) / steps
+ ss = len(s.sigmas) / steps
for x in range(steps):
- sigs += [float(model.sigmas[-(1 + int(x * ss))])]
+ sigs += [float(s.sigmas[-(1 + int(x * ss))])]
sigs += [0.0]
return torch.FloatTensor(sigs)
def ddim_scheduler(model, steps):
+ s = model.model_sampling
sigs = []
- ddim_timesteps = make_ddim_timesteps(ddim_discr_method="uniform", num_ddim_timesteps=steps, num_ddpm_timesteps=model.inner_model.inner_model.num_timesteps, verbose=False)
- for x in range(len(ddim_timesteps) - 1, -1, -1):
- ts = ddim_timesteps[x]
- if ts > 999:
- ts = 999
- sigs.append(model.t_to_sigma(torch.tensor(ts)))
+ ss = len(s.sigmas) // steps
+ x = 1
+ while x < len(s.sigmas):
+ sigs += [float(s.sigmas[x])]
+ x += ss
+ sigs = sigs[::-1]
sigs += [0.0]
return torch.FloatTensor(sigs)
-def sgm_scheduler(model, steps):
+def normal_scheduler(model, steps, sgm=False, floor=False):
+ s = model.model_sampling
+ start = s.timestep(s.sigma_max)
+ end = s.timestep(s.sigma_min)
+
+ if sgm:
+ timesteps = torch.linspace(start, end, steps + 1)[:-1]
+ else:
+ timesteps = torch.linspace(start, end, steps)
+
sigs = []
- timesteps = torch.linspace(model.inner_model.inner_model.num_timesteps - 1, 0, steps + 1)[:-1].type(torch.int)
for x in range(len(timesteps)):
ts = timesteps[x]
- if ts > 999:
- ts = 999
- sigs.append(model.t_to_sigma(torch.tensor(ts)))
+ sigs.append(s.sigma(ts))
sigs += [0.0]
return torch.FloatTensor(sigs)
-def blank_inpaint_image_like(latent_image):
- blank_image = torch.ones_like(latent_image)
- # these are the values for "zero" in pixel space translated to latent space
- blank_image[:,0] *= 0.8223
- blank_image[:,1] *= -0.6876
- blank_image[:,2] *= 0.6364
- blank_image[:,3] *= 0.1380
- return blank_image
-
def get_mask_aabb(masks):
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device, dtype=torch.int)
@@ -395,19 +348,19 @@ def resolve_areas_and_cond_masks(conditions, h, w, device):
# While we're doing this, we can also resolve the mask device and scaling for performance reasons
for i in range(len(conditions)):
c = conditions[i]
- if 'area' in c[1]:
- area = c[1]['area']
+ if 'area' in c:
+ area = c['area']
if area[0] == "percentage":
- modified = c[1].copy()
+ modified = c.copy()
area = (max(1, round(area[1] * h)), max(1, round(area[2] * w)), round(area[3] * h), round(area[4] * w))
modified['area'] = area
- c = [c[0], modified]
+ c = modified
conditions[i] = c
- if 'mask' in c[1]:
- mask = c[1]['mask']
+ if 'mask' in c:
+ mask = c['mask']
mask = mask.to(device=device)
- modified = c[1].copy()
+ modified = c.copy()
if len(mask.shape) == 2:
mask = mask.unsqueeze(0)
if mask.shape[1] != h or mask.shape[2] != w:
@@ -428,66 +381,70 @@ def resolve_areas_and_cond_masks(conditions, h, w, device):
modified['area'] = area
modified['mask'] = mask
- conditions[i] = [c[0], modified]
+ conditions[i] = modified
def create_cond_with_same_area_if_none(conds, c):
- if 'area' not in c[1]:
+ if 'area' not in c:
return
- c_area = c[1]['area']
+ c_area = c['area']
smallest = None
for x in conds:
- if 'area' in x[1]:
- a = x[1]['area']
+ if 'area' in x:
+ a = x['area']
if c_area[2] >= a[2] and c_area[3] >= a[3]:
if a[0] + a[2] >= c_area[0] + c_area[2]:
if a[1] + a[3] >= c_area[1] + c_area[3]:
if smallest is None:
smallest = x
- elif 'area' not in smallest[1]:
+ elif 'area' not in smallest:
smallest = x
else:
- if smallest[1]['area'][0] * smallest[1]['area'][1] > a[0] * a[1]:
+ if smallest['area'][0] * smallest['area'][1] > a[0] * a[1]:
smallest = x
else:
if smallest is None:
smallest = x
if smallest is None:
return
- if 'area' in smallest[1]:
- if smallest[1]['area'] == c_area:
+ if 'area' in smallest:
+ if smallest['area'] == c_area:
return
- n = c[1].copy()
- conds += [[smallest[0], n]]
+
+ out = c.copy()
+ out['model_conds'] = smallest['model_conds'].copy() #TODO: which fields should be copied?
+ conds += [out]
def calculate_start_end_timesteps(model, conds):
+ s = model.model_sampling
for t in range(len(conds)):
x = conds[t]
timestep_start = None
timestep_end = None
- if 'start_percent' in x[1]:
- timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['start_percent'] * 999.0)))
- if 'end_percent' in x[1]:
- timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['end_percent'] * 999.0)))
+ if 'start_percent' in x:
+ timestep_start = s.percent_to_sigma(x['start_percent'])
+ if 'end_percent' in x:
+ timestep_end = s.percent_to_sigma(x['end_percent'])
if (timestep_start is not None) or (timestep_end is not None):
- n = x[1].copy()
+ n = x.copy()
if (timestep_start is not None):
n['timestep_start'] = timestep_start
if (timestep_end is not None):
n['timestep_end'] = timestep_end
- conds[t] = [x[0], n]
+ conds[t] = n
def pre_run_control(model, conds):
+ s = model.model_sampling
for t in range(len(conds)):
x = conds[t]
timestep_start = None
timestep_end = None
- percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0))
- if 'control' in x[1]:
- x[1]['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function)
+ percent_to_timestep_function = lambda a: s.percent_to_sigma(a)
+ if 'control' in x:
+ x['control'].pre_run(model, percent_to_timestep_function)
def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func):
cond_cnets = []
@@ -496,16 +453,16 @@ def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func):
uncond_other = []
for t in range(len(conds)):
x = conds[t]
- if 'area' not in x[1]:
- if name in x[1] and x[1][name] is not None:
- cond_cnets.append(x[1][name])
+ if 'area' not in x:
+ if name in x and x[name] is not None:
+ cond_cnets.append(x[name])
else:
cond_other.append((x, t))
for t in range(len(uncond)):
x = uncond[t]
- if 'area' not in x[1]:
- if name in x[1] and x[1][name] is not None:
- uncond_cnets.append(x[1][name])
+ if 'area' not in x:
+ if name in x and x[name] is not None:
+ uncond_cnets.append(x[name])
else:
uncond_other.append((x, t))
@@ -515,50 +472,188 @@ def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func):
for x in range(len(cond_cnets)):
temp = uncond_other[x % len(uncond_other)]
o = temp[0]
- if name in o[1] and o[1][name] is not None:
- n = o[1].copy()
+ if name in o and o[name] is not None:
+ n = o.copy()
n[name] = uncond_fill_func(cond_cnets, x)
- uncond += [[o[0], n]]
+ uncond += [n]
else:
- n = o[1].copy()
+ n = o.copy()
n[name] = uncond_fill_func(cond_cnets, x)
- uncond[temp[1]] = [o[0], n]
+ uncond[temp[1]] = n
-def encode_adm(model, conds, batch_size, width, height, device, prompt_type):
+def encode_model_conds(model_function, conds, noise, device, prompt_type, **kwargs):
for t in range(len(conds)):
x = conds[t]
- adm_out = None
- if 'adm' in x[1]:
- adm_out = x[1]["adm"]
+ params = x.copy()
+ params["device"] = device
+ params["noise"] = noise
+ params["width"] = params.get("width", noise.shape[3] * 8)
+ params["height"] = params.get("height", noise.shape[2] * 8)
+ params["prompt_type"] = params.get("prompt_type", prompt_type)
+ for k in kwargs:
+ if k not in params:
+ params[k] = kwargs[k]
+
+ out = model_function(**params)
+ x = x.copy()
+ model_conds = x['model_conds'].copy()
+ for k in out:
+ model_conds[k] = out[k]
+ x['model_conds'] = model_conds
+ conds[t] = x
+ return conds
+
+class Sampler:
+ def sample(self):
+ pass
+
+ def max_denoise(self, model_wrap, sigmas):
+ max_sigma = float(model_wrap.inner_model.model_sampling.sigma_max)
+ sigma = float(sigmas[0])
+ return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma
+
+class UNIPC(Sampler):
+ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
+ return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar)
+
+class UNIPCBH2(Sampler):
+ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
+ return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar)
+
+KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
+ "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
+ "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
+
+class KSAMPLER(Sampler):
+ def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
+ self.sampler_function = sampler_function
+ self.extra_options = extra_options
+ self.inpaint_options = inpaint_options
+
+ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
+ extra_args["denoise_mask"] = denoise_mask
+ model_k = KSamplerX0Inpaint(model_wrap)
+ model_k.latent_image = latent_image
+ if self.inpaint_options.get("random", False): #TODO: Should this be the default?
+ generator = torch.manual_seed(extra_args.get("seed", 41) + 1)
+ model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device)
else:
- params = x[1].copy()
- params["width"] = params.get("width", width * 8)
- params["height"] = params.get("height", height * 8)
- params["prompt_type"] = params.get("prompt_type", prompt_type)
- adm_out = model.encode_adm(device=device, **params)
+ model_k.noise = noise
- if adm_out is not None:
- x[1] = x[1].copy()
- x[1]["adm_encoded"] = comfy.utils.repeat_to_batch_size(adm_out, batch_size).to(device)
+ if self.max_denoise(model_wrap, sigmas):
+ noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)
+ else:
+ noise = noise * sigmas[0]
+
+ k_callback = None
+ total_steps = len(sigmas) - 1
+ if callback is not None:
+ k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps)
+
+ if latent_image is not None:
+ noise += latent_image
+
+ samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
+ return samples
- return conds
+def ksampler(sampler_name, extra_options={}, inpaint_options={}):
+ if sampler_name == "dpm_fast":
+ def dpm_fast_function(model, noise, sigmas, extra_args, callback, disable):
+ sigma_min = sigmas[-1]
+ if sigma_min == 0:
+ sigma_min = sigmas[-2]
+ total_steps = len(sigmas) - 1
+ return k_diffusion_sampling.sample_dpm_fast(model, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=callback, disable=disable)
+ sampler_function = dpm_fast_function
+ elif sampler_name == "dpm_adaptive":
+ def dpm_adaptive_function(model, noise, sigmas, extra_args, callback, disable):
+ sigma_min = sigmas[-1]
+ if sigma_min == 0:
+ sigma_min = sigmas[-2]
+ return k_diffusion_sampling.sample_dpm_adaptive(model, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=callback, disable=disable)
+ sampler_function = dpm_adaptive_function
+ else:
+ sampler_function = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))
+
+ return KSAMPLER(sampler_function, extra_options, inpaint_options)
+
+def wrap_model(model):
+ model_denoise = CFGNoisePredictor(model)
+ return model_denoise
+
+def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None):
+ positive = positive[:]
+ negative = negative[:]
+
+ resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], device)
+ resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], device)
+
+ model_wrap = wrap_model(model)
+
+ calculate_start_end_timesteps(model, negative)
+ calculate_start_end_timesteps(model, positive)
+
+ #make sure each cond area has an opposite one with the same area
+ for c in positive:
+ create_cond_with_same_area_if_none(negative, c)
+ for c in negative:
+ create_cond_with_same_area_if_none(positive, c)
+
+ pre_run_control(model, negative + positive)
+
+ apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x])
+ apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x])
+
+ if latent_image is not None:
+ latent_image = model.process_latent_in(latent_image)
+
+ if hasattr(model, 'extra_conds'):
+ positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask)
+ negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask)
+
+ extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed}
+
+ samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
+ return model.process_latent_out(samples.to(torch.float32))
+
+SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"]
+SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
+
+def calculate_sigmas_scheduler(model, scheduler_name, steps):
+ if scheduler_name == "karras":
+ sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model.model_sampling.sigma_min), sigma_max=float(model.model_sampling.sigma_max))
+ elif scheduler_name == "exponential":
+ sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model.model_sampling.sigma_min), sigma_max=float(model.model_sampling.sigma_max))
+ elif scheduler_name == "normal":
+ sigmas = normal_scheduler(model, steps)
+ elif scheduler_name == "simple":
+ sigmas = simple_scheduler(model, steps)
+ elif scheduler_name == "ddim_uniform":
+ sigmas = ddim_scheduler(model, steps)
+ elif scheduler_name == "sgm_uniform":
+ sigmas = normal_scheduler(model, steps, sgm=True)
+ else:
+ print("error invalid scheduler", self.scheduler)
+ return sigmas
+
+def sampler_object(name):
+ if name == "uni_pc":
+ sampler = UNIPC()
+ elif name == "uni_pc_bh2":
+ sampler = UNIPCBH2()
+ elif name == "ddim":
+ sampler = ksampler("euler", inpaint_options={"random": True})
+ else:
+ sampler = ksampler(name)
+ return sampler
class KSampler:
- SCHEDULERS = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"]
- SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
- "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
- "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "ddim", "uni_pc", "uni_pc_bh2"]
+ SCHEDULERS = SCHEDULER_NAMES
+ SAMPLERS = SAMPLER_NAMES
def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None, model_options={}):
self.model = model
- self.model_denoise = CFGNoisePredictor(self.model)
- if self.model.model_type == model_base.ModelType.V_PREDICTION:
- self.model_wrap = CompVisVDenoiser(self.model_denoise, quantize=True)
- else:
- self.model_wrap = k_diffusion_external.CompVisDenoiser(self.model_denoise, quantize=True)
-
- self.model_k = KSamplerX0Inpaint(self.model_wrap)
self.device = device
if scheduler not in self.SCHEDULERS:
scheduler = self.SCHEDULERS[0]
@@ -566,8 +661,6 @@ def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=N
sampler = self.SAMPLERS[0]
self.scheduler = scheduler
self.sampler = sampler
- self.sigma_min=float(self.model_wrap.sigma_min)
- self.sigma_max=float(self.model_wrap.sigma_max)
self.set_steps(steps, denoise)
self.denoise = denoise
self.model_options = model_options
@@ -576,24 +669,11 @@ def calculate_sigmas(self, steps):
sigmas = None
discard_penultimate_sigma = False
- if self.sampler in ['dpm_2', 'dpm_2_ancestral']:
+ if self.sampler in ['dpm_2', 'dpm_2_ancestral', 'uni_pc', 'uni_pc_bh2']:
steps += 1
discard_penultimate_sigma = True
- if self.scheduler == "karras":
- sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max)
- elif self.scheduler == "exponential":
- sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max)
- elif self.scheduler == "normal":
- sigmas = self.model_wrap.get_sigmas(steps)
- elif self.scheduler == "simple":
- sigmas = simple_scheduler(self.model_wrap, steps)
- elif self.scheduler == "ddim_uniform":
- sigmas = ddim_scheduler(self.model_wrap, steps)
- elif self.scheduler == "sgm_uniform":
- sigmas = sgm_scheduler(self.model_wrap, steps)
- else:
- print("error invalid scheduler", self.scheduler)
+ sigmas = calculate_sigmas_scheduler(self.model, self.scheduler, steps)
if discard_penultimate_sigma:
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
@@ -611,10 +691,8 @@ def set_steps(self, steps, denoise=None):
def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None):
if sigmas is None:
sigmas = self.sigmas
- sigma_min = self.sigma_min
if last_step is not None and last_step < (len(sigmas) - 1):
- sigma_min = sigmas[last_step]
sigmas = sigmas[:last_step + 1]
if force_full_denoise:
sigmas[-1] = 0
@@ -628,117 +706,6 @@ def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=N
else:
return torch.zeros_like(noise)
- positive = positive[:]
- negative = negative[:]
-
- resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
- resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
-
- calculate_start_end_timesteps(self.model_wrap, negative)
- calculate_start_end_timesteps(self.model_wrap, positive)
-
- #make sure each cond area has an opposite one with the same area
- for c in positive:
- create_cond_with_same_area_if_none(negative, c)
- for c in negative:
- create_cond_with_same_area_if_none(positive, c)
-
- pre_run_control(self.model_wrap, negative + positive)
-
- apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x])
- apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x])
-
- if self.model.is_adm():
- positive = encode_adm(self.model, positive, noise.shape[0], noise.shape[3], noise.shape[2], self.device, "positive")
- negative = encode_adm(self.model, negative, noise.shape[0], noise.shape[3], noise.shape[2], self.device, "negative")
-
- if latent_image is not None:
- latent_image = self.model.process_latent_in(latent_image)
-
- extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": self.model_options, "seed":seed}
-
- cond_concat = None
- if hasattr(self.model, 'concat_keys'): #inpaint
- cond_concat = []
- for ck in self.model.concat_keys:
- if denoise_mask is not None:
- if ck == "mask":
- cond_concat.append(denoise_mask[:,:1])
- elif ck == "masked_image":
- cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space
- else:
- if ck == "mask":
- cond_concat.append(torch.ones_like(noise)[:,:1])
- elif ck == "masked_image":
- cond_concat.append(blank_inpaint_image_like(noise))
- extra_args["cond_concat"] = cond_concat
-
- if sigmas[0] != self.sigmas[0] or (self.denoise is not None and self.denoise < 1.0):
- max_denoise = False
- else:
- max_denoise = True
-
-
- if self.sampler == "uni_pc":
- samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar)
- elif self.sampler == "uni_pc_bh2":
- samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar)
- elif self.sampler == "ddim":
- timesteps = []
- for s in range(sigmas.shape[0]):
- timesteps.insert(0, self.model_wrap.sigma_to_discrete_timestep(sigmas[s]))
- noise_mask = None
- if denoise_mask is not None:
- noise_mask = 1.0 - denoise_mask
-
- ddim_callback = None
- if callback is not None:
- total_steps = len(timesteps) - 1
- ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None, total_steps)
-
- sampler = DDIMSampler(self.model, device=self.device)
- sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False)
- z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise)
- samples, _ = sampler.sample_custom(ddim_timesteps=timesteps,
- conditioning=positive,
- batch_size=noise.shape[0],
- shape=noise.shape[1:],
- verbose=False,
- unconditional_guidance_scale=cfg,
- unconditional_conditioning=negative,
- eta=0.0,
- x_T=z_enc,
- x0=latent_image,
- img_callback=ddim_callback,
- denoise_function=self.model_wrap.predict_eps_discrete_timestep,
- extra_args=extra_args,
- mask=noise_mask,
- to_zero=sigmas[-1]==0,
- end_step=sigmas.shape[0] - 1,
- disable_pbar=disable_pbar)
-
- else:
- extra_args["denoise_mask"] = denoise_mask
- self.model_k.latent_image = latent_image
- self.model_k.noise = noise
-
- if max_denoise:
- noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)
- else:
- noise = noise * sigmas[0]
-
- k_callback = None
- total_steps = len(sigmas) - 1
- if callback is not None:
- k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps)
-
- if latent_image is not None:
- noise += latent_image
- if self.sampler == "dpm_fast":
- samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
- elif self.sampler == "dpm_adaptive":
- samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar)
- else:
- samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
+ sampler = sampler_object(self.sampler)
- return self.model.process_latent_out(samples.to(torch.float32))
+ return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
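
The samplers.py refactor above turns scheduler and sampler construction into standalone module-level helpers instead of KSampler internals. A minimal sketch of how they compose (illustrative only, not part of the diff; assumes `model` is a model_base.BaseModel exposing the new `model_sampling` attribute):

    import comfy.samplers

    sigmas = comfy.samplers.calculate_sigmas_scheduler(model, "karras", 20)  # tensor of 21 sigmas, ending in 0.0
    sampler = comfy.samplers.sampler_object("dpmpp_2m")                      # KSAMPLER wrapping k_diffusion's sample_dpmpp_2m
    # comfy.samplers.sample(...) then drives sampler.sample() with these sigmas and the new dict-style conds
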
diff --git a/comfy/sd.py b/comfy/sd.py
index 9bdb2ad64ff..f4f84d0a032 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -4,7 +4,7 @@
from comfy import model_management
from .ldm.util import instantiate_from_config
-from .ldm.models.autoencoder import AutoencoderKL
+from .ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine
import yaml
import comfy.utils
@@ -23,6 +23,7 @@
import comfy.lora
import comfy.t2i_adapter.adapter
import comfy.supported_models_base
+import comfy.taesd.taesd
def load_model_weights(model, sd):
m, u = model.load_state_dict(sd, strict=False)
@@ -55,13 +56,26 @@ def load_clip_weights(model, sd):
def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
- key_map = comfy.lora.model_lora_keys_unet(model.model)
- key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map)
+ key_map = {}
+ if model is not None:
+ key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
+ if clip is not None:
+ key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map)
+
loaded = comfy.lora.load_lora(lora, key_map)
- new_modelpatcher = model.clone()
- k = new_modelpatcher.add_patches(loaded, strength_model)
- new_clip = clip.clone()
- k1 = new_clip.add_patches(loaded, strength_clip)
+ if model is not None:
+ new_modelpatcher = model.clone()
+ k = new_modelpatcher.add_patches(loaded, strength_model)
+ else:
+ k = ()
+ new_modelpatcher = None
+
+ if clip is not None:
+ new_clip = clip.clone()
+ k1 = new_clip.add_patches(loaded, strength_clip)
+ else:
+ k1 = ()
+ new_clip = None
k = set(k)
k1 = set(k1)
for x in loaded:
@@ -82,10 +96,7 @@ def __init__(self, target=None, embedding_directory=None, no_init=False):
load_device = model_management.text_encoder_device()
offload_device = model_management.text_encoder_offload_device()
params['device'] = offload_device
- if model_management.should_use_fp16(load_device, prioritize_performance=False):
- params['dtype'] = torch.float16
- else:
- params['dtype'] = torch.float32
+ params['dtype'] = model_management.text_encoder_dtype(load_device)
self.cond_stage_model = clip(**(params))
@@ -140,27 +151,48 @@ def get_key_patches(self):
return self.patcher.get_key_patches()
class VAE:
- def __init__(self, ckpt_path=None, device=None, config=None):
+ def __init__(self, sd=None, device=None, config=None):
+ if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
+ sd = diffusers_convert.convert_vae_state_dict(sd)
+
+ self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower)
+ self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype)
+
if config is None:
- #default SD1.x/SD2.x VAE parameters
- ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
- self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
+ if "decoder.mid.block_1.mix_factor" in sd:
+ encoder_config = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
+ decoder_config = encoder_config.copy()
+ decoder_config["video_kernel_size"] = [3, 1, 1]
+ decoder_config["alpha"] = 0.0
+ self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"},
+ encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': encoder_config},
+ decoder_config={'target': "comfy.ldm.modules.temporal_ae.VideoDecoder", 'params': decoder_config})
+ elif "taesd_decoder.1.weight" in sd:
+ self.first_stage_model = comfy.taesd.taesd.TAESD()
+ else:
+ #default SD1.x/SD2.x VAE parameters
+ ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
+ self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
else:
self.first_stage_model = AutoencoderKL(**(config['params']))
self.first_stage_model = self.first_stage_model.eval()
- if ckpt_path is not None:
- sd = comfy.utils.load_torch_file(ckpt_path)
- if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
- sd = diffusers_convert.convert_vae_state_dict(sd)
- self.first_stage_model.load_state_dict(sd, strict=False)
+
+ m, u = self.first_stage_model.load_state_dict(sd, strict=False)
+ if len(m) > 0:
+ print("Missing VAE keys", m)
+
+ if len(u) > 0:
+ print("Leftover VAE keys", u)
if device is None:
device = model_management.vae_device()
self.device = device
- self.offload_device = model_management.vae_offload_device()
+ offload_device = model_management.vae_offload_device()
self.vae_dtype = model_management.vae_dtype()
self.first_stage_model.to(self.vae_dtype)
+ self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device)
+
def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
steps = samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
steps += samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
@@ -181,7 +213,7 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
pbar = comfy.utils.ProgressBar(steps)
- encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
+ encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).float()
samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
@@ -189,10 +221,9 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
return samples
def decode(self, samples_in):
- self.first_stage_model = self.first_stage_model.to(self.device)
try:
- memory_used = (2562 * samples_in.shape[2] * samples_in.shape[3] * 64) * 1.7
- model_management.free_memory(memory_used, self.device)
+ memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype)
+ model_management.load_models_gpu([self.patcher], memory_required=memory_used)
free_memory = model_management.get_free_memory(self.device)
batch_number = int(free_memory / memory_used)
batch_number = max(1, batch_number)
@@ -200,47 +231,42 @@ def decode(self, samples_in):
pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
for x in range(0, samples_in.shape[0], batch_number):
samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
- pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
+ pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).cpu().float() + 1.0) / 2.0, min=0.0, max=1.0)
except model_management.OOM_EXCEPTION as e:
print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
pixel_samples = self.decode_tiled_(samples_in)
- self.first_stage_model = self.first_stage_model.to(self.offload_device)
pixel_samples = pixel_samples.cpu().movedim(1,-1)
return pixel_samples
def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
- self.first_stage_model = self.first_stage_model.to(self.device)
+ model_management.load_model_gpu(self.patcher)
output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
- self.first_stage_model = self.first_stage_model.to(self.offload_device)
return output.movedim(1,-1)
def encode(self, pixel_samples):
- self.first_stage_model = self.first_stage_model.to(self.device)
pixel_samples = pixel_samples.movedim(-1,1)
try:
- memory_used = (2078 * pixel_samples.shape[2] * pixel_samples.shape[3]) * 1.7 #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
- model_management.free_memory(memory_used, self.device)
+ memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype)
+ model_management.load_models_gpu([self.patcher], memory_required=memory_used)
free_memory = model_management.get_free_memory(self.device)
batch_number = int(free_memory / memory_used)
batch_number = max(1, batch_number)
samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
for x in range(0, pixel_samples.shape[0], batch_number):
pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
- samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float()
+ samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).cpu().float()
except model_management.OOM_EXCEPTION as e:
print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
samples = self.encode_tiled_(pixel_samples)
- self.first_stage_model = self.first_stage_model.to(self.offload_device)
return samples
def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
- self.first_stage_model = self.first_stage_model.to(self.device)
+ model_management.load_model_gpu(self.patcher)
pixel_samples = pixel_samples.movedim(-1,1)
samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
- self.first_stage_model = self.first_stage_model.to(self.offload_device)
return samples
def get_sd(self):
@@ -325,7 +351,9 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
if "params" in model_config_params["unet_config"]:
unet_config = model_config_params["unet_config"]["params"]
if "use_fp16" in unet_config:
- fp16 = unet_config["use_fp16"]
+ fp16 = unet_config.pop("use_fp16")
+ if fp16:
+ unet_config["dtype"] = torch.float16
noise_aug_config = None
if "noise_aug_config" in model_config_params:
@@ -353,7 +381,7 @@ class EmptyClass:
from . import latent_formats
model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)
- model_config.unet_config = unet_config
+ model_config.unet_config = model_detection.convert_config(unet_config)
if config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
@@ -371,10 +399,8 @@ class EmptyClass:
model.load_model_weights(state_dict, "model.diffusion_model.")
if output_vae:
- w = WeightsLoader()
- vae = VAE(config=vae_config)
- w.first_stage_model = vae.first_stage_model
- load_model_weights(w, state_dict)
+ vae_sd = comfy.utils.state_dict_prefix_replace(state_dict, {"first_stage_model.": ""}, filter_keys=True)
+ vae = VAE(sd=vae_sd, config=vae_config)
if output_clip:
w = WeightsLoader()
@@ -383,31 +409,34 @@ class EmptyClass:
if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
clip_target.clip = sd2_clip.SD2ClipModel
clip_target.tokenizer = sd2_clip.SD2Tokenizer
+ clip = CLIP(clip_target, embedding_directory=embedding_directory)
+ w.cond_stage_model = clip.cond_stage_model.clip_h
elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
clip_target.clip = sd1_clip.SD1ClipModel
clip_target.tokenizer = sd1_clip.SD1Tokenizer
- clip = CLIP(clip_target, embedding_directory=embedding_directory)
- w.cond_stage_model = clip.cond_stage_model
+ clip = CLIP(clip_target, embedding_directory=embedding_directory)
+ w.cond_stage_model = clip.cond_stage_model.clip_l
load_clip_weights(w, state_dict)
return (comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)
-def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
+def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
sd = comfy.utils.load_torch_file(ckpt_path)
sd_keys = sd.keys()
clip = None
clipvision = None
vae = None
model = None
+ model_patcher = None
clip_target = None
parameters = comfy.utils.calculate_parameters(sd, "model.diffusion_model.")
- fp16 = model_management.should_use_fp16(model_params=parameters)
+ unet_dtype = model_management.unet_dtype(model_params=parameters)
class WeightsLoader(torch.nn.Module):
pass
- model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
+ model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", unet_dtype)
if model_config is None:
raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
@@ -415,55 +444,51 @@ class WeightsLoader(torch.nn.Module):
if output_clipvision:
clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)
- dtype = torch.float32
- if fp16:
- dtype = torch.float16
-
- inital_load_device = model_management.unet_inital_load_device(parameters, dtype)
- offload_device = model_management.unet_offload_device()
- model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device)
- model.load_model_weights(sd, "model.diffusion_model.")
+ if output_model:
+ inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
+ offload_device = model_management.unet_offload_device()
+ model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device)
+ model.load_model_weights(sd, "model.diffusion_model.")
if output_vae:
- vae = VAE()
- w = WeightsLoader()
- w.first_stage_model = vae.first_stage_model
- load_model_weights(w, sd)
+ vae_sd = comfy.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
+ vae_sd = model_config.process_vae_state_dict(vae_sd)
+ vae = VAE(sd=vae_sd)
if output_clip:
w = WeightsLoader()
clip_target = model_config.clip_target()
- clip = CLIP(clip_target, embedding_directory=embedding_directory)
- w.cond_stage_model = clip.cond_stage_model
- sd = model_config.process_clip_state_dict(sd)
- load_model_weights(w, sd)
+ if clip_target is not None:
+ clip = CLIP(clip_target, embedding_directory=embedding_directory)
+ w.cond_stage_model = clip.cond_stage_model
+ sd = model_config.process_clip_state_dict(sd)
+ load_model_weights(w, sd)
left_over = sd.keys()
if len(left_over) > 0:
print("left over keys:", left_over)
- model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
- if inital_load_device != torch.device("cpu"):
- print("loaded straight to GPU")
- model_management.load_model_gpu(model_patcher)
+ if output_model:
+ model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
+ if inital_load_device != torch.device("cpu"):
+ print("loaded straight to GPU")
+ model_management.load_model_gpu(model_patcher)
return (model_patcher, clip, vae, clipvision)
-def load_unet(unet_path): #load unet in diffusers format
- sd = comfy.utils.load_torch_file(unet_path)
+def load_unet_state_dict(sd): #load unet in diffusers format
parameters = comfy.utils.calculate_parameters(sd)
- fp16 = model_management.should_use_fp16(model_params=parameters)
+ unet_dtype = model_management.unet_dtype(model_params=parameters)
if "input_blocks.0.0.weight" in sd: #ldm
- model_config = model_detection.model_config_from_unet(sd, "", fp16)
+ model_config = model_detection.model_config_from_unet(sd, "", unet_dtype)
if model_config is None:
- raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+ return None
new_sd = sd
else: #diffusers
- model_config = model_detection.model_config_from_diffusers_unet(sd, fp16)
+ model_config = model_detection.model_config_from_diffusers_unet(sd, unet_dtype)
if model_config is None:
- print("ERROR UNSUPPORTED UNET", unet_path)
return None
diffusers_keys = comfy.utils.unet_to_diffusers(model_config.unet_config)
@@ -478,8 +503,19 @@ def load_unet(unet_path): #load unet in diffusers format
model = model_config.get_model(new_sd, "")
model = model.to(offload_device)
model.load_model_weights(new_sd, "")
+ left_over = sd.keys()
+ if len(left_over) > 0:
+ print("left over keys in unet:", left_over)
return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device)
+def load_unet(unet_path):
+ sd = comfy.utils.load_torch_file(unet_path)
+ model = load_unet_state_dict(sd)
+ if model is None:
+ print("ERROR UNSUPPORTED UNET", unet_path)
+ raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+ return model
+
def save_checkpoint(output_path, model, clip, vae, metadata=None):
model_management.load_models_gpu([model, clip.load_model()])
sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
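
The VAE class above now takes a state dict (`sd=`) instead of a checkpoint path and picks the decoder implementation (AutoencoderKL, the temporal VideoDecoder, or TAESD) from the keys it finds. A minimal sketch of loading one manually (the file path is hypothetical; `images` is assumed to be a [B, H, W, C] float tensor in 0..1):

    import comfy.utils
    from comfy.sd import VAE

    vae_sd = comfy.utils.load_torch_file("models/vae/some_vae.safetensors")  # hypothetical path
    vae = VAE(sd=vae_sd)            # decoder type is inferred from the state-dict keys
    latent = vae.encode(images)     # returns [B, 4, H/8, W/8] latents
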
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 9978b6c35c6..58acb97fce7 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -8,34 +8,56 @@
from . import model_management
import contextlib
+def gen_empty_tokens(special_tokens, length):
+ start_token = special_tokens.get("start", None)
+ end_token = special_tokens.get("end", None)
+ pad_token = special_tokens.get("pad")
+ output = []
+ if start_token is not None:
+ output.append(start_token)
+ if end_token is not None:
+ output.append(end_token)
+ output += [pad_token] * (length - len(output))
+ return output
+
class ClipTokenWeightEncoder:
def encode_token_weights(self, token_weight_pairs):
- to_encode = list(self.empty_tokens)
+ to_encode = list()
+ max_token_len = 0
+ has_weights = False
for x in token_weight_pairs:
tokens = list(map(lambda a: a[0], x))
+ max_token_len = max(len(tokens), max_token_len)
+ has_weights = has_weights or not all(map(lambda a: a[1] == 1.0, x))
to_encode.append(tokens)
+ sections = len(to_encode)
+ if has_weights or sections == 0:
+ to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len))
+
out, pooled = self.encode(to_encode)
- z_empty = out[0:1]
- if pooled.shape[0] > 1:
- first_pooled = pooled[1:2]
+ if pooled is not None:
+ first_pooled = pooled[0:1].cpu()
else:
- first_pooled = pooled[0:1]
+ first_pooled = pooled
output = []
- for k in range(1, out.shape[0]):
+ for k in range(0, sections):
z = out[k:k+1]
- for i in range(len(z)):
- for j in range(len(z[i])):
- weight = token_weight_pairs[k - 1][j][1]
- z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j]
+ if has_weights:
+ z_empty = out[-1]
+ for i in range(len(z)):
+ for j in range(len(z[i])):
+ weight = token_weight_pairs[k][j][1]
+ if weight != 1.0:
+ z[i][j] = (z[i][j] - z_empty[j]) * weight + z_empty[j]
output.append(z)
if (len(output) == 0):
- return z_empty.cpu(), first_pooled.cpu()
- return torch.cat(output, dim=-2).cpu(), first_pooled.cpu()
+ return out[-1:].cpu(), first_pooled
+ return torch.cat(output, dim=-2).cpu(), first_pooled
-class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
+class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
"""Uses the CLIP transformer encoder for text (from huggingface)"""
LAYERS = [
"last",
@@ -43,37 +65,43 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
"hidden"
]
def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
- freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None): # clip-vit-base-patch32
+ freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None,
+ special_tokens={"start": 49406, "end": 49407, "pad": 49407},layer_norm_hidden_state=True, config_class=CLIPTextConfig,
+ model_class=CLIPTextModel, inner_name="text_model"): # clip-vit-base-patch32
super().__init__()
assert layer in self.LAYERS
self.num_layers = 12
if textmodel_path is not None:
- self.transformer = CLIPTextModel.from_pretrained(textmodel_path)
+ self.transformer = model_class.from_pretrained(textmodel_path)
else:
if textmodel_json_config is None:
textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json")
- config = CLIPTextConfig.from_json_file(textmodel_json_config)
+ config = config_class.from_json_file(textmodel_json_config)
self.num_layers = config.num_hidden_layers
with comfy.ops.use_comfy_ops(device, dtype):
with modeling_utils.no_init_weights():
- self.transformer = CLIPTextModel(config)
+ self.transformer = model_class(config)
+ self.inner_name = inner_name
if dtype is not None:
self.transformer.to(dtype)
- self.transformer.text_model.embeddings.token_embedding.to(torch.float32)
- self.transformer.text_model.embeddings.position_embedding.to(torch.float32)
+ inner_model = getattr(self.transformer, self.inner_name)
+ if hasattr(inner_model, "embeddings"):
+ inner_model.embeddings.to(torch.float32)
+ else:
+ self.transformer.set_input_embeddings(self.transformer.get_input_embeddings().to(torch.float32))
self.max_length = max_length
if freeze:
self.freeze()
self.layer = layer
self.layer_idx = None
- self.empty_tokens = [[49406] + [49407] * 76]
+ self.special_tokens = special_tokens
self.text_projection = torch.nn.Parameter(torch.eye(self.transformer.get_input_embeddings().weight.shape[1]))
self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055))
self.enable_attention_masks = False
- self.layer_norm_hidden_state = True
+ self.layer_norm_hidden_state = layer_norm_hidden_state
if layer == "hidden":
assert layer_idx is not None
assert abs(layer_idx) <= self.num_layers
@@ -117,7 +145,7 @@ def set_up_textual_embeddings(self, tokens, current_embeds):
else:
print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1])
while len(tokens_temp) < len(x):
- tokens_temp += [self.empty_tokens[0][-1]]
+ tokens_temp += [self.special_tokens["pad"]]
out_tokens += [tokens_temp]
n = token_dict_size
@@ -142,12 +170,12 @@ def forward(self, tokens):
tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
tokens = torch.LongTensor(tokens).to(device)
- if self.transformer.text_model.final_layer_norm.weight.dtype != torch.float32:
+ if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32:
precision_scope = torch.autocast
else:
- precision_scope = lambda a, b: contextlib.nullcontext(a)
+ precision_scope = lambda a, dtype: contextlib.nullcontext(a)
- with precision_scope(model_management.get_autocast_device(device), torch.float32):
+ with precision_scope(model_management.get_autocast_device(device), dtype=torch.float32):
attention_mask = None
if self.enable_attention_masks:
attention_mask = torch.zeros_like(tokens)
@@ -168,12 +196,16 @@ def forward(self, tokens):
else:
z = outputs.hidden_states[self.layer_idx]
if self.layer_norm_hidden_state:
- z = self.transformer.text_model.final_layer_norm(z)
+ z = getattr(self.transformer, self.inner_name).final_layer_norm(z)
- pooled_output = outputs.pooler_output
- if self.text_projection is not None:
+ if hasattr(outputs, "pooler_output"):
+ pooled_output = outputs.pooler_output.float()
+ else:
+ pooled_output = None
+
+ if self.text_projection is not None and pooled_output is not None:
pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float()
- return z.float(), pooled_output.float()
+ return z.float(), pooled_output
def encode(self, tokens):
return self(tokens)
@@ -278,7 +310,13 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
valid_file = None
for embed_dir in embedding_directory:
- embed_path = os.path.join(embed_dir, embedding_name)
+ embed_path = os.path.abspath(os.path.join(embed_dir, embedding_name))
+ embed_dir = os.path.abspath(embed_dir)
+ try:
+ if os.path.commonpath((embed_dir, embed_path)) != embed_dir:
+ continue
+ except:
+ continue
if not os.path.isfile(embed_path):
extensions = ['.safetensors', '.pt', '.bin']
for x in extensions:
@@ -336,18 +374,25 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
embed_out = next(iter(values))
return embed_out
-class SD1Tokenizer:
- def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l'):
+class SDTokenizer:
+ def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True):
if tokenizer_path is None:
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
- self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
+ self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
self.max_length = max_length
- self.max_tokens_per_section = self.max_length - 2
empty = self.tokenizer('')["input_ids"]
- self.start_token = empty[0]
- self.end_token = empty[1]
+ if has_start_token:
+ self.tokens_start = 1
+ self.start_token = empty[0]
+ self.end_token = empty[1]
+ else:
+ self.tokens_start = 0
+ self.start_token = None
+ self.end_token = empty[0]
self.pad_with_end = pad_with_end
+ self.pad_to_max_length = pad_to_max_length
+
vocab = self.tokenizer.get_vocab()
self.inv_vocab = {v: k for k, v in vocab.items()}
self.embedding_directory = embedding_directory
@@ -408,11 +453,13 @@ def tokenize_with_weights(self, text:str, return_word_ids=False):
else:
continue
#parse word
- tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
+ tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]])
#reshape token array to CLIP input size
batched_tokens = []
- batch = [(self.start_token, 1.0, 0)]
+ batch = []
+ if self.start_token is not None:
+ batch.append((self.start_token, 1.0, 0))
batched_tokens.append(batch)
for i, t_group in enumerate(tokens):
#determine if we're going to try and keep the tokens in a single batch
@@ -429,16 +476,21 @@ def tokenize_with_weights(self, text:str, return_word_ids=False):
#add end token and pad
else:
batch.append((self.end_token, 1.0, 0))
- batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
+ if self.pad_to_max_length:
+ batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
#start new batch
- batch = [(self.start_token, 1.0, 0)]
+ batch = []
+ if self.start_token is not None:
+ batch.append((self.start_token, 1.0, 0))
batched_tokens.append(batch)
else:
batch.extend([(t,w,i+1) for t,w in t_group])
t_group = []
#fill last batch
- batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
+ batch.append((self.end_token, 1.0, 0))
+ if self.pad_to_max_length:
+ batch.extend([(pad_token, 1.0, 0)] * (self.max_length - len(batch)))
if not return_word_ids:
batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]
@@ -448,3 +500,40 @@ def tokenize_with_weights(self, text:str, return_word_ids=False):
def untokenize(self, token_weight_pair):
return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
+
+
+class SD1Tokenizer:
+ def __init__(self, embedding_directory=None, clip_name="l", tokenizer=SDTokenizer):
+ self.clip_name = clip_name
+ self.clip = "clip_{}".format(self.clip_name)
+ setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory))
+
+ def tokenize_with_weights(self, text:str, return_word_ids=False):
+ out = {}
+ out[self.clip_name] = getattr(self, self.clip).tokenize_with_weights(text, return_word_ids)
+ return out
+
+ def untokenize(self, token_weight_pair):
+ return getattr(self, self.clip).untokenize(token_weight_pair)
+
+
+class SD1ClipModel(torch.nn.Module):
+ def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel, **kwargs):
+ super().__init__()
+ self.clip_name = clip_name
+ self.clip = "clip_{}".format(self.clip_name)
+ setattr(self, self.clip, clip_model(device=device, dtype=dtype, **kwargs))
+
+ def clip_layer(self, layer_idx):
+ getattr(self, self.clip).clip_layer(layer_idx)
+
+ def reset_clip_layer(self):
+ getattr(self, self.clip).reset_clip_layer()
+
+ def encode_token_weights(self, token_weight_pairs):
+ token_weight_pairs = token_weight_pairs[self.clip_name]
+ out, pooled = getattr(self, self.clip).encode_token_weights(token_weight_pairs)
+ return out, pooled
+
+ def load_sd(self, sd):
+ return getattr(self, self.clip).load_sd(sd)
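
The ClipTokenWeightEncoder rewrite above only encodes an extra empty-prompt section when some token weight differs from 1.0, and the new gen_empty_tokens helper builds that section from a model's special-token ids. For example (token ids taken from the SD2/SDXL-G configs in this patch):

    from comfy.sd1_clip import gen_empty_tokens

    gen_empty_tokens({"start": 49406, "end": 49407, "pad": 0}, 8)
    # -> [49406, 49407, 0, 0, 0, 0, 0, 0]
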
diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py
index 05e50a0057b..2ee0ca05586 100644
--- a/comfy/sd2_clip.py
+++ b/comfy/sd2_clip.py
@@ -2,16 +2,23 @@
import torch
import os
-class SD2ClipModel(sd1_clip.SD1ClipModel):
+class SD2ClipHModel(sd1_clip.SDClipModel):
def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None):
if layer == "penultimate":
layer="hidden"
layer_idx=23
textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json")
- super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype)
- self.empty_tokens = [[49406] + [49407] + [0] * 75]
+ super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0})
-class SD2Tokenizer(sd1_clip.SD1Tokenizer):
+class SD2ClipHTokenizer(sd1_clip.SDTokenizer):
def __init__(self, tokenizer_path=None, embedding_directory=None):
super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024)
+
+class SD2Tokenizer(sd1_clip.SD1Tokenizer):
+ def __init__(self, embedding_directory=None):
+ super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer)
+
+class SD2ClipModel(sd1_clip.SD1ClipModel):
+ def __init__(self, device="cpu", dtype=None, **kwargs):
+ super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel, **kwargs)
diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py
index e3ac2ee0b4a..673399e2222 100644
--- a/comfy/sdxl_clip.py
+++ b/comfy/sdxl_clip.py
@@ -2,28 +2,27 @@
import torch
import os
-class SDXLClipG(sd1_clip.SD1ClipModel):
+class SDXLClipG(sd1_clip.SDClipModel):
def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None):
if layer == "penultimate":
layer="hidden"
layer_idx=-2
textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json")
- super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype)
- self.empty_tokens = [[49406] + [49407] + [0] * 75]
- self.layer_norm_hidden_state = False
+ super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype,
+ special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False)
def load_sd(self, sd):
return super().load_sd(sd)
-class SDXLClipGTokenizer(sd1_clip.SD1Tokenizer):
+class SDXLClipGTokenizer(sd1_clip.SDTokenizer):
def __init__(self, tokenizer_path=None, embedding_directory=None):
super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g')
-class SDXLTokenizer(sd1_clip.SD1Tokenizer):
+class SDXLTokenizer:
def __init__(self, embedding_directory=None):
- self.clip_l = sd1_clip.SD1Tokenizer(embedding_directory=embedding_directory)
+ self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory)
self.clip_g = SDXLClipGTokenizer(embedding_directory=embedding_directory)
def tokenize_with_weights(self, text:str, return_word_ids=False):
@@ -38,8 +37,7 @@ def untokenize(self, token_weight_pair):
class SDXLClipModel(torch.nn.Module):
def __init__(self, device="cpu", dtype=None):
super().__init__()
- self.clip_l = sd1_clip.SD1ClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype)
- self.clip_l.layer_norm_hidden_state = False
+ self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype, layer_norm_hidden_state=False)
self.clip_g = SDXLClipG(device=device, dtype=dtype)
def clip_layer(self, layer_idx):
@@ -63,21 +61,6 @@ def load_sd(self, sd):
else:
return self.clip_l.load_sd(sd)
-class SDXLRefinerClipModel(torch.nn.Module):
+class SDXLRefinerClipModel(sd1_clip.SD1ClipModel):
def __init__(self, device="cpu", dtype=None):
- super().__init__()
- self.clip_g = SDXLClipG(device=device, dtype=dtype)
-
- def clip_layer(self, layer_idx):
- self.clip_g.clip_layer(layer_idx)
-
- def reset_clip_layer(self):
- self.clip_g.reset_clip_layer()
-
- def encode_token_weights(self, token_weight_pairs):
- token_weight_pairs_g = token_weight_pairs["g"]
- g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g)
- return g_out, g_pooled
-
- def load_sd(self, sd):
- return self.clip_g.load_sd(sd)
+ super().__init__(device=device, dtype=dtype, clip_name="g", clip_model=SDXLClipG)
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index bb8ae2148fd..7e2ac677d51 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -17,6 +17,7 @@ class SD15(supported_models_base.BASE):
"model_channels": 320,
"use_linear_in_transformer": False,
"adm_in_channels": None,
+ "use_temporal_attention": False,
}
unet_extra_config = {
@@ -38,8 +39,15 @@ def process_clip_state_dict(self, state_dict):
if ids.dtype == torch.float32:
state_dict['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()
+ replace_prefix = {}
+ replace_prefix["cond_stage_model."] = "cond_stage_model.clip_l."
+ state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix)
return state_dict
+ def process_clip_state_dict_for_saving(self, state_dict):
+ replace_prefix = {"clip_l.": "cond_stage_model."}
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
def clip_target(self):
return supported_models_base.ClipTarget(sd1_clip.SD1Tokenizer, sd1_clip.SD1ClipModel)
@@ -49,6 +57,7 @@ class SD20(supported_models_base.BASE):
"model_channels": 320,
"use_linear_in_transformer": True,
"adm_in_channels": None,
+ "use_temporal_attention": False,
}
latent_format = latent_formats.SD15
@@ -62,12 +71,12 @@ def model_type(self, state_dict, prefix=""):
return model_base.ModelType.EPS
def process_clip_state_dict(self, state_dict):
- state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
+ state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.clip_h.transformer.text_model.", 24)
return state_dict
def process_clip_state_dict_for_saving(self, state_dict):
replace_prefix = {}
- replace_prefix[""] = "cond_stage_model.model."
+ replace_prefix["clip_h"] = "cond_stage_model.model"
state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix)
state_dict = diffusers_convert.convert_text_enc_state_dict_v20(state_dict)
return state_dict
@@ -81,6 +90,7 @@ class SD21UnclipL(SD20):
"model_channels": 320,
"use_linear_in_transformer": True,
"adm_in_channels": 1536,
+ "use_temporal_attention": False,
}
clip_vision_prefix = "embedder.model.visual."
@@ -93,6 +103,7 @@ class SD21UnclipH(SD20):
"model_channels": 320,
"use_linear_in_transformer": True,
"adm_in_channels": 2048,
+ "use_temporal_attention": False,
}
clip_vision_prefix = "embedder.model.visual."
@@ -104,7 +115,8 @@ class SDXLRefiner(supported_models_base.BASE):
"use_linear_in_transformer": True,
"context_dim": 1280,
"adm_in_channels": 2560,
- "transformer_depth": [0, 4, 4, 0],
+ "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0],
+ "use_temporal_attention": False,
}
latent_format = latent_formats.SDXL
@@ -139,9 +151,10 @@ class SDXL(supported_models_base.BASE):
unet_config = {
"model_channels": 320,
"use_linear_in_transformer": True,
- "transformer_depth": [0, 2, 10],
+ "transformer_depth": [0, 0, 2, 2, 10, 10],
"context_dim": 2048,
- "adm_in_channels": 2816
+ "adm_in_channels": 2816,
+ "use_temporal_attention": False,
}
latent_format = latent_formats.SDXL
@@ -165,6 +178,7 @@ def process_clip_state_dict(self, state_dict):
replace_prefix["conditioner.embedders.0.transformer.text_model"] = "cond_stage_model.clip_l.transformer.text_model"
state_dict = utils.transformers_convert(state_dict, "conditioner.embedders.1.model.", "cond_stage_model.clip_g.transformer.text_model.", 32)
keys_to_replace["conditioner.embedders.1.model.text_projection"] = "cond_stage_model.clip_g.text_projection"
+ keys_to_replace["conditioner.embedders.1.model.text_projection.weight"] = "cond_stage_model.clip_g.text_projection"
keys_to_replace["conditioner.embedders.1.model.logit_scale"] = "cond_stage_model.clip_g.logit_scale"
state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix)
@@ -189,5 +203,40 @@ def process_clip_state_dict_for_saving(self, state_dict):
def clip_target(self):
return supported_models_base.ClipTarget(sdxl_clip.SDXLTokenizer, sdxl_clip.SDXLClipModel)
+class SSD1B(SDXL):
+ unet_config = {
+ "model_channels": 320,
+ "use_linear_in_transformer": True,
+ "transformer_depth": [0, 0, 2, 2, 4, 4],
+ "context_dim": 2048,
+ "adm_in_channels": 2816,
+ "use_temporal_attention": False,
+ }
+
+class SVD_img2vid(supported_models_base.BASE):
+ unet_config = {
+ "model_channels": 320,
+ "in_channels": 8,
+ "use_linear_in_transformer": True,
+ "transformer_depth": [1, 1, 1, 1, 1, 1, 0, 0],
+ "context_dim": 1024,
+ "adm_in_channels": 768,
+ "use_temporal_attention": True,
+ "use_temporal_resblock": True
+ }
+
+ clip_vision_prefix = "conditioner.embedders.0.open_clip.model.visual."
+
+ latent_format = latent_formats.SD15
+
+ sampling_settings = {"sigma_max": 700.0, "sigma_min": 0.002}
+
+ def get_model(self, state_dict, prefix="", device=None):
+ out = model_base.SVD_img2vid(self, device=device)
+ return out
+
+ def clip_target(self):
+ return None
-models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL]
+models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B]
+models += [SVD_img2vid]
diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py
index 88a1d7fde49..3412cfea030 100644
--- a/comfy/supported_models_base.py
+++ b/comfy/supported_models_base.py
@@ -19,7 +19,7 @@ class BASE:
clip_prefix = []
clip_vision_prefix = None
noise_aug_config = None
- beta_schedule = "linear"
+ sampling_settings = {}
latent_format = latent_formats.LatentFormat
@classmethod
@@ -53,6 +53,12 @@ def get_model(self, state_dict, prefix="", device=None):
def process_clip_state_dict(self, state_dict):
return state_dict
+ def process_unet_state_dict(self, state_dict):
+ return state_dict
+
+ def process_vae_state_dict(self, state_dict):
+ return state_dict
+
def process_clip_state_dict_for_saving(self, state_dict):
replace_prefix = {"": "cond_stage_model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
diff --git a/comfy/taesd/taesd.py b/comfy/taesd/taesd.py
index 1549345ae53..46f3097a2a1 100644
--- a/comfy/taesd/taesd.py
+++ b/comfy/taesd/taesd.py
@@ -6,6 +6,8 @@
import torch
import torch.nn as nn
+import comfy.utils
+
def conv(n_in, n_out, **kwargs):
return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
@@ -44,15 +46,16 @@ class TAESD(nn.Module):
latent_magnitude = 3
latent_shift = 0.5
- def __init__(self, encoder_path="taesd_encoder.pth", decoder_path="taesd_decoder.pth"):
+ def __init__(self, encoder_path=None, decoder_path=None):
"""Initialize pretrained TAESD on the given device from the given checkpoints."""
super().__init__()
- self.encoder = Encoder()
- self.decoder = Decoder()
+ self.taesd_encoder = Encoder()
+ self.taesd_decoder = Decoder()
+ self.vae_scale = torch.nn.Parameter(torch.tensor(1.0))
if encoder_path is not None:
- self.encoder.load_state_dict(torch.load(encoder_path, map_location="cpu", weights_only=True))
+ self.taesd_encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True))
if decoder_path is not None:
- self.decoder.load_state_dict(torch.load(decoder_path, map_location="cpu", weights_only=True))
+ self.taesd_decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True))
@staticmethod
def scale_latents(x):
@@ -63,3 +66,11 @@ def scale_latents(x):
def unscale_latents(x):
"""[0, 1] -> raw latents"""
return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)
+
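+ # decode: scale latents by the learned vae_scale, run the tiny decoder and map its [0, 1] output to [-1, 1]; encode is the inverse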
+ def decode(self, x):
+ x_sample = self.taesd_decoder(x * self.vae_scale)
+ x_sample = x_sample.sub(0.5).mul(2)
+ return x_sample
+
+ def encode(self, x):
+ return self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale
diff --git a/comfy/utils.py b/comfy/utils.py
index 7843b58ccad..294bbb425ff 100644
--- a/comfy/utils.py
+++ b/comfy/utils.py
@@ -47,12 +47,17 @@ def state_dict_key_replace(state_dict, keys_to_replace):
state_dict[keys_to_replace[x]] = state_dict.pop(x)
return state_dict
-def state_dict_prefix_replace(state_dict, replace_prefix):
+def state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=False):
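+ # filter_keys=True returns only the renamed keys; otherwise the keys are renamed inside the original dict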
+ if filter_keys:
+ out = {}
+ else:
+ out = state_dict
for rp in replace_prefix:
replace = list(map(lambda a: (a, "{}{}".format(replace_prefix[rp], a[len(rp):])), filter(lambda a: a.startswith(rp), state_dict.keys())))
for x in replace:
- state_dict[x[1]] = state_dict.pop(x[0])
- return state_dict
+ w = state_dict.pop(x[0])
+ out[x[1]] = w
+ return out
def transformers_convert(sd, prefix_from, prefix_to, number):
@@ -165,25 +170,12 @@ def transformers_convert(sd, prefix_from, prefix_to, number):
def unet_to_diffusers(unet_config):
num_res_blocks = unet_config["num_res_blocks"]
- attention_resolutions = unet_config["attention_resolutions"]
channel_mult = unet_config["channel_mult"]
- transformer_depth = unet_config["transformer_depth"]
+ transformer_depth = unet_config["transformer_depth"][:]
+ transformer_depth_output = unet_config["transformer_depth_output"][:]
num_blocks = len(channel_mult)
- if isinstance(num_res_blocks, int):
- num_res_blocks = [num_res_blocks] * num_blocks
- if isinstance(transformer_depth, int):
- transformer_depth = [transformer_depth] * num_blocks
-
- transformers_per_layer = []
- res = 1
- for i in range(num_blocks):
- transformers = 0
- if res in attention_resolutions:
- transformers = transformer_depth[i]
- transformers_per_layer.append(transformers)
- res *= 2
-
- transformers_mid = unet_config.get("transformer_depth_middle", transformer_depth[-1])
+
+ transformers_mid = unet_config.get("transformer_depth_middle", None)
diffusers_unet_map = {}
for x in range(num_blocks):
@@ -191,10 +183,11 @@ def unet_to_diffusers(unet_config):
for i in range(num_res_blocks[x]):
for b in UNET_MAP_RESNET:
diffusers_unet_map["down_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "input_blocks.{}.0.{}".format(n, b)
- if transformers_per_layer[x] > 0:
+ num_transformers = transformer_depth.pop(0)
+ if num_transformers > 0:
for b in UNET_MAP_ATTENTIONS:
diffusers_unet_map["down_blocks.{}.attentions.{}.{}".format(x, i, b)] = "input_blocks.{}.1.{}".format(n, b)
- for t in range(transformers_per_layer[x]):
+ for t in range(num_transformers):
for b in TRANSFORMER_BLOCKS:
diffusers_unet_map["down_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "input_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b)
n += 1
@@ -213,7 +206,6 @@ def unet_to_diffusers(unet_config):
diffusers_unet_map["mid_block.resnets.{}.{}".format(i, UNET_MAP_RESNET[b])] = "middle_block.{}.{}".format(n, b)
num_res_blocks = list(reversed(num_res_blocks))
- transformers_per_layer = list(reversed(transformers_per_layer))
for x in range(num_blocks):
n = (num_res_blocks[x] + 1) * x
l = num_res_blocks[x] + 1
@@ -222,11 +214,12 @@ def unet_to_diffusers(unet_config):
for b in UNET_MAP_RESNET:
diffusers_unet_map["up_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "output_blocks.{}.0.{}".format(n, b)
c += 1
- if transformers_per_layer[x] > 0:
+ num_transformers = transformer_depth_output.pop()
+ if num_transformers > 0:
c += 1
for b in UNET_MAP_ATTENTIONS:
diffusers_unet_map["up_blocks.{}.attentions.{}.{}".format(x, i, b)] = "output_blocks.{}.1.{}".format(n, b)
- for t in range(transformers_per_layer[x]):
+ for t in range(num_transformers):
for b in TRANSFORMER_BLOCKS:
diffusers_unet_map["up_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "output_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b)
if i == l - 1:
@@ -265,9 +258,17 @@ def set_attr(obj, attr, value):
for name in attrs[:-1]:
obj = getattr(obj, name)
prev = getattr(obj, attrs[-1])
- setattr(obj, attrs[-1], torch.nn.Parameter(value))
+ setattr(obj, attrs[-1], torch.nn.Parameter(value, requires_grad=False))
del prev
+def copy_to_param(obj, attr, value):
+ # inplace update tensor instead of replacing it
+ attrs = attr.split(".")
+ for name in attrs[:-1]:
+ obj = getattr(obj, name)
+ prev = getattr(obj, attrs[-1])
+ prev.data.copy_(value)
+
def get_attr(obj, attr):
attrs = attr.split(".")
for name in attrs:
@@ -306,23 +307,25 @@ def slerp(b1, b2, r):
res[dot < 1e-5 - 1] = (b1 * (1.0-r) + b2 * r)[dot < 1e-5 - 1]
return res
- def generate_bilinear_data(length_old, length_new):
- coords_1 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32)
+ def generate_bilinear_data(length_old, length_new, device):
+ coords_1 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1))
coords_1 = torch.nn.functional.interpolate(coords_1, size=(1, length_new), mode="bilinear")
ratios = coords_1 - coords_1.floor()
coords_1 = coords_1.to(torch.int64)
- coords_2 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32) + 1
+ coords_2 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1)) + 1
coords_2[:,:,:,-1] -= 1
coords_2 = torch.nn.functional.interpolate(coords_2, size=(1, length_new), mode="bilinear")
coords_2 = coords_2.to(torch.int64)
return ratios, coords_1, coords_2
-
+
+ orig_dtype = samples.dtype
+ samples = samples.float()
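+ # the interpolation is done in float32 and the result is cast back to the original dtype at the end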
n,c,h,w = samples.shape
h_new, w_new = (height, width)
#linear w
- ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new)
+ ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new, samples.device)
coords_1 = coords_1.expand((n, c, h, -1))
coords_2 = coords_2.expand((n, c, h, -1))
ratios = ratios.expand((n, 1, h, -1))
@@ -335,7 +338,7 @@ def generate_bilinear_data(length_old, length_new):
result = result.reshape(n, h, w_new, c).movedim(-1, 1)
#linear h
- ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new)
+ ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new, samples.device)
coords_1 = coords_1.reshape((1,1,-1,1)).expand((n, c, -1, w_new))
coords_2 = coords_2.reshape((1,1,-1,1)).expand((n, c, -1, w_new))
ratios = ratios.reshape((1,1,-1,1)).expand((n, 1, -1, w_new))
@@ -346,7 +349,7 @@ def generate_bilinear_data(length_old, length_new):
result = slerp(pass_1, pass_2, ratios)
result = result.reshape(n, h_new, w_new, c).movedim(-1, 1)
- return result
+ return result.to(orig_dtype)
def lanczos(samples, width, height):
images = [Image.fromarray(np.clip(255. * image.movedim(0, -1).cpu().numpy(), 0, 255).astype(np.uint8)) for image in samples]
@@ -408,6 +411,10 @@ def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_am
output[b:b+1] = out/out_div
return output
+PROGRESS_BAR_ENABLED = True
+def set_progress_bar_enabled(enabled):
+ global PROGRESS_BAR_ENABLED
+ PROGRESS_BAR_ENABLED = enabled
PROGRESS_BAR_HOOK = None
def set_progress_bar_global_hook(function):
diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py
new file mode 100644
index 00000000000..181b36ed68e
--- /dev/null
+++ b/comfy_extras/nodes_compositing.py
@@ -0,0 +1,202 @@
+import numpy as np
+import torch
+import comfy.utils
+from enum import Enum
+
+def resize_mask(mask, shape):
+ return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
+
+class PorterDuffMode(Enum):
+ ADD = 0
+ CLEAR = 1
+ DARKEN = 2
+ DST = 3
+ DST_ATOP = 4
+ DST_IN = 5
+ DST_OUT = 6
+ DST_OVER = 7
+ LIGHTEN = 8
+ MULTIPLY = 9
+ OVERLAY = 10
+ SCREEN = 11
+ SRC = 12
+ SRC_ATOP = 13
+ SRC_IN = 14
+ SRC_OUT = 15
+ SRC_OVER = 16
+ XOR = 17
+
+
+def porter_duff_composite(src_image: torch.Tensor, src_alpha: torch.Tensor, dst_image: torch.Tensor, dst_alpha: torch.Tensor, mode: PorterDuffMode):
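+ # each branch implements one Porter-Duff compositing operator; inputs are expected in [0, 1] and the (image, alpha) pair for that mode is returned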
+ if mode == PorterDuffMode.ADD:
+ out_alpha = torch.clamp(src_alpha + dst_alpha, 0, 1)
+ out_image = torch.clamp(src_image + dst_image, 0, 1)
+ elif mode == PorterDuffMode.CLEAR:
+ out_alpha = torch.zeros_like(dst_alpha)
+ out_image = torch.zeros_like(dst_image)
+ elif mode == PorterDuffMode.DARKEN:
+ out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
+ out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.min(src_image, dst_image)
+ elif mode == PorterDuffMode.DST:
+ out_alpha = dst_alpha
+ out_image = dst_image
+ elif mode == PorterDuffMode.DST_ATOP:
+ out_alpha = src_alpha
+ out_image = src_alpha * dst_image + (1 - dst_alpha) * src_image
+ elif mode == PorterDuffMode.DST_IN:
+ out_alpha = src_alpha * dst_alpha
+ out_image = dst_image * src_alpha
+ elif mode == PorterDuffMode.DST_OUT:
+ out_alpha = (1 - src_alpha) * dst_alpha
+ out_image = (1 - src_alpha) * dst_image
+ elif mode == PorterDuffMode.DST_OVER:
+ out_alpha = dst_alpha + (1 - dst_alpha) * src_alpha
+ out_image = dst_image + (1 - dst_alpha) * src_image
+ elif mode == PorterDuffMode.LIGHTEN:
+ out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
+ out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.max(src_image, dst_image)
+ elif mode == PorterDuffMode.MULTIPLY:
+ out_alpha = src_alpha * dst_alpha
+ out_image = src_image * dst_image
+ elif mode == PorterDuffMode.OVERLAY:
+ out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
+ out_image = torch.where(2 * dst_image < dst_alpha, 2 * src_image * dst_image,
+ src_alpha * dst_alpha - 2 * (dst_alpha - src_image) * (src_alpha - dst_image))
+ elif mode == PorterDuffMode.SCREEN:
+ out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
+ out_image = src_image + dst_image - src_image * dst_image
+ elif mode == PorterDuffMode.SRC:
+ out_alpha = src_alpha
+ out_image = src_image
+ elif mode == PorterDuffMode.SRC_ATOP:
+ out_alpha = dst_alpha
+ out_image = dst_alpha * src_image + (1 - src_alpha) * dst_image
+ elif mode == PorterDuffMode.SRC_IN:
+ out_alpha = src_alpha * dst_alpha
+ out_image = src_image * dst_alpha
+ elif mode == PorterDuffMode.SRC_OUT:
+ out_alpha = (1 - dst_alpha) * src_alpha
+ out_image = (1 - dst_alpha) * src_image
+ elif mode == PorterDuffMode.SRC_OVER:
+ out_alpha = src_alpha + (1 - src_alpha) * dst_alpha
+ out_image = src_image + (1 - src_alpha) * dst_image
+ elif mode == PorterDuffMode.XOR:
+ out_alpha = (1 - dst_alpha) * src_alpha + (1 - src_alpha) * dst_alpha
+ out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image
+ else:
+ out_alpha = None
+ out_image = None
+ return out_image, out_alpha
+
+
+class PorterDuffImageComposite:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "source": ("IMAGE",),
+ "source_alpha": ("MASK",),
+ "destination": ("IMAGE",),
+ "destination_alpha": ("MASK",),
+ "mode": ([mode.name for mode in PorterDuffMode], {"default": PorterDuffMode.DST.name}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK")
+ FUNCTION = "composite"
+ CATEGORY = "mask/compositing"
+
+ def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode):
+ batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha))
+ out_images = []
+ out_alphas = []
+
+ for i in range(batch_size):
+ src_image = source[i]
+ dst_image = destination[i]
+
+ assert src_image.shape[2] == dst_image.shape[2] # inputs need to have same number of channels
+
+ src_alpha = source_alpha[i].unsqueeze(2)
+ dst_alpha = destination_alpha[i].unsqueeze(2)
+
+ if dst_alpha.shape[:2] != dst_image.shape[:2]:
+ upscale_input = dst_alpha.unsqueeze(0).permute(0, 3, 1, 2)
+ upscale_output = comfy.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center')
+ dst_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0)
+ if src_image.shape != dst_image.shape:
+ upscale_input = src_image.unsqueeze(0).permute(0, 3, 1, 2)
+ upscale_output = comfy.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center')
+ src_image = upscale_output.permute(0, 2, 3, 1).squeeze(0)
+ if src_alpha.shape != dst_alpha.shape:
+ upscale_input = src_alpha.unsqueeze(0).permute(0, 3, 1, 2)
+ upscale_output = comfy.utils.common_upscale(upscale_input, dst_alpha.shape[1], dst_alpha.shape[0], upscale_method='bicubic', crop='center')
+ src_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0)
+
+ out_image, out_alpha = porter_duff_composite(src_image, src_alpha, dst_image, dst_alpha, PorterDuffMode[mode])
+
+ out_images.append(out_image)
+ out_alphas.append(out_alpha.squeeze(2))
+
+ result = (torch.stack(out_images), torch.stack(out_alphas))
+ return result
+
+
+class SplitImageWithAlpha:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE",),
+ }
+ }
+
+ CATEGORY = "mask/compositing"
+ RETURN_TYPES = ("IMAGE", "MASK")
+ FUNCTION = "split_image_with_alpha"
+
+ def split_image_with_alpha(self, image: torch.Tensor):
+ out_images = [i[:,:,:3] for i in image]
+ out_alphas = [i[:,:,3] if i.shape[2] > 3 else torch.ones_like(i[:,:,0]) for i in image]
+ result = (torch.stack(out_images), 1.0 - torch.stack(out_alphas))
+ return result
+
+
+class JoinImageWithAlpha:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE",),
+ "alpha": ("MASK",),
+ }
+ }
+
+ CATEGORY = "mask/compositing"
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "join_image_with_alpha"
+
+ def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor):
+ batch_size = min(len(image), len(alpha))
+ out_images = []
+
+ alpha = 1.0 - resize_mask(alpha, image.shape[1:])
+ for i in range(batch_size):
+ out_images.append(torch.cat((image[i][:,:,:3], alpha[i].unsqueeze(2)), dim=2))
+
+ result = (torch.stack(out_images),)
+ return result
+
+
+NODE_CLASS_MAPPINGS = {
+ "PorterDuffImageComposite": PorterDuffImageComposite,
+ "SplitImageWithAlpha": SplitImageWithAlpha,
+ "JoinImageWithAlpha": JoinImageWithAlpha,
+}
+
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "PorterDuffImageComposite": "Porter-Duff Image Composite",
+ "SplitImageWithAlpha": "Split Image with Alpha",
+ "JoinImageWithAlpha": "Join Image with Alpha",
+}
diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py
new file mode 100644
index 00000000000..008d0b8d6be
--- /dev/null
+++ b/comfy_extras/nodes_custom_sampler.py
@@ -0,0 +1,285 @@
+import comfy.samplers
+import comfy.sample
+from comfy.k_diffusion import sampling as k_diffusion_sampling
+import latent_preview
+import torch
+import comfy.utils
+
+
+class BasicScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"model": ("MODEL",),
+ "scheduler": (comfy.samplers.SCHEDULER_NAMES, ),
+ "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, model, scheduler, steps):
+ sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu()
+ return (sigmas, )
+
+
+class KarrasScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ "rho": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, steps, sigma_max, sigma_min, rho):
+ sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho)
+ return (sigmas, )
+
+class ExponentialScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, steps, sigma_max, sigma_min):
+ sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max)
+ return (sigmas, )
+
+class PolyexponentialScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ "rho": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, steps, sigma_max, sigma_min, rho):
+ sigmas = k_diffusion_sampling.get_sigmas_polyexponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho)
+ return (sigmas, )
+
+class SDTurboScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"model": ("MODEL",),
+ "steps": ("INT", {"default": 1, "min": 1, "max": 10}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, model, steps):
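+ # SD Turbo style distilled models are sampled at timesteps 999, 899, ... so build that ladder, convert it to sigmas and append a final 0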
+ timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps]
+ sigmas = model.model.model_sampling.sigma(timesteps)
+ sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
+ return (sigmas, )
+
+class VPScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "beta_d": ("FLOAT", {"default": 19.9, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), #TODO: fix default values
+ "beta_min": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}),
+ "eps_s": ("FLOAT", {"default": 0.001, "min": 0.0, "max": 1.0, "step":0.0001, "round": False}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, steps, beta_d, beta_min, eps_s):
+ sigmas = k_diffusion_sampling.get_sigmas_vp(n=steps, beta_d=beta_d, beta_min=beta_min, eps_s=eps_s)
+ return (sigmas, )
+
+class SplitSigmas:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"sigmas": ("SIGMAS", ),
+ "step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS","SIGMAS")
+ CATEGORY = "sampling/custom_sampling/sigmas"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, sigmas, step):
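+ # both halves keep the sigma at the split point, so a second sampler can continue exactly where the first one stopped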
+ sigmas1 = sigmas[:step + 1]
+ sigmas2 = sigmas[step:]
+ return (sigmas1, sigmas2)
+
+class FlipSigmas:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"sigmas": ("SIGMAS", ),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/sigmas"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, sigmas):
+ sigmas = sigmas.flip(0)
+ if sigmas[0] == 0:
+ sigmas[0] = 0.0001
+ return (sigmas,)
+
+class KSamplerSelect:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"sampler_name": (comfy.samplers.SAMPLER_NAMES, ),
+ }
+ }
+ RETURN_TYPES = ("SAMPLER",)
+ CATEGORY = "sampling/custom_sampling/samplers"
+
+ FUNCTION = "get_sampler"
+
+ def get_sampler(self, sampler_name):
+ sampler = comfy.samplers.sampler_object(sampler_name)
+ return (sampler, )
+
+class SamplerDPMPP_2M_SDE:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"solver_type": (['midpoint', 'heun'], ),
+ "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "noise_device": (['gpu', 'cpu'], ),
+ }
+ }
+ RETURN_TYPES = ("SAMPLER",)
+ CATEGORY = "sampling/custom_sampling/samplers"
+
+ FUNCTION = "get_sampler"
+
+ def get_sampler(self, solver_type, eta, s_noise, noise_device):
+ if noise_device == 'cpu':
+ sampler_name = "dpmpp_2m_sde"
+ else:
+ sampler_name = "dpmpp_2m_sde_gpu"
+ sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type})
+ return (sampler, )
+
+
+class SamplerDPMPP_SDE:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "r": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "noise_device": (['gpu', 'cpu'], ),
+ }
+ }
+ RETURN_TYPES = ("SAMPLER",)
+ CATEGORY = "sampling/custom_sampling/samplers"
+
+ FUNCTION = "get_sampler"
+
+ def get_sampler(self, eta, s_noise, r, noise_device):
+ if noise_device == 'cpu':
+ sampler_name = "dpmpp_sde"
+ else:
+ sampler_name = "dpmpp_sde_gpu"
+ sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})
+ return (sampler, )
+
+class SamplerCustom:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"model": ("MODEL",),
+ "add_noise": ("BOOLEAN", {"default": True}),
+ "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+ "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
+ "positive": ("CONDITIONING", ),
+ "negative": ("CONDITIONING", ),
+ "sampler": ("SAMPLER", ),
+ "sigmas": ("SIGMAS", ),
+ "latent_image": ("LATENT", ),
+ }
+ }
+
+ RETURN_TYPES = ("LATENT","LATENT")
+ RETURN_NAMES = ("output", "denoised_output")
+
+ FUNCTION = "sample"
+
+ CATEGORY = "sampling/custom_sampling"
+
+ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image):
+ latent = latent_image
+ latent_image = latent["samples"]
+ if not add_noise:
+ noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+ else:
+ batch_inds = latent["batch_index"] if "batch_index" in latent else None
+ noise = comfy.sample.prepare_noise(latent_image, noise_seed, batch_inds)
+
+ noise_mask = None
+ if "noise_mask" in latent:
+ noise_mask = latent["noise_mask"]
+
+ x0_output = {}
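+ # the preview callback stores the model's x0 prediction in x0_output so a denoised latent can be returned as the second output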
+ callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output)
+
+ disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
+ samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed)
+
+ out = latent.copy()
+ out["samples"] = samples
+ if "x0" in x0_output:
+ out_denoised = latent.copy()
+ out_denoised["samples"] = model.model.process_latent_out(x0_output["x0"].cpu())
+ else:
+ out_denoised = out
+ return (out, out_denoised)
+
+NODE_CLASS_MAPPINGS = {
+ "SamplerCustom": SamplerCustom,
+ "BasicScheduler": BasicScheduler,
+ "KarrasScheduler": KarrasScheduler,
+ "ExponentialScheduler": ExponentialScheduler,
+ "PolyexponentialScheduler": PolyexponentialScheduler,
+ "VPScheduler": VPScheduler,
+ "SDTurboScheduler": SDTurboScheduler,
+ "KSamplerSelect": KSamplerSelect,
+ "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE,
+ "SamplerDPMPP_SDE": SamplerDPMPP_SDE,
+ "SplitSigmas": SplitSigmas,
+ "FlipSigmas": FlipSigmas,
+}
diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py
index 07a88bd9614..7512b841d74 100644
--- a/comfy_extras/nodes_freelunch.py
+++ b/comfy_extras/nodes_freelunch.py
@@ -61,7 +61,53 @@ def output_block_patch(h, hsp, transformer_options):
m.set_model_output_block_patch(output_block_patch)
return (m, )
+class FreeU_V2:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "b1": ("FLOAT", {"default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "b2": ("FLOAT", {"default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "_for_testing"
+
+ def patch(self, model, b1, b2, s1, s2):
+ model_channels = model.model.model_config.unet_config["model_channels"]
+ scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
+ on_cpu_devices = {}
+
+ def output_block_patch(h, hsp, transformer_options):
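+ # scale the first half of the backbone channels by a factor modulated with the normalized per-sample hidden mean, then Fourier-filter the skip connection (falling back to CPU on devices without torch.fft support)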
+ scale = scale_dict.get(h.shape[1], None)
+ if scale is not None:
+ hidden_mean = h.mean(1).unsqueeze(1)
+ B = hidden_mean.shape[0]
+ hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
+ hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
+ hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)
+
+ h[:,:h.shape[1] // 2] = h[:,:h.shape[1] // 2] * ((scale[0] - 1 ) * hidden_mean + 1)
+
+ if hsp.device not in on_cpu_devices:
+ try:
+ hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
+ except:
+ print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.")
+ on_cpu_devices[hsp.device] = True
+ hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
+ else:
+ hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
+
+ return h, hsp
+
+ m = model.clone()
+ m.set_model_output_block_patch(output_block_patch)
+ return (m, )
NODE_CLASS_MAPPINGS = {
"FreeU": FreeU,
+ "FreeU_V2": FreeU_V2,
}
diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py
index d16c49aeb24..f692945a86b 100644
--- a/comfy_extras/nodes_hypernetwork.py
+++ b/comfy_extras/nodes_hypernetwork.py
@@ -19,6 +19,7 @@ def load_hypernetwork_patch(path, strength):
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
"softsign": torch.nn.Softsign,
+ "mish": torch.nn.Mish,
}
if activation_func not in valid_activation:
@@ -42,7 +43,8 @@ def load_hypernetwork_patch(path, strength):
linears = list(map(lambda a: a[:-len(".weight")], linears))
layers = []
- for i in range(len(linears)):
+ i = 0
+ while i < len(linears):
lin_name = linears[i]
last_layer = (i == (len(linears) - 1))
penultimate_layer = (i == (len(linears) - 2))
@@ -56,10 +58,17 @@ def load_hypernetwork_patch(path, strength):
if (not last_layer) or (activate_output):
layers.append(valid_activation[activation_func]())
if is_layer_norm:
- layers.append(torch.nn.LayerNorm(lin_weight.shape[0]))
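+ # the layer norm has its own entry in the state dict: consume it and load its weight/bias instead of appending an untrained LayerNorm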
+ i += 1
+ ln_name = linears[i]
+ ln_weight = attn_weights['{}.weight'.format(ln_name)]
+ ln_bias = attn_weights['{}.bias'.format(ln_name)]
+ ln = torch.nn.LayerNorm(ln_weight.shape[0])
+ ln.load_state_dict({"weight": ln_weight, "bias": ln_bias})
+ layers.append(ln)
if use_dropout:
if (not last_layer) and (not penultimate_layer or last_layer_dropout):
layers.append(torch.nn.Dropout(p=0.3))
+ i += 1
output.append(torch.nn.Sequential(*layers))
out[dim] = torch.nn.ModuleList(output)
diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py
new file mode 100644
index 00000000000..0d7d4c95483
--- /dev/null
+++ b/comfy_extras/nodes_hypertile.py
@@ -0,0 +1,83 @@
+#Taken from: https://github.com/tfernd/HyperTile/
+
+import math
+from einops import rearrange
+import random
+
+def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int:
+ min_value = min(min_value, value)
+
+ # All big divisors of value (inclusive)
+ divisors = [i for i in range(min_value, value + 1) if value % i == 0]
+
+ ns = [value // i for i in divisors[:max_options]] # has at least 1 element
+
+ random.seed(counter)
+ idx = random.randint(0, len(ns) - 1)
+
+ return ns[idx]
+
+class HyperTile:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}),
+ "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}),
+ "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}),
+ "scale_depth": ("BOOLEAN", {"default": False}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "_for_testing"
+
+ def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
+ model_channels = model.model.model_config.unet_config["model_channels"]
+
+ apply_to = set()
+ temp = model_channels
+ for x in range(max_depth + 1):
+ apply_to.add(temp)
+ temp *= 2
+
+ latent_tile_size = max(32, tile_size) // 8
+ self.temp = None
+ self.counter = 1
+
+ def hypertile_in(q, k, v, extra_options):
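+ # split the query tokens into an nh x nw grid of tiles so attention runs on smaller windows; hypertile_out reverses the rearrange afterwards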
+ if q.shape[-1] in apply_to:
+ shape = extra_options["original_shape"]
+ aspect_ratio = shape[-1] / shape[-2]
+
+ hw = q.size(1)
+ h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
+
+ factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
+ nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter)
+ self.counter += 1
+ nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter)
+ self.counter += 1
+
+ if nh * nw > 1:
+ q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
+ self.temp = (nh, nw, h, w)
+ return q, k, v
+
+ return q, k, v
+ def hypertile_out(out, extra_options):
+ if self.temp is not None:
+ nh, nw, h, w = self.temp
+ self.temp = None
+ out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
+ out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
+ return out
+
+
+ m = model.clone()
+ m.set_model_attn1_patch(hypertile_in)
+ m.set_model_attn1_output_patch(hypertile_out)
+ return (m, )
+
+NODE_CLASS_MAPPINGS = {
+ "HyperTile": HyperTile,
+}
diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py
new file mode 100644
index 00000000000..5ad2235a523
--- /dev/null
+++ b/comfy_extras/nodes_images.py
@@ -0,0 +1,175 @@
+import nodes
+import folder_paths
+from comfy.cli_args import args
+
+from PIL import Image
+from PIL.PngImagePlugin import PngInfo
+
+import numpy as np
+import json
+import os
+
+MAX_RESOLUTION = nodes.MAX_RESOLUTION
+
+class ImageCrop:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "image": ("IMAGE",),
+ "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+ "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+ "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+ "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+ }}
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "crop"
+
+ CATEGORY = "image/transform"
+
+ def crop(self, image, width, height, x, y):
+ x = min(x, image.shape[2] - 1)
+ y = min(y, image.shape[1] - 1)
+ to_x = width + x
+ to_y = height + y
+ img = image[:,y:to_y, x:to_x, :]
+ return (img,)
+
+class RepeatImageBatch:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "image": ("IMAGE",),
+ "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
+ }}
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "repeat"
+
+ CATEGORY = "image/batch"
+
+ def repeat(self, image, amount):
+ s = image.repeat((amount, 1,1,1))
+ return (s,)
+
+class SaveAnimatedWEBP:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+ self.type = "output"
+ self.prefix_append = ""
+
+ methods = {"default": 4, "fastest": 0, "slowest": 6}
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"images": ("IMAGE", ),
+ "filename_prefix": ("STRING", {"default": "ComfyUI"}),
+ "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
+ "lossless": ("BOOLEAN", {"default": True}),
+ "quality": ("INT", {"default": 80, "min": 0, "max": 100}),
+ "method": (list(s.methods.keys()),),
+ # "num_frames": ("INT", {"default": 0, "min": 0, "max": 8192}),
+ },
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+ }
+
+ RETURN_TYPES = ()
+ FUNCTION = "save_images"
+
+ OUTPUT_NODE = True
+
+ CATEGORY = "_for_testing"
+
+ def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None):
+ method = self.methods.get(method)
+ filename_prefix += self.prefix_append
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
+ results = list()
+ pil_images = []
+ for image in images:
+ i = 255. * image.cpu().numpy()
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+ pil_images.append(img)
+
+ metadata = pil_images[0].getexif()
+ if not args.disable_metadata:
+ if prompt is not None:
+ metadata[0x0110] = "prompt:{}".format(json.dumps(prompt))
+ if extra_pnginfo is not None:
+ initial_exif = 0x010f
+ for x in extra_pnginfo:
+ metadata[initial_exif] = "{}:{}".format(x, json.dumps(extra_pnginfo[x]))
+ initial_exif -= 1

+
+ if num_frames == 0:
+ num_frames = len(pil_images)
+
+ c = len(pil_images)
+ for i in range(0, c, num_frames):
+ file = f"{filename}_{counter:05}_.webp"
+ pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], exif=metadata, lossless=lossless, quality=quality, method=method)
+ results.append({
+ "filename": file,
+ "subfolder": subfolder,
+ "type": self.type
+ })
+ counter += 1
+
+ animated = num_frames != 1
+ return { "ui": { "images": results, "animated": (animated,) } }
+
+class SaveAnimatedPNG:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+ self.type = "output"
+ self.prefix_append = ""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"images": ("IMAGE", ),
+ "filename_prefix": ("STRING", {"default": "ComfyUI"}),
+ "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
+ "compress_level": ("INT", {"default": 4, "min": 0, "max": 9})
+ },
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+ }
+
+ RETURN_TYPES = ()
+ FUNCTION = "save_images"
+
+ OUTPUT_NODE = True
+
+ CATEGORY = "_for_testing"
+
+ def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
+ filename_prefix += self.prefix_append
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
+ results = list()
+ pil_images = []
+ for image in images:
+ i = 255. * image.cpu().numpy()
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+ pil_images.append(img)
+
+ metadata = None
+ if not args.disable_metadata:
+ metadata = PngInfo()
+ if prompt is not None:
+ metadata.add(b"comf", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True)
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata.add(b"comf", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True)
+
+ file = f"{filename}_{counter:05}_.png"
+ pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:])
+ results.append({
+ "filename": file,
+ "subfolder": subfolder,
+ "type": self.type
+ })
+
+ return { "ui": { "images": results, "animated": (True,)} }
+
+NODE_CLASS_MAPPINGS = {
+ "ImageCrop": ImageCrop,
+ "RepeatImageBatch": RepeatImageBatch,
+ "SaveAnimatedWEBP": SaveAnimatedWEBP,
+ "SaveAnimatedPNG": SaveAnimatedPNG,
+}
diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py
index 001de39fceb..cedf39d6346 100644
--- a/comfy_extras/nodes_latent.py
+++ b/comfy_extras/nodes_latent.py
@@ -1,4 +1,5 @@
import comfy.utils
+import torch
def reshape_latent_to(target_shape, latent):
if latent.shape[1:] != target_shape[1:]:
@@ -67,8 +68,43 @@ def op(self, samples, multiplier):
samples_out["samples"] = s1 * multiplier
return (samples_out,)
+class LatentInterpolate:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "samples1": ("LATENT",),
+ "samples2": ("LATENT",),
+ "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+ }}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "op"
+
+ CATEGORY = "latent/advanced"
+
+ def op(self, samples1, samples2, ratio):
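+ # interpolate direction and magnitude separately: normalize both latents, blend the unit vectors, then rescale by the blended norms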
+ samples_out = samples1.copy()
+
+ s1 = samples1["samples"]
+ s2 = samples2["samples"]
+
+ s2 = reshape_latent_to(s1.shape, s2)
+
+ m1 = torch.linalg.vector_norm(s1, dim=(1))
+ m2 = torch.linalg.vector_norm(s2, dim=(1))
+
+ s1 = torch.nan_to_num(s1 / m1)
+ s2 = torch.nan_to_num(s2 / m2)
+
+ t = (s1 * ratio + s2 * (1.0 - ratio))
+ mt = torch.linalg.vector_norm(t, dim=(1))
+ st = torch.nan_to_num(t / mt)
+
+ samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio))
+ return (samples_out,)
+
NODE_CLASS_MAPPINGS = {
"LatentAdd": LatentAdd,
"LatentSubtract": LatentSubtract,
"LatentMultiply": LatentMultiply,
+ "LatentInterpolate": LatentInterpolate,
}
diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index af7cb07bfb3..d8c65c2b6b9 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -114,7 +114,7 @@ def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
- "channel": (["red", "green", "blue"],),
+ "channel": (["red", "green", "blue", "alpha"],),
}
}
@@ -124,7 +124,7 @@ def INPUT_TYPES(s):
FUNCTION = "image_to_mask"
def image_to_mask(self, image, channel):
- channels = ["red", "green", "blue"]
+ channels = ["red", "green", "blue", "alpha"]
mask = image[:, :, :, channels.index(channel)]
return (mask,)
@@ -240,8 +240,8 @@ def combine(self, destination, source, x, y, operation):
right, bottom = (min(left + source.shape[-1], destination.shape[-1]), min(top + source.shape[-2], destination.shape[-2]))
visible_width, visible_height = (right - left, bottom - top,)
- source_portion = source[:visible_height, :visible_width]
- destination_portion = destination[top:bottom, left:right]
+ source_portion = source[:, :visible_height, :visible_width]
+ destination_portion = destination[:, top:bottom, left:right]
if operation == "multiply":
output[:, top:bottom, left:right] = destination_portion * source_portion
@@ -282,10 +282,10 @@ def INPUT_TYPES(cls):
def feather(self, mask, left, top, right, bottom):
output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone()
- left = min(left, output.shape[1])
- right = min(right, output.shape[1])
- top = min(top, output.shape[0])
- bottom = min(bottom, output.shape[0])
+ left = min(left, output.shape[-1])
+ right = min(right, output.shape[-1])
+ top = min(top, output.shape[-2])
+ bottom = min(bottom, output.shape[-2])
for x in range(left):
feather_rate = (x + 1.0) / left
@@ -331,15 +331,14 @@ def expand_mask(self, mask, expand, tapered_corners):
out = []
for m in mask:
output = m.numpy()
- while expand < 0:
- output = scipy.ndimage.grey_erosion(output, footprint=kernel)
- expand += 1
- while expand > 0:
- output = scipy.ndimage.grey_dilation(output, footprint=kernel)
- expand -= 1
+ for _ in range(abs(expand)):
+ if expand < 0:
+ output = scipy.ndimage.grey_erosion(output, footprint=kernel)
+ else:
+ output = scipy.ndimage.grey_dilation(output, footprint=kernel)
output = torch.from_numpy(output)
out.append(output)
- return (torch.cat(out, dim=0),)
+ return (torch.stack(out, dim=0),)
diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py
new file mode 100644
index 00000000000..efcdf1932e4
--- /dev/null
+++ b/comfy_extras/nodes_model_advanced.py
@@ -0,0 +1,205 @@
+import folder_paths
+import comfy.sd
+import comfy.model_sampling
+import torch
+
+class LCM(comfy.model_sampling.EPS):
+ def calculate_denoised(self, sigma, model_output, model_input):
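+ # latent consistency model parameterization: blend the predicted x0 with the model input using the c_skip / c_out boundary coefficients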
+ timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+ x0 = model_input - model_output * sigma
+
+ sigma_data = 0.5
+ scaled_timestep = timestep * 10.0 #timestep_scaling
+
+ c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
+ c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
+
+ return c_out * x0 + c_skip * model_input
+
+class ModelSamplingDiscreteDistilled(torch.nn.Module):
+ original_timesteps = 50
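+ # LCM distilled models only use 50 of the 1000 training timesteps (one every skip_steps), so sigma <-> timestep conversions work in strides of skip_steps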
+
+ def __init__(self):
+ super().__init__()
+ self.sigma_data = 1.0
+ timesteps = 1000
+ beta_start = 0.00085
+ beta_end = 0.012
+
+ betas = torch.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype=torch.float32) ** 2
+ alphas = 1.0 - betas
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
+
+ self.skip_steps = timesteps // self.original_timesteps
+
+
+ alphas_cumprod_valid = torch.zeros((self.original_timesteps), dtype=torch.float32)
+ for x in range(self.original_timesteps):
+ alphas_cumprod_valid[self.original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps]
+
+ sigmas = ((1 - alphas_cumprod_valid) / alphas_cumprod_valid) ** 0.5
+ self.set_sigmas(sigmas)
+
+ def set_sigmas(self, sigmas):
+ self.register_buffer('sigmas', sigmas)
+ self.register_buffer('log_sigmas', sigmas.log())
+
+ @property
+ def sigma_min(self):
+ return self.sigmas[0]
+
+ @property
+ def sigma_max(self):
+ return self.sigmas[-1]
+
+ def timestep(self, sigma):
+ log_sigma = sigma.log()
+ dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
+ return (dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)).to(sigma.device)
+
+ def sigma(self, timestep):
+ t = torch.clamp(((timestep.float().to(self.log_sigmas.device) - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1))
+ low_idx = t.floor().long()
+ high_idx = t.ceil().long()
+ w = t.frac()
+ log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
+ return log_sigma.exp().to(timestep.device)
+
+ def percent_to_sigma(self, percent):
+ if percent <= 0.0:
+ return 999999999.9
+ if percent >= 1.0:
+ return 0.0
+ percent = 1.0 - percent
+ return self.sigma(torch.tensor(percent * 999.0)).item()
+
+
+def rescale_zero_terminal_snr_sigmas(sigmas):
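+ # rescale the noise schedule so the last timestep has (near) zero terminal SNR, working directly on sigmas instead of betas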
+ alphas_cumprod = 1 / ((sigmas * sigmas) + 1)
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
+
+ # Store old values.
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
+
+ # Shift so the last timestep is zero.
+ alphas_bar_sqrt -= (alphas_bar_sqrt_T)
+
+ # Scale so the first timestep is back to the old value.
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
+
+ # Convert alphas_bar_sqrt to betas
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
+ alphas_bar[-1] = 4.8973451890853435e-08
+ return ((1 - alphas_bar) / alphas_bar) ** 0.5
+
+class ModelSamplingDiscrete:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "sampling": (["eps", "v_prediction", "lcm"],),
+ "zsnr": ("BOOLEAN", {"default": False}),
+ }}
+
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "advanced/model"
+
+ def patch(self, model, sampling, zsnr):
+ m = model.clone()
+
+ sampling_base = comfy.model_sampling.ModelSamplingDiscrete
+ if sampling == "eps":
+ sampling_type = comfy.model_sampling.EPS
+ elif sampling == "v_prediction":
+ sampling_type = comfy.model_sampling.V_PREDICTION
+ elif sampling == "lcm":
+ sampling_type = LCM
+ sampling_base = ModelSamplingDiscreteDistilled
+
+ class ModelSamplingAdvanced(sampling_base, sampling_type):
+ pass
+
+ model_sampling = ModelSamplingAdvanced()
+ if zsnr:
+ model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas))
+
+ m.add_object_patch("model_sampling", model_sampling)
+ return (m, )
+
+class ModelSamplingContinuousEDM:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "sampling": (["v_prediction", "eps"],),
+ "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
+ "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
+ }}
+
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "advanced/model"
+
+ def patch(self, model, sampling, sigma_max, sigma_min):
+ m = model.clone()
+
+ if sampling == "eps":
+ sampling_type = comfy.model_sampling.EPS
+ elif sampling == "v_prediction":
+ sampling_type = comfy.model_sampling.V_PREDICTION
+
+ class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousEDM, sampling_type):
+ pass
+
+ model_sampling = ModelSamplingAdvanced()
+ model_sampling.set_sigma_range(sigma_min, sigma_max)
+ m.add_object_patch("model_sampling", model_sampling)
+ return (m, )
+
+class RescaleCFG:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "multiplier": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "advanced/model"
+
+ def patch(self, model, multiplier):
+ def rescale_cfg(args):
+ cond = args["cond"]
+ uncond = args["uncond"]
+ cond_scale = args["cond_scale"]
+ sigma = args["sigma"]
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1))
+ x_orig = args["input"]
+
+ #rescale cfg has to be done on v-pred model output
+ x = x_orig / (sigma * sigma + 1.0)
+ cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
+ uncond = ((x - (x_orig - uncond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
+
+ #rescalecfg
+ x_cfg = uncond + cond_scale * (cond - uncond)
+ ro_pos = torch.std(cond, dim=(1,2,3), keepdim=True)
+ ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True)
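+ # match the standard deviation of the CFG result back to that of the conditional prediction, then blend the two by multiplier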
+
+ x_rescaled = x_cfg * (ro_pos / ro_cfg)
+ x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg
+
+ return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5)
+
+ m = model.clone()
+ m.set_model_sampler_cfg_function(rescale_cfg)
+ return (m, )
+
+NODE_CLASS_MAPPINGS = {
+ "ModelSamplingDiscrete": ModelSamplingDiscrete,
+ "ModelSamplingContinuousEDM": ModelSamplingContinuousEDM,
+ "RescaleCFG": RescaleCFG,
+}
diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py
new file mode 100644
index 00000000000..48bcc689273
--- /dev/null
+++ b/comfy_extras/nodes_model_downscale.py
@@ -0,0 +1,53 @@
+import torch
+import comfy.utils
+
+class PatchModelAddDownscale:
+ upscale_methods = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "block_number": ("INT", {"default": 3, "min": 1, "max": 32, "step": 1}),
+ "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}),
+ "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
+ "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}),
+ "downscale_after_skip": ("BOOLEAN", {"default": True}),
+ "downscale_method": (s.upscale_methods,),
+ "upscale_method": (s.upscale_methods,),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "_for_testing"
+
+ def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method):
+ sigma_start = model.model.model_sampling.percent_to_sigma(start_percent)
+ sigma_end = model.model.model_sampling.percent_to_sigma(end_percent)
+
+ def input_block_patch(h, transformer_options):
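+ # "Deep Shrink": downscale the hidden states at the chosen input block while sigma is within [sigma_end, sigma_start]; output_block_patch upscales them back once the skip connection sizes stop matching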
+ if transformer_options["block"][1] == block_number:
+ sigma = transformer_options["sigmas"][0].item()
+ if sigma <= sigma_start and sigma >= sigma_end:
+ h = comfy.utils.common_upscale(h, round(h.shape[-1] * (1.0 / downscale_factor)), round(h.shape[-2] * (1.0 / downscale_factor)), downscale_method, "disabled")
+ return h
+
+ def output_block_patch(h, hsp, transformer_options):
+ if h.shape[2] != hsp.shape[2]:
+ h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled")
+ return h, hsp
+
+ m = model.clone()
+ if downscale_after_skip:
+ m.set_model_input_block_patch_after_skip(input_block_patch)
+ else:
+ m.set_model_input_block_patch(input_block_patch)
+ m.set_model_output_block_patch(output_block_patch)
+ return (m, )
+
+NODE_CLASS_MAPPINGS = {
+ "PatchModelAddDownscale": PatchModelAddDownscale,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ # Sampling
+ "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)",
+}
diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py
index 3d42d78067c..dad1dd6378d 100644
--- a/comfy_extras/nodes_model_merging.py
+++ b/comfy_extras/nodes_model_merging.py
@@ -1,6 +1,7 @@
import comfy.sd
import comfy.utils
import comfy.model_base
+import comfy.model_management
import folder_paths
import json
@@ -178,6 +179,95 @@ def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=Non
comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata)
return {}
+class CLIPSave:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip": ("CLIP",),
+ "filename_prefix": ("STRING", {"default": "clip/ComfyUI"}),},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},}
+ RETURN_TYPES = ()
+ FUNCTION = "save"
+ OUTPUT_NODE = True
+
+ CATEGORY = "advanced/model_merging"
+
+ def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None):
+ prompt_info = ""
+ if prompt is not None:
+ prompt_info = json.dumps(prompt)
+
+ metadata = {}
+ if not args.disable_metadata:
+ metadata["prompt"] = prompt_info
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata[x] = json.dumps(extra_pnginfo[x])
+
+ comfy.model_management.load_models_gpu([clip.load_model()])
+ clip_sd = clip.get_sd()
+
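+ # SDXL style CLIP stacks are written out as separate clip_l / clip_g files; whatever has no such prefix is saved under the plain filename prefix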
+ for prefix in ["clip_l.", "clip_g.", ""]:
+ k = list(filter(lambda a: a.startswith(prefix), clip_sd.keys()))
+ current_clip_sd = {}
+ for x in k:
+ current_clip_sd[x] = clip_sd.pop(x)
+ if len(current_clip_sd) == 0:
+ continue
+
+ p = prefix[:-1]
+ replace_prefix = {}
+ filename_prefix_ = filename_prefix
+ if len(p) > 0:
+ filename_prefix_ = "{}_{}".format(filename_prefix_, p)
+ replace_prefix[prefix] = ""
+ replace_prefix["transformer."] = ""
+
+ full_output_folder, filename, counter, subfolder, filename_prefix_ = folder_paths.get_save_image_path(filename_prefix_, self.output_dir)
+
+ output_checkpoint = f"{filename}_{counter:05}_.safetensors"
+ output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
+
+ current_clip_sd = comfy.utils.state_dict_prefix_replace(current_clip_sd, replace_prefix)
+
+ comfy.utils.save_torch_file(current_clip_sd, output_checkpoint, metadata=metadata)
+ return {}
+
+class VAESave:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "vae": ("VAE",),
+ "filename_prefix": ("STRING", {"default": "vae/ComfyUI_vae"}),},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},}
+ RETURN_TYPES = ()
+ FUNCTION = "save"
+ OUTPUT_NODE = True
+
+ CATEGORY = "advanced/model_merging"
+
+ def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None):
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+ prompt_info = ""
+ if prompt is not None:
+ prompt_info = json.dumps(prompt)
+
+ metadata = {}
+ if not args.disable_metadata:
+ metadata["prompt"] = prompt_info
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata[x] = json.dumps(extra_pnginfo[x])
+
+ output_checkpoint = f"{filename}_{counter:05}_.safetensors"
+ output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
+
+ comfy.utils.save_torch_file(vae.get_sd(), output_checkpoint, metadata=metadata)
+ return {}
NODE_CLASS_MAPPINGS = {
"ModelMergeSimple": ModelMergeSimple,
@@ -186,4 +276,6 @@ def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=Non
"ModelMergeAdd": ModelAdd,
"CheckpointSave": CheckpointSave,
"CLIPMergeSimple": CLIPMergeSimple,
+ "CLIPSave": CLIPSave,
+ "VAESave": VAESave,
}
diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py
index 3f651e59456..12704f545d6 100644
--- a/comfy_extras/nodes_post_processing.py
+++ b/comfy_extras/nodes_post_processing.py
@@ -23,7 +23,7 @@ def INPUT_TYPES(s):
"max": 1.0,
"step": 0.01
}),
- "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],),
+ "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],),
},
}
@@ -54,6 +54,8 @@ def blend_mode(self, img1, img2, mode):
return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
elif mode == "soft_light":
return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1))
+ elif mode == "difference":
+ return img1 - img2
else:
raise ValueError(f"Unsupported blend mode: {mode}")
@@ -126,7 +128,7 @@ def INPUT_TYPES(s):
"max": 256,
"step": 1
}),
- "dither": (["none", "floyd-steinberg"],),
+ "dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],),
},
}
@@ -135,19 +137,47 @@ def INPUT_TYPES(s):
CATEGORY = "image/postprocessing"
- def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"):
+ def bayer(im, pal_im, order):
+ def normalized_bayer_matrix(n):
+ if n == 0:
+ return np.zeros((1,1), "float32")
+ else:
+ q = 4 ** n
+ m = q * normalized_bayer_matrix(n - 1)
+ return np.bmat(((m-1.5, m+0.5), (m+1.5, m-0.5))) / q
+
+ num_colors = len(pal_im.getpalette()) // 3
+ spread = 2 * 256 / num_colors
+ bayer_n = int(math.log2(order))
+ bayer_matrix = torch.from_numpy(spread * normalized_bayer_matrix(bayer_n) + 0.5)
+
+ result = torch.from_numpy(np.array(im).astype(np.float32))
+ tw = math.ceil(result.shape[0] / bayer_matrix.shape[0])
+ th = math.ceil(result.shape[1] / bayer_matrix.shape[1])
+ tiled_matrix = bayer_matrix.tile(tw, th).unsqueeze(-1)
+ result.add_(tiled_matrix[:result.shape[0],:result.shape[1]]).clamp_(0, 255)
+ result = result.to(dtype=torch.uint8)
+
+ im = Image.fromarray(result.cpu().numpy())
+ im = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
+ return im
+
+ def quantize(self, image: torch.Tensor, colors: int, dither: str):
batch_size, height, width, _ = image.shape
result = torch.zeros_like(image)
- dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE
-
for b in range(batch_size):
- tensor_image = image[b]
- img = (tensor_image * 255).to(torch.uint8).numpy()
- pil_image = Image.fromarray(img, mode='RGB')
+ im = Image.fromarray((image[b] * 255).to(torch.uint8).numpy(), mode='RGB')
+
+ pal_im = im.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836
- palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836
- quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option)
+ if dither == "none":
+ quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
+ elif dither == "floyd-steinberg":
+ quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.FLOYDSTEINBERG)
+ elif dither.startswith("bayer"):
+ order = int(dither.split('-')[-1])
+ quantized_image = Quantize.bayer(im, pal_im, order)
quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
result[b] = quantized_array
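
Note on the new ordered-dither options above: the normalized Bayer matrix is built recursively and its thresholds are spread by the palette size before being added to the image. A minimal standalone sketch of that recursion (NumPy only; the printed order-2 values follow directly from the formula):

import numpy as np

def normalized_bayer_matrix(n):
    # B(0) is a single zero; B(n) arranges 4**n * B(n-1) into a 2x2 block with
    # offsets (-1.5, +0.5, +1.5, -0.5) and divides by 4**n, keeping values centred on 0.
    if n == 0:
        return np.zeros((1, 1), "float32")
    q = 4 ** n
    m = q * normalized_bayer_matrix(n - 1)
    return np.bmat(((m - 1.5, m + 0.5), (m + 1.5, m - 0.5))) / q

print(normalized_bayer_matrix(1))  # [[-0.375  0.125] [ 0.375 -0.125]], the "bayer-2" thresholds

The node then scales these thresholds by 2 * 256 / num_colors, adds the tiled matrix to the image, clamps to [0, 255], and re-quantizes against the precomputed palette with Image.Dither.NONE.
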
diff --git a/comfy_extras/nodes_rebatch.py b/comfy_extras/nodes_rebatch.py
index 0a9daf27276..88a4ebe29f6 100644
--- a/comfy_extras/nodes_rebatch.py
+++ b/comfy_extras/nodes_rebatch.py
@@ -4,7 +4,7 @@ class LatentRebatch:
@classmethod
def INPUT_TYPES(s):
return {"required": { "latents": ("LATENT",),
- "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
}}
RETURN_TYPES = ("LATENT",)
INPUT_IS_LIST = True
diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py
new file mode 100644
index 00000000000..26a717a3836
--- /dev/null
+++ b/comfy_extras/nodes_video_model.py
@@ -0,0 +1,89 @@
+import nodes
+import torch
+import comfy.utils
+import comfy.sd
+import folder_paths
+
+
+class ImageOnlyCheckpointLoader:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
+ }}
+ RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE")
+ FUNCTION = "load_checkpoint"
+
+ CATEGORY = "loaders/video_models"
+
+ def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
+ ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
+ out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=False, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+ return (out[0], out[3], out[2])
+
+
+class SVD_img2vid_Conditioning:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip_vision": ("CLIP_VISION",),
+ "init_image": ("IMAGE",),
+ "vae": ("VAE",),
+ "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
+ "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}),
+ "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}),
+ "fps": ("INT", {"default": 6, "min": 1, "max": 1024}),
+ "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01})
+ }}
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/video_models"
+
+ def encode(self, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level):
+ output = clip_vision.encode_image(init_image)
+ pooled = output.image_embeds.unsqueeze(0)
+ pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
+ encode_pixels = pixels[:,:,:,:3]
+ if augmentation_level > 0:
+ encode_pixels += torch.randn_like(pixels) * augmentation_level
+ t = vae.encode(encode_pixels)
+ positive = [[pooled, {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}]]
+ negative = [[torch.zeros_like(pooled), {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}]]
+ latent = torch.zeros([video_frames, 4, height // 8, width // 8])
+ return (positive, negative, {"samples":latent})
+
+class VideoLinearCFGGuidance:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "min_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "sampling/video_models"
+
+ def patch(self, model, min_cfg):
+ def linear_cfg(args):
+ cond = args["cond"]
+ uncond = args["uncond"]
+ cond_scale = args["cond_scale"]
+
+ scale = torch.linspace(min_cfg, cond_scale, cond.shape[0], device=cond.device).reshape((cond.shape[0], 1, 1, 1))
+ return uncond + scale * (cond - uncond)
+
+ m = model.clone()
+ m.set_model_sampler_cfg_function(linear_cfg)
+ return (m, )
+
+NODE_CLASS_MAPPINGS = {
+ "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader,
+ "SVD_img2vid_Conditioning": SVD_img2vid_Conditioning,
+ "VideoLinearCFGGuidance": VideoLinearCFGGuidance,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "ImageOnlyCheckpointLoader": "Image Only Checkpoint Loader (img2vid model)",
+}
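
VideoLinearCFGGuidance swaps the sampler's single CFG value for a linear per-frame ramp over the latent batch: the first frame is guided with min_cfg and the last with the sampler's cfg. A tiny illustration of the ramp values (torch only; the frame count and scales here are example numbers, not values enforced by the node):

import torch

frames, min_cfg, cfg = 14, 1.0, 2.5
scale = torch.linspace(min_cfg, cfg, frames)  # tensor([1.0000, 1.1154, ..., 2.5000])
# frame i is denoised with uncond + scale[i] * (cond - uncond), matching linear_cfg above
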
diff --git a/execution.py b/execution.py
index da0991fe9f1..4451400c09a 100644
--- a/execution.py
+++ b/execution.py
@@ -2,6 +2,7 @@
import sys
import copy
import json
+import logging
import threading
import heapq
import traceback
@@ -176,7 +177,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute
if server.client_id is not None:
server.send_sync("executed", { "node": unique_id, "output": output_ui, "prompt_id": prompt_id }, server.client_id)
except comfy.model_management.InterruptProcessingException as iex:
- print("Processing interrupted")
+ logging.info("Processing interrupted")
# skip formatting inputs/outputs
error_details = {
@@ -197,8 +198,8 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute
for node_id, node_outputs in outputs.items():
output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs]
- print("!!! Exception during processing !!!")
- print(traceback.format_exc())
+ logging.error("!!! Exception during processing !!!")
+ logging.error(traceback.format_exc())
error_details = {
"node_id": unique_id,
@@ -656,11 +657,11 @@ def validate_prompt(prompt):
if valid is True:
good_outputs.add(o)
else:
- print(f"Failed to validate prompt for output {o}:")
+ logging.error(f"Failed to validate prompt for output {o}:")
if len(reasons) > 0:
- print("* (prompt):")
+ logging.error("* (prompt):")
for reason in reasons:
- print(f" - {reason['message']}: {reason['details']}")
+ logging.error(f" - {reason['message']}: {reason['details']}")
errors += [(o, reasons)]
for node_id, result in validated.items():
valid = result[0]
@@ -676,11 +677,11 @@ def validate_prompt(prompt):
"dependent_outputs": [],
"class_type": class_type
}
- print(f"* {class_type} {node_id}:")
+ logging.error(f"* {class_type} {node_id}:")
for reason in reasons:
- print(f" - {reason['message']}: {reason['details']}")
+ logging.error(f" - {reason['message']}: {reason['details']}")
node_errors[node_id]["dependent_outputs"].append(o)
- print("Output will be ignored")
+ logging.error("Output will be ignored")
if len(good_outputs) == 0:
errors_list = []
@@ -700,6 +701,7 @@ def validate_prompt(prompt):
return (True, None, list(good_outputs), node_errors)
+MAXIMUM_HISTORY_SIZE = 10000
class PromptQueue:
def __init__(self, server):
@@ -718,10 +720,12 @@ def put(self, item):
self.server.queue_updated()
self.not_empty.notify()
- def get(self):
+ def get(self, timeout=None):
with self.not_empty:
while len(self.queue) == 0:
- self.not_empty.wait()
+ self.not_empty.wait(timeout=timeout)
+ if timeout is not None and len(self.queue) == 0:
+ return None
item = heapq.heappop(self.queue)
i = self.task_counter
self.currently_running[i] = copy.deepcopy(item)
@@ -732,6 +736,8 @@ def get(self):
def task_done(self, item_id, outputs):
with self.mutex:
prompt = self.currently_running.pop(item_id)
+ if len(self.history) > MAXIMUM_HISTORY_SIZE:
+ self.history.pop(next(iter(self.history)))
self.history[prompt[1]] = { "prompt": prompt, "outputs": {} }
for o in outputs:
self.history[prompt[1]]["outputs"][o] = outputs[o]
@@ -766,10 +772,20 @@ def delete_queue_item(self, function):
return True
return False
- def get_history(self, prompt_id=None):
+ def get_history(self, prompt_id=None, max_items=None, offset=-1):
with self.mutex:
if prompt_id is None:
- return copy.deepcopy(self.history)
+ out = {}
+ i = 0
+ if offset < 0 and max_items is not None:
+ offset = len(self.history) - max_items
+ for k in self.history:
+ if i >= offset:
+ out[k] = self.history[k]
+ if max_items is not None and len(out) >= max_items:
+ break
+ i += 1
+ return out
elif prompt_id in self.history:
return {prompt_id: copy.deepcopy(self.history[prompt_id])}
else:
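
With these changes get_history supports simple pagination: when max_items is set and no offset is given, offset defaults to len(history) - max_items, so only the most recent entries are returned (insertion order is preserved because the history dict is append-only). A plain-dict sketch of the same windowing, with illustrative keys:

history = {f"prompt-{i}": {"outputs": {}} for i in range(5)}
max_items = 2
offset = len(history) - max_items  # 3
out = {k: v for i, (k, v) in enumerate(history.items()) if i >= offset}
# out holds the two most recently queued prompts, mirroring get_history(max_items=2)
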
diff --git a/extra_model_paths.yaml.example b/extra_model_paths.yaml.example
index 36078fffc7b..846d04dbeb4 100644
--- a/extra_model_paths.yaml.example
+++ b/extra_model_paths.yaml.example
@@ -1,5 +1,6 @@
#Rename this to extra_model_paths.yaml and ComfyUI will load it
+
#config for a1111 ui
#all you have to do is change the base_path to where yours is installed
a111:
@@ -19,6 +20,21 @@ a111:
hypernetworks: models/hypernetworks
controlnet: models/ControlNet
+#config for comfyui
+#your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc.
+
+#comfyui:
+# base_path: path/to/comfyui/
+# checkpoints: models/checkpoints/
+# clip: models/clip/
+# clip_vision: models/clip_vision/
+# configs: models/configs/
+# controlnet: models/controlnet/
+# embeddings: models/embeddings/
+# loras: models/loras/
+# upscale_models: models/upscale_models/
+# vae: models/vae/
+
#other_ui:
# base_path: path/to/ui
# checkpoints: models/checkpoints
diff --git a/folder_paths.py b/folder_paths.py
index 4a10c68e7e7..98704945e56 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -29,6 +29,8 @@
folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions)
+folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
+
output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
@@ -36,7 +38,10 @@
filename_list_cache = {}
if not os.path.exists(input_directory):
- os.makedirs(input_directory)
+ try:
+ os.makedirs(input_directory)
+ except:
+ print("Failed to create input directory")
def set_output_directory(output_dir):
global output_directory
@@ -46,6 +51,10 @@ def set_temp_directory(temp_dir):
global temp_directory
temp_directory = temp_dir
+def set_input_directory(input_dir):
+ global input_directory
+ input_directory = input_dir
+
def get_output_directory():
global output_directory
return output_directory
@@ -140,7 +149,7 @@ def recursive_search(directory, excluded_dir_names=None):
return result, dirs
def filter_files_extensions(files, extensions):
- return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))
+ return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions or len(extensions) == 0, files)))
@@ -222,8 +231,12 @@ def compute_vars(input, image_width, image_height):
full_output_folder = os.path.join(output_dir, subfolder)
if os.path.commonpath((output_dir, os.path.abspath(full_output_folder))) != output_dir:
- print("Saving image outside the output folder is not allowed.")
- return {}
+ err = "**** ERROR: Saving image outside the output folder is not allowed." + \
+ "\n full_output_folder: " + os.path.abspath(full_output_folder) + \
+ "\n output_dir: " + output_dir + \
+ "\n commonpath: " + os.path.commonpath((output_dir, os.path.abspath(full_output_folder)))
+ print(err)
+ raise Exception(err)
try:
counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
diff --git a/latent_preview.py b/latent_preview.py
index 87240a58291..61754751efe 100644
--- a/latent_preview.py
+++ b/latent_preview.py
@@ -5,6 +5,7 @@
from comfy.cli_args import args, LatentPreviewMethod
from comfy.taesd.taesd import TAESD
import folder_paths
+import comfy.utils
MAX_PREVIEW_RESOLUTION = 512
@@ -21,10 +22,7 @@ def __init__(self, taesd):
self.taesd = taesd
def decode_latent_to_preview(self, x0):
- x_sample = self.taesd.decoder(x0)[0].detach()
- # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5) # returns value in [-2, 2]
- x_sample = x_sample.sub(0.5).mul(2)
-
+ x_sample = self.taesd.decode(x0[:1])[0].detach()
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
@@ -55,7 +53,12 @@ def get_previewer(device, latent_format):
# TODO previewer methods
taesd_decoder_path = None
if latent_format.taesd_decoder_name is not None:
- taesd_decoder_path = folder_paths.get_full_path("vae_approx", latent_format.taesd_decoder_name)
+ taesd_decoder_path = next(
+ (fn for fn in folder_paths.get_filename_list("vae_approx")
+ if fn.startswith(latent_format.taesd_decoder_name)),
+ ""
+ )
+ taesd_decoder_path = folder_paths.get_full_path("vae_approx", taesd_decoder_path)
if method == LatentPreviewMethod.Auto:
method = LatentPreviewMethod.Latent2RGB
@@ -74,4 +77,21 @@ def get_previewer(device, latent_format):
previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors)
return previewer
+def prepare_callback(model, steps, x0_output_dict=None):
+ preview_format = "JPEG"
+ if preview_format not in ["JPEG", "PNG"]:
+ preview_format = "JPEG"
+
+ previewer = get_previewer(model.load_device, model.model.latent_format)
+
+ pbar = comfy.utils.ProgressBar(steps)
+ def callback(step, x0, x, total_steps):
+ if x0_output_dict is not None:
+ x0_output_dict["x0"] = x0
+
+ preview_bytes = None
+ if previewer:
+ preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+ pbar.update_absolute(step + 1, total_steps, preview_bytes)
+ return callback
diff --git a/main.py b/main.py
index 7c5eaee0a83..1f9c5f443c3 100644
--- a/main.py
+++ b/main.py
@@ -88,18 +88,37 @@ def cuda_malloc_warning():
def prompt_worker(q, server):
e = execution.PromptExecutor(server)
+ last_gc_collect = 0
+ need_gc = False
+ gc_collect_interval = 10.0
+
while True:
- item, item_id = q.get()
- execution_start_time = time.perf_counter()
- prompt_id = item[1]
- e.execute(item[2], prompt_id, item[3], item[4])
- q.task_done(item_id, e.outputs_ui)
- if server.client_id is not None:
- server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)
-
- print("Prompt executed in {:.2f} seconds".format(time.perf_counter() - execution_start_time))
- gc.collect()
- comfy.model_management.soft_empty_cache()
+ timeout = None
+ if need_gc:
+ timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)
+
+ queue_item = q.get(timeout=timeout)
+ if queue_item is not None:
+ item, item_id = queue_item
+ execution_start_time = time.perf_counter()
+ prompt_id = item[1]
+ e.execute(item[2], prompt_id, item[3], item[4])
+ need_gc = True
+ q.task_done(item_id, e.outputs_ui)
+ if server.client_id is not None:
+ server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)
+
+ current_time = time.perf_counter()
+ execution_time = current_time - execution_start_time
+ print("Prompt executed in {:.2f} seconds".format(execution_time))
+
+ if need_gc:
+ current_time = time.perf_counter()
+ if (current_time - last_gc_collect) > gc_collect_interval:
+ gc.collect()
+ comfy.model_management.soft_empty_cache()
+ last_gc_collect = current_time
+ need_gc = False
async def run(server, address='', port=8188, verbose=True, call_on_start=None):
await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())
@@ -175,6 +194,16 @@ def load_extra_path_config(yaml_path):
print(f"Setting output directory to: {output_dir}")
folder_paths.set_output_directory(output_dir)
+ #These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes
+ folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
+ folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
+ folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
+
+ if args.input_directory:
+ input_dir = os.path.abspath(args.input_directory)
+ print(f"Setting input directory to: {input_dir}")
+ folder_paths.set_input_directory(input_dir)
+
if args.quick_test_for_ci:
exit(0)
diff --git a/nodes.py b/nodes.py
index 4abb0d24d74..24e591fdde8 100644
--- a/nodes.py
+++ b/nodes.py
@@ -248,8 +248,8 @@ def set_range(self, conditioning, start, end):
c = []
for t in conditioning:
d = t[1].copy()
- d['start_percent'] = 1.0 - start
- d['end_percent'] = 1.0 - end
+ d['start_percent'] = start
+ d['end_percent'] = end
n = [t[0], d]
c.append(n)
return (c, )
@@ -572,10 +572,69 @@ def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
return (model_lora, clip_lora)
+class LoraLoaderModelOnly(LoraLoader):
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "lora_name": (folder_paths.get_filename_list("loras"), ),
+ "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "load_lora_model_only"
+
+ def load_lora_model_only(self, model, lora_name, strength_model):
+ return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)
+
class VAELoader:
+ @staticmethod
+ def vae_list():
+ vaes = folder_paths.get_filename_list("vae")
+ approx_vaes = folder_paths.get_filename_list("vae_approx")
+ sdxl_taesd_enc = False
+ sdxl_taesd_dec = False
+ sd1_taesd_enc = False
+ sd1_taesd_dec = False
+
+ for v in approx_vaes:
+ if v.startswith("taesd_decoder."):
+ sd1_taesd_dec = True
+ elif v.startswith("taesd_encoder."):
+ sd1_taesd_enc = True
+ elif v.startswith("taesdxl_decoder."):
+ sdxl_taesd_dec = True
+ elif v.startswith("taesdxl_encoder."):
+ sdxl_taesd_enc = True
+ if sd1_taesd_dec and sd1_taesd_enc:
+ vaes.append("taesd")
+ if sdxl_taesd_dec and sdxl_taesd_enc:
+ vaes.append("taesdxl")
+ return vaes
+
+ @staticmethod
+ def load_taesd(name):
+ sd = {}
+ approx_vaes = folder_paths.get_filename_list("vae_approx")
+
+ encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
+ decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))
+
+ enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
+ for k in enc:
+ sd["taesd_encoder.{}".format(k)] = enc[k]
+
+ dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
+ for k in dec:
+ sd["taesd_decoder.{}".format(k)] = dec[k]
+
+ if name == "taesd":
+ sd["vae_scale"] = torch.tensor(0.18215)
+ elif name == "taesdxl":
+ sd["vae_scale"] = torch.tensor(0.13025)
+ return sd
+
@classmethod
def INPUT_TYPES(s):
- return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
+ return {"required": { "vae_name": (s.vae_list(), )}}
RETURN_TYPES = ("VAE",)
FUNCTION = "load_vae"
@@ -583,8 +642,12 @@ def INPUT_TYPES(s):
#TODO: scale factor?
def load_vae(self, vae_name):
- vae_path = folder_paths.get_full_path("vae", vae_name)
- vae = comfy.sd.VAE(ckpt_path=vae_path)
+ if vae_name in ["taesd", "taesdxl"]:
+ sd = self.load_taesd(vae_name)
+ else:
+ vae_path = folder_paths.get_full_path("vae", vae_name)
+ sd = comfy.utils.load_torch_file(vae_path)
+ vae = comfy.sd.VAE(sd=sd)
return (vae,)
class ControlNetLoader:
@@ -684,7 +747,7 @@ def apply_controlnet(self, positive, negative, control_net, image, strength, sta
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
- c_net = control_net.copy().set_cond_hint(control_hint, strength, (1.0 - start_percent, 1.0 - end_percent))
+ c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent))
c_net.set_previous_controlnet(prev_cnet)
cnets[prev_cnet] = c_net
@@ -1189,11 +1252,8 @@ def set_mask(self, samples, mask):
s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
return (s,)
-
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
- device = comfy.model_management.get_torch_device()
latent_image = latent["samples"]
-
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
@@ -1204,22 +1264,11 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
if "noise_mask" in latent:
noise_mask = latent["noise_mask"]
- preview_format = "JPEG"
- if preview_format not in ["JPEG", "PNG"]:
- preview_format = "JPEG"
-
- previewer = latent_preview.get_previewer(device, model.model.latent_format)
-
- pbar = comfy.utils.ProgressBar(steps)
- def callback(step, x0, x, total_steps):
- preview_bytes = None
- if previewer:
- preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
- pbar.update_absolute(step + 1, total_steps, preview_bytes)
-
+ callback = latent_preview.prepare_callback(model, steps)
+ disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
- force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed)
+ force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
out = latent.copy()
out["samples"] = samples
return (out, )
@@ -1231,7 +1280,7 @@ def INPUT_TYPES(s):
{"model": ("MODEL",),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
- "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
+ "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
"positive": ("CONDITIONING", ),
@@ -1257,7 +1306,7 @@ def INPUT_TYPES(s):
"add_noise": (["enable", "disable"], ),
"noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
- "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
+ "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
"positive": ("CONDITIONING", ),
@@ -1288,6 +1337,7 @@ def __init__(self):
self.output_dir = folder_paths.get_output_directory()
self.type = "output"
self.prefix_append = ""
+ self.compress_level = 4
@classmethod
def INPUT_TYPES(s):
@@ -1321,7 +1371,7 @@ def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pngi
metadata.add_text(x, json.dumps(extra_pnginfo[x]))
file = f"{filename}_{counter:05}_.png"
- img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
+ img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
results.append({
"filename": file,
"subfolder": subfolder,
@@ -1336,6 +1386,7 @@ def __init__(self):
self.output_dir = folder_paths.get_temp_directory()
self.type = "temp"
self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
+ self.compress_level = 1
@classmethod
def INPUT_TYPES(s):
@@ -1667,6 +1718,7 @@ def expand_image(self, image, left, top, right, bottom, feathering):
"ConditioningZeroOut": ConditioningZeroOut,
"ConditioningSetTimestepRange": ConditioningSetTimestepRange,
+ "LoraLoaderModelOnly": LoraLoaderModelOnly,
}
NODE_DISPLAY_NAME_MAPPINGS = {
@@ -1674,7 +1726,7 @@ def expand_image(self, image, left, top, right, bottom, feathering):
"KSampler": "KSampler",
"KSamplerAdvanced": "KSampler (Advanced)",
# Loaders
- "CheckpointLoader": "Load Checkpoint (With Config)",
+ "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
"CheckpointLoaderSimple": "Load Checkpoint",
"VAELoader": "Load VAE",
"LoraLoader": "Load LoRA",
@@ -1772,7 +1824,7 @@ def load_custom_nodes():
node_paths = folder_paths.get_folder_paths("custom_nodes")
node_import_times = []
for custom_node_path in node_paths:
- possible_modules = os.listdir(custom_node_path)
+ possible_modules = os.listdir(os.path.realpath(custom_node_path))
if "__pycache__" in possible_modules:
possible_modules.remove("__pycache__")
@@ -1795,15 +1847,29 @@ def load_custom_nodes():
print()
def init_custom_nodes():
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_latent.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_rebatch.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_model_merging.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_tomesd.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_clip_sdxl.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_canny.py"))
- load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_freelunch.py"))
+ extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
+ extras_files = [
+ "nodes_latent.py",
+ "nodes_hypernetwork.py",
+ "nodes_upscale_model.py",
+ "nodes_post_processing.py",
+ "nodes_mask.py",
+ "nodes_compositing.py",
+ "nodes_rebatch.py",
+ "nodes_model_merging.py",
+ "nodes_tomesd.py",
+ "nodes_clip_sdxl.py",
+ "nodes_canny.py",
+ "nodes_freelunch.py",
+ "nodes_custom_sampler.py",
+ "nodes_hypertile.py",
+ "nodes_model_advanced.py",
+ "nodes_model_downscale.py",
+ "nodes_images.py",
+ "nodes_video_model.py",
+ ]
+
+ for node_file in extras_files:
+ load_custom_node(os.path.join(extras_dir, node_file))
+
load_custom_nodes()
diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb
index 4fdccaace44..ec83265b42c 100644
--- a/notebooks/comfyui_colab.ipynb
+++ b/notebooks/comfyui_colab.ipynb
@@ -47,7 +47,7 @@
" !git pull\n",
"\n",
"!echo -= Install dependencies =-\n",
- "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117"
+ "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117"
]
},
{
diff --git a/server.py b/server.py
index b2e16716ba8..9b1e3269d7f 100644
--- a/server.py
+++ b/server.py
@@ -82,7 +82,8 @@ def __init__(self, loop):
if args.enable_cors_header:
middlewares.append(create_cors_middleware(args.enable_cors_header))
- self.app = web.Application(client_max_size=104857600, middlewares=middlewares)
+ max_upload_size = round(args.max_upload_size * 1024 * 1024)
+ self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares)
self.sockets = dict()
self.web_root = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "web")
@@ -413,7 +414,11 @@ def node_info(node_class):
async def get_object_info(request):
out = {}
for x in nodes.NODE_CLASS_MAPPINGS:
- out[x] = node_info(x)
+ try:
+ out[x] = node_info(x)
+ except Exception as e:
+ print(f"[ERROR] An error occurred while retrieving information for the '{x}' node.", file=sys.stderr)
+ traceback.print_exc()
return web.json_response(out)
@routes.get("/object_info/{node_class}")
@@ -426,7 +431,10 @@ async def get_object_info_node(request):
@routes.get("/history")
async def get_history(request):
- return web.json_response(self.prompt_queue.get_history())
+ max_items = request.rel_url.query.get("max_items", None)
+ if max_items is not None:
+ max_items = int(max_items)
+ return web.json_response(self.prompt_queue.get_history(max_items=max_items))
@routes.get("/history/{prompt_id}")
async def get_history(request):
@@ -568,7 +576,7 @@ async def send_image(self, image_data, sid=None):
bytesIO = BytesIO()
header = struct.pack(">I", type_num)
bytesIO.write(header)
- image.save(bytesIO, format=image_type, quality=95, compress_level=4)
+ image.save(bytesIO, format=image_type, quality=95, compress_level=1)
preview_bytes = bytesIO.getvalue()
await self.send_bytes(BinaryEventTypes.PREVIEW_IMAGE, preview_bytes, sid=sid)
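
The /history route now reads an optional max_items query parameter and forwards it to PromptQueue.get_history, so clients can fetch only the most recent entries instead of the full history. A minimal client-side example (the requests library and the default 127.0.0.1:8188 listen address are assumptions, not part of this change):

import requests

resp = requests.get("http://127.0.0.1:8188/history", params={"max_items": 16})
history = resp.json()  # at most the 16 most recent prompts, keyed by prompt_id
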
diff --git a/tests-ui/.gitignore b/tests-ui/.gitignore
new file mode 100644
index 00000000000..b512c09d476
--- /dev/null
+++ b/tests-ui/.gitignore
@@ -0,0 +1 @@
+node_modules
\ No newline at end of file
diff --git a/tests-ui/babel.config.json b/tests-ui/babel.config.json
new file mode 100644
index 00000000000..526ddfd8df1
--- /dev/null
+++ b/tests-ui/babel.config.json
@@ -0,0 +1,3 @@
+{
+ "presets": ["@babel/preset-env"]
+}
diff --git a/tests-ui/globalSetup.js b/tests-ui/globalSetup.js
new file mode 100644
index 00000000000..b9d97f58a96
--- /dev/null
+++ b/tests-ui/globalSetup.js
@@ -0,0 +1,14 @@
+module.exports = async function () {
+ global.ResizeObserver = class ResizeObserver {
+ observe() {}
+ unobserve() {}
+ disconnect() {}
+ };
+
+ const { nop } = require("./utils/nopProxy");
+ global.enableWebGLCanvas = nop;
+
+ HTMLCanvasElement.prototype.getContext = nop;
+
+ localStorage["Comfy.Settings.Comfy.Logging.Enabled"] = "false";
+};
diff --git a/tests-ui/jest.config.js b/tests-ui/jest.config.js
new file mode 100644
index 00000000000..b5a5d646da7
--- /dev/null
+++ b/tests-ui/jest.config.js
@@ -0,0 +1,9 @@
+/** @type {import('jest').Config} */
+const config = {
+ testEnvironment: "jsdom",
+ setupFiles: ["./globalSetup.js"],
+ clearMocks: true,
+ resetModules: true,
+};
+
+module.exports = config;
diff --git a/tests-ui/package-lock.json b/tests-ui/package-lock.json
new file mode 100644
index 00000000000..35911cd7ffd
--- /dev/null
+++ b/tests-ui/package-lock.json
@@ -0,0 +1,5566 @@
+{
+ "name": "comfui-tests",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "comfui-tests",
+ "version": "1.0.0",
+ "license": "GPL-3.0",
+ "devDependencies": {
+ "@babel/preset-env": "^7.22.20",
+ "@types/jest": "^29.5.5",
+ "jest": "^29.7.0",
+ "jest-environment-jsdom": "^29.7.0"
+ }
+ },
+ "node_modules/@ampproject/remapping": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
+ "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.22.13",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz",
+ "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/highlight": "^7.22.13",
+ "chalk": "^2.4.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/code-frame/node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/code-frame/node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/code-frame/node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/@babel/code-frame/node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "dev": true
+ },
+ "node_modules/@babel/code-frame/node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/@babel/code-frame/node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/code-frame/node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.20.tgz",
+ "integrity": "sha512-BQYjKbpXjoXwFW5jGqiizJQQT/aC7pFm9Ok1OWssonuguICi264lbgMzRp2ZMmRSlfkX6DsWDDcsrctK8Rwfiw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.0.tgz",
+ "integrity": "sha512-97z/ju/Jy1rZmDxybphrBuI+jtJjFVoz7Mr9yUQVVVi+DNZE333uFQeMOqcCIy1x3WYBIbWftUSLmbNXNT7qFQ==",
+ "dev": true,
+ "dependencies": {
+ "@ampproject/remapping": "^2.2.0",
+ "@babel/code-frame": "^7.22.13",
+ "@babel/generator": "^7.23.0",
+ "@babel/helper-compilation-targets": "^7.22.15",
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helpers": "^7.23.0",
+ "@babel/parser": "^7.23.0",
+ "@babel/template": "^7.22.15",
+ "@babel/traverse": "^7.23.0",
+ "@babel/types": "^7.23.0",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz",
+ "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.23.0",
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "@jridgewell/trace-mapping": "^0.3.17",
+ "jsesc": "^2.5.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-annotate-as-pure": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz",
+ "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz",
+ "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz",
+ "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.22.9",
+ "@babel/helper-validator-option": "^7.22.15",
+ "browserslist": "^4.21.9",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-create-class-features-plugin": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz",
+ "integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.15",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.9",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-create-regexp-features-plugin": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz",
+ "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "regexpu-core": "^5.3.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-define-polyfill-provider": {
+ "version": "0.4.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.2.tgz",
+ "integrity": "sha512-k0qnnOqHn5dK9pZpfD5XXZ9SojAITdCKRn2Lp6rnDGzIbaP0rHyMPk/4wsSxVBVz4RfN0q6VpXWP2pDGIoQ7hw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-compilation-targets": "^7.22.6",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "debug": "^4.1.1",
+ "lodash.debounce": "^4.0.8",
+ "resolve": "^1.14.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
+ }
+ },
+ "node_modules/@babel/helper-environment-visitor": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz",
+ "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-function-name": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz",
+ "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.22.15",
+ "@babel/types": "^7.23.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-hoist-variables": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
+ "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-member-expression-to-functions": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz",
+ "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.23.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz",
+ "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz",
+ "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-module-imports": "^7.22.15",
+ "@babel/helper-simple-access": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "@babel/helper-validator-identifier": "^7.22.20"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-optimise-call-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz",
+ "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
+ "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-remap-async-to-generator": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz",
+ "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-wrap-function": "^7.22.20"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-replace-supers": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz",
+ "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-member-expression-to-functions": "^7.22.15",
+ "@babel/helper-optimise-call-expression": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-simple-access": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
+ "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz",
+ "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-split-export-declaration": {
+ "version": "7.22.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz",
+ "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz",
+ "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
+ "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz",
+ "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-wrap-function": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz",
+ "integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/template": "^7.22.15",
+ "@babel/types": "^7.22.19"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.23.1",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.1.tgz",
+ "integrity": "sha512-chNpneuK18yW5Oxsr+t553UZzzAs3aZnFm4bxhebsNTeshrC95yA7l5yl7GBAG+JG1rF0F7zzD2EixK9mWSDoA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.22.15",
+ "@babel/traverse": "^7.23.0",
+ "@babel/types": "^7.23.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/highlight": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz",
+ "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.22.20",
+ "chalk": "^2.4.2",
+ "js-tokens": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "dev": true
+ },
+ "node_modules/@babel/highlight/node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz",
+ "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==",
+ "dev": true,
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.15.tgz",
+ "integrity": "sha512-FB9iYlz7rURmRJyXRKEnalYPPdn87H5no108cyuQQyMwlpJ2SJtpIUBI27kdTin956pz+LPypkPVPUTlxOmrsg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.15.tgz",
+ "integrity": "sha512-Hyph9LseGvAeeXzikV88bczhsrLrIZqDPxO+sSmAunMPaGrBGhfMWzCPYTtiW9t+HzSE2wtV8e5cc5P6r1xMDQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/plugin-transform-optional-chaining": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.13.0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-private-property-in-object": {
+ "version": "7.21.0-placeholder-for-preset-env.2",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz",
+ "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-bigint": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz",
+ "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-static-block": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
+ "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-export-namespace-from": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz",
+ "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-assertions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz",
+ "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-attributes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz",
+ "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-meta": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
+ "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz",
+ "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-private-property-in-object": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
+ "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-top-level-await": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
+ "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz",
+ "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-unicode-sets-regex": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz",
+ "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.18.6",
+ "@babel/helper-plugin-utils": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-arrow-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz",
+ "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-async-generator-functions": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.15.tgz",
+ "integrity": "sha512-jBm1Es25Y+tVoTi5rfd5t1KLmL8ogLKpXszboWOTTtGFGz2RKnQe2yn7HbZ+kb/B8N0FVSGQo874NSlOU1T4+w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-remap-async-to-generator": "^7.22.9",
+ "@babel/plugin-syntax-async-generators": "^7.8.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-async-to-generator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz",
+ "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-remap-async-to-generator": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-block-scoped-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz",
+ "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-block-scoping": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.0.tgz",
+ "integrity": "sha512-cOsrbmIOXmf+5YbL99/S49Y3j46k/T16b9ml8bm9lP6N9US5iQ2yBK7gpui1pg0V/WMcXdkfKbTb7HXq9u+v4g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-class-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz",
+ "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-class-static-block": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.11.tgz",
+ "integrity": "sha512-GMM8gGmqI7guS/llMFk1bJDkKfn3v3C4KHK9Yg1ey5qcHcOlKb0QvcMrgzvxo+T03/4szNh5lghY+fEC98Kq9g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.22.11",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.12.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-classes": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.15.tgz",
+ "integrity": "sha512-VbbC3PGjBdE0wAWDdHM9G8Gm977pnYI0XpqMd6LrKISj8/DJXEsWqgRuTYaNE9Bv0JGhTZUzHDlMk18IpOuoqw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.15",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.9",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-computed-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz",
+ "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/template": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-destructuring": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.0.tgz",
+ "integrity": "sha512-vaMdgNXFkYrB+8lbgniSYWHsgqK5gjaMNcc84bMIOMRLH0L9AqYq3hwMdvnyqj1OPqea8UtjPEuS/DCenah1wg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-dotall-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz",
+ "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-duplicate-keys": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz",
+ "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-dynamic-import": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.11.tgz",
+ "integrity": "sha512-g/21plo58sfteWjaO0ZNVb+uEOkJNjAaHhbejrnBmu011l/eNDScmkbjCC3l4FKb10ViaGU4aOkFznSu2zRHgA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-exponentiation-operator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz",
+ "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-export-namespace-from": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.11.tgz",
+ "integrity": "sha512-xa7aad7q7OiT8oNZ1mU7NrISjlSkVdMbNxn9IuLZyL9AJEhs1Apba3I+u5riX1dIkdptP5EKDG5XDPByWxtehw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-for-of": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.15.tgz",
+ "integrity": "sha512-me6VGeHsx30+xh9fbDLLPi0J1HzmeIIyenoOQHuw2D4m2SAU3NrspX5XxJLBpqn5yrLzrlw2Iy3RA//Bx27iOA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-function-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz",
+ "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-json-strings": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.11.tgz",
+ "integrity": "sha512-CxT5tCqpA9/jXFlme9xIBCc5RPtdDq3JpkkhgHQqtDdiTnTI0jtZ0QzXhr5DILeYifDPp2wvY2ad+7+hLMW5Pw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-json-strings": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz",
+ "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-logical-assignment-operators": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.11.tgz",
+ "integrity": "sha512-qQwRTP4+6xFCDV5k7gZBF3C31K34ut0tbEcTKxlX/0KXxm9GLcO14p570aWxFvVzx6QAfPgq7gaeIHXJC8LswQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-member-expression-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz",
+ "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-amd": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.0.tgz",
+ "integrity": "sha512-xWT5gefv2HGSm4QHtgc1sYPbseOyf+FFDo2JbpE25GWl5BqTGO9IMwTYJRoIdjsF85GE+VegHxSCUt5EvoYTAw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-commonjs": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz",
+ "integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-systemjs": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.0.tgz",
+ "integrity": "sha512-qBej6ctXZD2f+DhlOC9yO47yEYgUh5CZNz/aBoH4j/3NOlRfJXJbY7xDQCqQVf9KbrqGzIWER1f23doHGrIHFg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-hoist-variables": "^7.22.5",
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.20"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-umd": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz",
+ "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-named-capturing-groups-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz",
+ "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-new-target": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz",
+ "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-nullish-coalescing-operator": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.11.tgz",
+ "integrity": "sha512-YZWOw4HxXrotb5xsjMJUDlLgcDXSfO9eCmdl1bgW4+/lAGdkjaEvOnQ4p5WKKdUgSzO39dgPl0pTnfxm0OAXcg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-numeric-separator": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.11.tgz",
+ "integrity": "sha512-3dzU4QGPsILdJbASKhF/V2TVP+gJya1PsueQCxIPCEcerqF21oEcrob4mzjsp2Py/1nLfF5m+xYNMDpmA8vffg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-object-rest-spread": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.15.tgz",
+ "integrity": "sha512-fEB+I1+gAmfAyxZcX1+ZUwLeAuuf8VIg67CTznZE0MqVFumWkh8xWtn58I4dxdVf080wn7gzWoF8vndOViJe9Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.22.9",
+ "@babel/helper-compilation-targets": "^7.22.15",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-transform-parameters": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-object-super": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz",
+ "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-optional-catch-binding": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.11.tgz",
+ "integrity": "sha512-rli0WxesXUeCJnMYhzAglEjLWVDF6ahb45HuprcmQuLidBJFWjNnOzssk2kuc6e33FlLaiZhG/kUIzUMWdBKaQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-optional-chaining": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.0.tgz",
+ "integrity": "sha512-sBBGXbLJjxTzLBF5rFWaikMnOGOk/BmK6vVByIdEggZ7Vn6CvWXZyRkkLFK6WE0IF8jSliyOkUN6SScFgzCM0g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-parameters": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.15.tgz",
+ "integrity": "sha512-hjk7qKIqhyzhhUvRT683TYQOFa/4cQKwQy7ALvTpODswN40MljzNDa0YldevS6tGbxwaEKVn502JmY0dP7qEtQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-private-methods": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz",
+ "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-private-property-in-object": {
+ "version": "7.22.11",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.11.tgz",
+ "integrity": "sha512-sSCbqZDBKHetvjSwpyWzhuHkmW5RummxJBVbYLkGkaiTOWGxml7SXt0iWa03bzxFIx7wOj3g/ILRd0RcJKBeSQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.11",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-property-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz",
+ "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-regenerator": {
+ "version": "7.22.10",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.10.tgz",
+ "integrity": "sha512-F28b1mDt8KcT5bUyJc/U9nwzw6cV+UmTeRlXYIl2TNqMMJif0Jeey9/RQ3C4NOd2zp0/TRsDns9ttj2L523rsw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "regenerator-transform": "^0.15.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-reserved-words": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz",
+ "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-shorthand-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz",
+ "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-spread": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz",
+ "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-sticky-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz",
+ "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-template-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz",
+ "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-typeof-symbol": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz",
+ "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-escapes": {
+ "version": "7.22.10",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.10.tgz",
+ "integrity": "sha512-lRfaRKGZCBqDlRU3UIFovdp9c9mEvlylmpod0/OatICsSfuQ9YFthRo1tpTkGsklEefZdqlEFdY4A2dwTb6ohg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-property-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz",
+ "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz",
+ "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-sets-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz",
+ "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/preset-env": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.20.tgz",
+ "integrity": "sha512-11MY04gGC4kSzlPHRfvVkNAZhUxOvm7DCJ37hPDnUENwe06npjIRAfInEMTGSb4LZK5ZgDFkv5hw0lGebHeTyg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.22.20",
+ "@babel/helper-compilation-targets": "^7.22.15",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.15",
+ "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.15",
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.15",
+ "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2",
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3",
+ "@babel/plugin-syntax-import-assertions": "^7.22.5",
+ "@babel/plugin-syntax-import-attributes": "^7.22.5",
+ "@babel/plugin-syntax-import-meta": "^7.10.4",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5",
+ "@babel/plugin-syntax-top-level-await": "^7.14.5",
+ "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6",
+ "@babel/plugin-transform-arrow-functions": "^7.22.5",
+ "@babel/plugin-transform-async-generator-functions": "^7.22.15",
+ "@babel/plugin-transform-async-to-generator": "^7.22.5",
+ "@babel/plugin-transform-block-scoped-functions": "^7.22.5",
+ "@babel/plugin-transform-block-scoping": "^7.22.15",
+ "@babel/plugin-transform-class-properties": "^7.22.5",
+ "@babel/plugin-transform-class-static-block": "^7.22.11",
+ "@babel/plugin-transform-classes": "^7.22.15",
+ "@babel/plugin-transform-computed-properties": "^7.22.5",
+ "@babel/plugin-transform-destructuring": "^7.22.15",
+ "@babel/plugin-transform-dotall-regex": "^7.22.5",
+ "@babel/plugin-transform-duplicate-keys": "^7.22.5",
+ "@babel/plugin-transform-dynamic-import": "^7.22.11",
+ "@babel/plugin-transform-exponentiation-operator": "^7.22.5",
+ "@babel/plugin-transform-export-namespace-from": "^7.22.11",
+ "@babel/plugin-transform-for-of": "^7.22.15",
+ "@babel/plugin-transform-function-name": "^7.22.5",
+ "@babel/plugin-transform-json-strings": "^7.22.11",
+ "@babel/plugin-transform-literals": "^7.22.5",
+ "@babel/plugin-transform-logical-assignment-operators": "^7.22.11",
+ "@babel/plugin-transform-member-expression-literals": "^7.22.5",
+ "@babel/plugin-transform-modules-amd": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.22.15",
+ "@babel/plugin-transform-modules-systemjs": "^7.22.11",
+ "@babel/plugin-transform-modules-umd": "^7.22.5",
+ "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5",
+ "@babel/plugin-transform-new-target": "^7.22.5",
+ "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.11",
+ "@babel/plugin-transform-numeric-separator": "^7.22.11",
+ "@babel/plugin-transform-object-rest-spread": "^7.22.15",
+ "@babel/plugin-transform-object-super": "^7.22.5",
+ "@babel/plugin-transform-optional-catch-binding": "^7.22.11",
+ "@babel/plugin-transform-optional-chaining": "^7.22.15",
+ "@babel/plugin-transform-parameters": "^7.22.15",
+ "@babel/plugin-transform-private-methods": "^7.22.5",
+ "@babel/plugin-transform-private-property-in-object": "^7.22.11",
+ "@babel/plugin-transform-property-literals": "^7.22.5",
+ "@babel/plugin-transform-regenerator": "^7.22.10",
+ "@babel/plugin-transform-reserved-words": "^7.22.5",
+ "@babel/plugin-transform-shorthand-properties": "^7.22.5",
+ "@babel/plugin-transform-spread": "^7.22.5",
+ "@babel/plugin-transform-sticky-regex": "^7.22.5",
+ "@babel/plugin-transform-template-literals": "^7.22.5",
+ "@babel/plugin-transform-typeof-symbol": "^7.22.5",
+ "@babel/plugin-transform-unicode-escapes": "^7.22.10",
+ "@babel/plugin-transform-unicode-property-regex": "^7.22.5",
+ "@babel/plugin-transform-unicode-regex": "^7.22.5",
+ "@babel/plugin-transform-unicode-sets-regex": "^7.22.5",
+ "@babel/preset-modules": "0.1.6-no-external-plugins",
+ "@babel/types": "^7.22.19",
+ "babel-plugin-polyfill-corejs2": "^0.4.5",
+ "babel-plugin-polyfill-corejs3": "^0.8.3",
+ "babel-plugin-polyfill-regenerator": "^0.5.2",
+ "core-js-compat": "^3.31.0",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-modules": {
+ "version": "0.1.6-no-external-plugins",
+ "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz",
+ "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/types": "^7.4.4",
+ "esutils": "^2.0.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0"
+ }
+ },
+ "node_modules/@babel/regjsgen": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz",
+ "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==",
+ "dev": true
+ },
+ "node_modules/@babel/runtime": {
+ "version": "7.23.1",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.1.tgz",
+ "integrity": "sha512-hC2v6p8ZSI/W0HUzh3V8C5g+NwSKzKPtJwSpTjwl0o297GP9+ZLQSkdvHz46CM3LqyoXxq+5G9komY+eSqSO0g==",
+ "dev": true,
+ "dependencies": {
+ "regenerator-runtime": "^0.14.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
+ "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.22.13",
+ "@babel/parser": "^7.22.15",
+ "@babel/types": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz",
+ "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.22.13",
+ "@babel/generator": "^7.23.0",
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-function-name": "^7.23.0",
+ "@babel/helper-hoist-variables": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "@babel/parser": "^7.23.0",
+ "@babel/types": "^7.23.0",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz",
+ "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.20",
+ "to-fast-properties": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@bcoe/v8-coverage": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
+ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
+ "dev": true
+ },
+ "node_modules/@istanbuljs/load-nyc-config": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
+ "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==",
+ "dev": true,
+ "dependencies": {
+ "camelcase": "^5.3.1",
+ "find-up": "^4.1.0",
+ "get-package-type": "^0.1.0",
+ "js-yaml": "^3.13.1",
+ "resolve-from": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@istanbuljs/schema": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
+ "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@jest/console": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz",
+ "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/core": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz",
+ "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==",
+ "dev": true,
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/reporters": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "exit": "^0.1.2",
+ "graceful-fs": "^4.2.9",
+ "jest-changed-files": "^29.7.0",
+ "jest-config": "^29.7.0",
+ "jest-haste-map": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-resolve-dependencies": "^29.7.0",
+ "jest-runner": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "jest-watcher": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@jest/environment": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz",
+ "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/expect": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz",
+ "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==",
+ "dev": true,
+ "dependencies": {
+ "expect": "^29.7.0",
+ "jest-snapshot": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/expect-utils": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz",
+ "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==",
+ "dev": true,
+ "dependencies": {
+ "jest-get-type": "^29.6.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz",
+ "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@sinonjs/fake-timers": "^10.0.2",
+ "@types/node": "*",
+ "jest-message-util": "^29.7.0",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz",
+ "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==",
+ "dev": true,
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/expect": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "jest-mock": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/reporters": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz",
+ "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==",
+ "dev": true,
+ "dependencies": {
+ "@bcoe/v8-coverage": "^0.2.3",
+ "@jest/console": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "collect-v8-coverage": "^1.0.0",
+ "exit": "^0.1.2",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "istanbul-lib-coverage": "^3.0.0",
+ "istanbul-lib-instrument": "^6.0.0",
+ "istanbul-lib-report": "^3.0.0",
+ "istanbul-lib-source-maps": "^4.0.0",
+ "istanbul-reports": "^3.1.3",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "slash": "^3.0.0",
+ "string-length": "^4.0.1",
+ "strip-ansi": "^6.0.0",
+ "v8-to-istanbul": "^9.0.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/source-map": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz",
+ "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "callsites": "^3.0.0",
+ "graceful-fs": "^4.2.9"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz",
+ "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==",
+ "dev": true,
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "collect-v8-coverage": "^1.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-sequencer": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz",
+ "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/test-result": "^29.7.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz",
+ "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@jest/types": "^29.6.3",
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "babel-plugin-istanbul": "^6.1.1",
+ "chalk": "^4.0.0",
+ "convert-source-map": "^2.0.0",
+ "fast-json-stable-stringify": "^2.1.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "pirates": "^4.0.4",
+ "slash": "^3.0.0",
+ "write-file-atomic": "^4.0.2"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz",
+ "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/set-array": "^1.0.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz",
+ "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/set-array": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
+ "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.4.15",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
+ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
+ "dev": true
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.19",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz",
+ "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true
+ },
+ "node_modules/@sinonjs/commons": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz",
+ "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==",
+ "dev": true,
+ "dependencies": {
+ "type-detect": "4.0.8"
+ }
+ },
+ "node_modules/@sinonjs/fake-timers": {
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz",
+ "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==",
+ "dev": true,
+ "dependencies": {
+ "@sinonjs/commons": "^3.0.0"
+ }
+ },
+ "node_modules/@tootallnate/once": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz",
+ "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.2",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.2.tgz",
+ "integrity": "sha512-pNpr1T1xLUc2l3xJKuPtsEky3ybxN3m4fJkknfIpTCTfIZCDW57oAg+EfCgIIp2rvCe0Wn++/FfodDS4YXxBwA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.6.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.5.tgz",
+ "integrity": "sha512-h9yIuWbJKdOPLJTbmSpPzkF67e659PbQDba7ifWm5BJ8xTv+sDmS7rFmywkWOvXedGTivCdeGSIIX8WLcRTz8w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.2",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.2.tgz",
+ "integrity": "sha512-/AVzPICMhMOMYoSx9MoKpGDKdBRsIXMNByh1PXSZoa+v6ZoLa8xxtsT/uLQ/NJm0XVAWl/BvId4MlDeXJaeIZQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.20.2",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.2.tgz",
+ "integrity": "sha512-ojlGK1Hsfce93J0+kn3H5R73elidKUaZonirN33GSmgTUMpzI/MIFfSpF3haANe3G1bEBS9/9/QEqwTzwqFsKw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.20.7"
+ }
+ },
+ "node_modules/@types/graceful-fs": {
+ "version": "4.1.7",
+ "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.7.tgz",
+ "integrity": "sha512-MhzcwU8aUygZroVwL2jeYk6JisJrPl/oov/gsgGCue9mkgl9wjGbzReYQClxiUgFDnib9FuHqTndccKeZKxTRw==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/istanbul-lib-coverage": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz",
+ "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==",
+ "dev": true
+ },
+ "node_modules/@types/istanbul-lib-report": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
+ "integrity": "sha512-gPQuzaPR5h/djlAv2apEG1HVOyj1IUs7GpfMZixU0/0KXT3pm64ylHuMUI1/Akh+sq/iikxg6Z2j+fcMDXaaTQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/istanbul-lib-coverage": "*"
+ }
+ },
+ "node_modules/@types/istanbul-reports": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.2.tgz",
+ "integrity": "sha512-kv43F9eb3Lhj+lr/Hn6OcLCs/sSM8bt+fIaP11rCYngfV6NVjzWXJ17owQtDQTL9tQ8WSLUrGsSJ6rJz0F1w1A==",
+ "dev": true,
+ "dependencies": {
+ "@types/istanbul-lib-report": "*"
+ }
+ },
+ "node_modules/@types/jest": {
+ "version": "29.5.5",
+ "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.5.tgz",
+ "integrity": "sha512-ebylz2hnsWR9mYvmBFbXJXr+33UPc4+ZdxyDXh5w0FlPBTfCVN3wPL+kuOiQt3xvrK419v7XWeAs+AeOksafXg==",
+ "dev": true,
+ "dependencies": {
+ "expect": "^29.0.0",
+ "pretty-format": "^29.0.0"
+ }
+ },
+ "node_modules/@types/jsdom": {
+ "version": "20.0.1",
+ "resolved": "https://registry.npmjs.org/@types/jsdom/-/jsdom-20.0.1.tgz",
+ "integrity": "sha512-d0r18sZPmMQr1eG35u12FZfhIXNrnsPU/g5wvRKCUf/tOGilKKwYMYGqh33BNR6ba+2gkHw1EUiHoN3mn7E5IQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*",
+ "@types/tough-cookie": "*",
+ "parse5": "^7.0.0"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "20.8.3",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.3.tgz",
+ "integrity": "sha512-jxiZQFpb+NlH5kjW49vXxvxTjeeqlbsnTAdBTKpzEdPs9itay7MscYXz3Fo9VYFEsfQ6LJFitHad3faerLAjCw==",
+ "dev": true
+ },
+ "node_modules/@types/stack-utils": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz",
+ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==",
+ "dev": true
+ },
+ "node_modules/@types/tough-cookie": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.3.tgz",
+ "integrity": "sha512-THo502dA5PzG/sfQH+42Lw3fvmYkceefOspdCwpHRul8ik2Jv1K8I5OZz1AT3/rs46kwgMCe9bSBmDLYkkOMGg==",
+ "dev": true
+ },
+ "node_modules/@types/yargs": {
+ "version": "17.0.28",
+ "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.28.tgz",
+ "integrity": "sha512-N3e3fkS86hNhtk6BEnc0rj3zcehaxx8QWhCROJkqpl5Zaoi7nAic3jH8q94jVD3zu5LGk+PUB6KAiDmimYOEQw==",
+ "dev": true,
+ "dependencies": {
+ "@types/yargs-parser": "*"
+ }
+ },
+ "node_modules/@types/yargs-parser": {
+ "version": "21.0.1",
+ "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.1.tgz",
+ "integrity": "sha512-axdPBuLuEJt0c4yI5OZssC19K2Mq1uKdrfZBzuxLvaztgqUtFYZUNw7lETExPYJR9jdEoIg4mb7RQKRQzOkeGQ==",
+ "dev": true
+ },
+ "node_modules/abab": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz",
+ "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==",
+ "dev": true
+ },
+ "node_modules/acorn": {
+ "version": "8.10.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz",
+ "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==",
+ "dev": true,
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-globals": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-7.0.1.tgz",
+ "integrity": "sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q==",
+ "dev": true,
+ "dependencies": {
+ "acorn": "^8.1.0",
+ "acorn-walk": "^8.0.2"
+ }
+ },
+ "node_modules/acorn-walk": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz",
+ "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/agent-base": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+ "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+ "dev": true,
+ "dependencies": {
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 6.0.0"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dev": true,
+ "dependencies": {
+ "type-fest": "^0.21.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "dev": true,
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "dev": true
+ },
+ "node_modules/babel-jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz",
+ "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==",
+ "dev": true,
+ "dependencies": {
+ "@jest/transform": "^29.7.0",
+ "@types/babel__core": "^7.1.14",
+ "babel-plugin-istanbul": "^6.1.1",
+ "babel-preset-jest": "^29.6.3",
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.8.0"
+ }
+ },
+ "node_modules/babel-plugin-istanbul": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz",
+ "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@istanbuljs/load-nyc-config": "^1.0.0",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-instrument": "^5.0.4",
+ "test-exclude": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz",
+ "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.12.3",
+ "@babel/parser": "^7.14.7",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-coverage": "^3.2.0",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/babel-plugin-jest-hoist": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz",
+ "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.3.3",
+ "@babel/types": "^7.3.3",
+ "@types/babel__core": "^7.1.14",
+ "@types/babel__traverse": "^7.0.6"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-corejs2": {
+ "version": "0.4.5",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.5.tgz",
+ "integrity": "sha512-19hwUH5FKl49JEsvyTcoHakh6BE0wgXLLptIyKZ3PijHc/Ci521wygORCUCCred+E/twuqRyAkE02BAWPmsHOg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.22.6",
+ "@babel/helper-define-polyfill-provider": "^0.4.2",
+ "semver": "^6.3.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-corejs3": {
+ "version": "0.8.4",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.4.tgz",
+ "integrity": "sha512-9l//BZZsPR+5XjyJMPtZSK4jv0BsTO1zDac2GC6ygx9WLGlcsnRd1Co0B2zT5fF5Ic6BZy+9m3HNZ3QcOeDKfg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-define-polyfill-provider": "^0.4.2",
+ "core-js-compat": "^3.32.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-regenerator": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.2.tgz",
+ "integrity": "sha512-tAlOptU0Xj34V1Y2PNTL4Y0FOJMDB6bZmoW39FeCQIhigGLkqu3Fj6uiXpxIf6Ij274ENdYx64y6Au+ZKlb1IA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-define-polyfill-provider": "^0.4.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
+ }
+ },
+ "node_modules/babel-preset-current-node-syntax": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz",
+ "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-bigint": "^7.8.3",
+ "@babel/plugin-syntax-class-properties": "^7.8.3",
+ "@babel/plugin-syntax-import-meta": "^7.8.3",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.8.3",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-top-level-await": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/babel-preset-jest": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz",
+ "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==",
+ "dev": true,
+ "dependencies": {
+ "babel-plugin-jest-hoist": "^29.6.3",
+ "babel-preset-current-node-syntax": "^1.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dev": true,
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "dev": true,
+ "dependencies": {
+ "fill-range": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.22.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz",
+ "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001541",
+ "electron-to-chromium": "^1.4.535",
+ "node-releases": "^2.0.13",
+ "update-browserslist-db": "^1.0.13"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/bser": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz",
+ "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==",
+ "dev": true,
+ "dependencies": {
+ "node-int64": "^0.4.0"
+ }
+ },
+ "node_modules/buffer-from": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
+ "dev": true
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001546",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001546.tgz",
+ "integrity": "sha512-zvtSJwuQFpewSyRrI3AsftF6rM0X80mZkChIt1spBGEvRglCrjTniXvinc8JKRoqTwXAgvqTImaN9igfSMtUBw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ]
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/char-regex": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz",
+ "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ci-info": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
+ "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/sibiraj-s"
+ }
+ ],
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cjs-module-lexer": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz",
+ "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==",
+ "dev": true
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dev": true,
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/co": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+ "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==",
+ "dev": true,
+ "engines": {
+ "iojs": ">= 1.0.0",
+ "node": ">= 0.12.0"
+ }
+ },
+ "node_modules/collect-v8-coverage": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz",
+ "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==",
+ "dev": true
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dev": true,
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+ "dev": true
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true
+ },
+ "node_modules/core-js-compat": {
+ "version": "3.33.0",
+ "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.33.0.tgz",
+ "integrity": "sha512-0w4LcLXsVEuNkIqwjjf9rjCoPhK8uqA4tMRh4Ge26vfLtUutshn+aRJU21I9LCJlh2QQHfisNToLjw1XEJLTWw==",
+ "dev": true,
+ "dependencies": {
+ "browserslist": "^4.22.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/create-jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz",
+ "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "exit": "^0.1.2",
+ "graceful-fs": "^4.2.9",
+ "jest-config": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "prompts": "^2.0.1"
+ },
+ "bin": {
+ "create-jest": "bin/create-jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "dev": true,
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/cssom": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz",
+ "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==",
+ "dev": true
+ },
+ "node_modules/cssstyle": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz",
+ "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==",
+ "dev": true,
+ "dependencies": {
+ "cssom": "~0.3.6"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cssstyle/node_modules/cssom": {
+ "version": "0.3.8",
+ "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz",
+ "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==",
+ "dev": true
+ },
+ "node_modules/data-urls": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz",
+ "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==",
+ "dev": true,
+ "dependencies": {
+ "abab": "^2.0.6",
+ "whatwg-mimetype": "^3.0.0",
+ "whatwg-url": "^11.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decimal.js": {
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz",
+ "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==",
+ "dev": true
+ },
+ "node_modules/dedent": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz",
+ "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==",
+ "dev": true,
+ "peerDependencies": {
+ "babel-plugin-macros": "^3.1.0"
+ },
+ "peerDependenciesMeta": {
+ "babel-plugin-macros": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/deepmerge": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
+ "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/detect-newline": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz",
+ "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/diff-sequences": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
+ "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==",
+ "dev": true,
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/domexception": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz",
+ "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==",
+ "dev": true,
+ "dependencies": {
+ "webidl-conversions": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.4.544",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.544.tgz",
+ "integrity": "sha512-54z7squS1FyFRSUqq/knOFSptjjogLZXbKcYk3B0qkE1KZzvqASwRZnY2KzZQJqIYLVD38XZeoiMRflYSwyO4w==",
+ "dev": true
+ },
+ "node_modules/emittery": {
+ "version": "0.13.1",
+ "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz",
+ "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/emittery?sponsor=1"
+ }
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "node_modules/entities": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
+ "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dev": true,
+ "dependencies": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
+ "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/escodegen": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz",
+ "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==",
+ "dev": true,
+ "dependencies": {
+ "esprima": "^4.0.1",
+ "estraverse": "^5.2.0",
+ "esutils": "^2.0.2"
+ },
+ "bin": {
+ "escodegen": "bin/escodegen.js",
+ "esgenerate": "bin/esgenerate.js"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "optionalDependencies": {
+ "source-map": "~0.6.1"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true,
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "dev": true,
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/exit": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
+ "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/expect": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz",
+ "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/expect-utils": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true
+ },
+ "node_modules/fb-watchman": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz",
+ "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==",
+ "dev": true,
+ "dependencies": {
+ "bser": "2.1.1"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "dev": true,
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dev": true,
+ "dependencies": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
+ "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
+ "dev": true,
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
+ "dev": true
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true,
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/get-package-type": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
+ "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "dev": true,
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+ "dev": true
+ },
+ "node_modules/has": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz",
+ "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/html-encoding-sniffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz",
+ "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==",
+ "dev": true,
+ "dependencies": {
+ "whatwg-encoding": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/html-escaper": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
+ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
+ "dev": true
+ },
+ "node_modules/http-proxy-agent": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
+ "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
+ "dev": true,
+ "dependencies": {
+ "@tootallnate/once": "2",
+ "agent-base": "6",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/https-proxy-agent": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
+ "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
+ "dev": true,
+ "dependencies": {
+ "agent-base": "6",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10.17.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/import-local": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz",
+ "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==",
+ "dev": true,
+ "dependencies": {
+ "pkg-dir": "^4.2.0",
+ "resolve-cwd": "^3.0.0"
+ },
+ "bin": {
+ "import-local-fixture": "fixtures/cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "dev": true,
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true
+ },
+ "node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
+ "dev": true
+ },
+ "node_modules/is-core-module": {
+ "version": "2.13.0",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz",
+ "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==",
+ "dev": true,
+ "dependencies": {
+ "has": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-generator-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz",
+ "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-potential-custom-element-name": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz",
+ "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==",
+ "dev": true
+ },
+ "node_modules/is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "dev": true
+ },
+ "node_modules/istanbul-lib-coverage": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz",
+ "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/istanbul-lib-instrument": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.1.tgz",
+ "integrity": "sha512-EAMEJBsYuyyztxMxW3g7ugGPkrZsV57v0Hmv3mm1uQsmB+QnZuepg731CRaIgeUVSdmsTngOkSnauNF8p7FIhA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.12.3",
+ "@babel/parser": "^7.14.7",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-coverage": "^3.2.0",
+ "semver": "^7.5.4"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-instrument/node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-instrument/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-instrument/node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "dev": true
+ },
+ "node_modules/istanbul-lib-report": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
+ "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
+ "dev": true,
+ "dependencies": {
+ "istanbul-lib-coverage": "^3.0.0",
+ "make-dir": "^4.0.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-source-maps": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz",
+ "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==",
+ "dev": true,
+ "dependencies": {
+ "debug": "^4.1.1",
+ "istanbul-lib-coverage": "^3.0.0",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-reports": {
+ "version": "3.1.6",
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz",
+ "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==",
+ "dev": true,
+ "dependencies": {
+ "html-escaper": "^2.0.0",
+ "istanbul-lib-report": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz",
+ "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/core": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "import-local": "^3.0.2",
+ "jest-cli": "^29.7.0"
+ },
+ "bin": {
+ "jest": "bin/jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-changed-files": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz",
+ "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==",
+ "dev": true,
+ "dependencies": {
+ "execa": "^5.0.0",
+ "jest-util": "^29.7.0",
+ "p-limit": "^3.1.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz",
+ "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/expect": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "co": "^4.6.0",
+ "dedent": "^1.0.0",
+ "is-generator-fn": "^2.0.0",
+ "jest-each": "^29.7.0",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "p-limit": "^3.1.0",
+ "pretty-format": "^29.7.0",
+ "pure-rand": "^6.0.0",
+ "slash": "^3.0.0",
+ "stack-utils": "^2.0.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-cli": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz",
+ "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==",
+ "dev": true,
+ "dependencies": {
+ "@jest/core": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "create-jest": "^29.7.0",
+ "exit": "^0.1.2",
+ "import-local": "^3.0.2",
+ "jest-config": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "yargs": "^17.3.1"
+ },
+ "bin": {
+ "jest": "bin/jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-config": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz",
+ "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@jest/test-sequencer": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "babel-jest": "^29.7.0",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "deepmerge": "^4.2.2",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "jest-circus": "^29.7.0",
+ "jest-environment-node": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-runner": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "parse-json": "^5.2.0",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@types/node": "*",
+ "ts-node": ">=9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "ts-node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-diff": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz",
+ "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==",
+ "dev": true,
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "diff-sequences": "^29.6.3",
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-docblock": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz",
+ "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==",
+ "dev": true,
+ "dependencies": {
+ "detect-newline": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz",
+ "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "jest-get-type": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-jsdom": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz",
+ "integrity": "sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA==",
+ "dev": true,
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/jsdom": "^20.0.0",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jsdom": "^20.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "canvas": "^2.5.0"
+ },
+ "peerDependenciesMeta": {
+ "canvas": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-environment-node": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz",
+ "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-get-type": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz",
+ "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==",
+ "dev": true,
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz",
+ "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/graceful-fs": "^4.1.3",
+ "@types/node": "*",
+ "anymatch": "^3.0.3",
+ "fb-watchman": "^2.0.0",
+ "graceful-fs": "^4.2.9",
+ "jest-regex-util": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "walker": "^1.0.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "^2.3.2"
+ }
+ },
+ "node_modules/jest-leak-detector": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz",
+ "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==",
+ "dev": true,
+ "dependencies": {
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-matcher-utils": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz",
+ "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==",
+ "dev": true,
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "jest-diff": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz",
+ "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.12.13",
+ "@jest/types": "^29.6.3",
+ "@types/stack-utils": "^2.0.0",
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "micromatch": "^4.0.4",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "stack-utils": "^2.0.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz",
+ "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-pnp-resolver": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz",
+ "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ },
+ "peerDependencies": {
+ "jest-resolve": "*"
+ },
+ "peerDependenciesMeta": {
+ "jest-resolve": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-resolve": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz",
+ "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==",
+ "dev": true,
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-pnp-resolver": "^1.2.2",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "resolve": "^1.20.0",
+ "resolve.exports": "^2.0.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-resolve-dependencies": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz",
+ "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==",
+ "dev": true,
+ "dependencies": {
+ "jest-regex-util": "^29.6.3",
+ "jest-snapshot": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz",
+ "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==",
+ "dev": true,
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/environment": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "emittery": "^0.13.1",
+ "graceful-fs": "^4.2.9",
+ "jest-docblock": "^29.7.0",
+ "jest-environment-node": "^29.7.0",
+ "jest-haste-map": "^29.7.0",
+ "jest-leak-detector": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-resolve": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-watcher": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "p-limit": "^3.1.0",
+ "source-map-support": "0.5.13"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz",
+ "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==",
+ "dev": true,
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/globals": "^29.7.0",
+ "@jest/source-map": "^29.6.3",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "cjs-module-lexer": "^1.0.0",
+ "collect-v8-coverage": "^1.0.0",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-mock": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-bom": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz",
+ "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@babel/generator": "^7.7.2",
+ "@babel/plugin-syntax-jsx": "^7.7.2",
+ "@babel/plugin-syntax-typescript": "^7.7.2",
+ "@babel/types": "^7.3.3",
+ "@jest/expect-utils": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "babel-preset-current-node-syntax": "^1.0.0",
+ "chalk": "^4.0.0",
+ "expect": "^29.7.0",
+ "graceful-fs": "^4.2.9",
+ "jest-diff": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "natural-compare": "^1.4.0",
+ "pretty-format": "^29.7.0",
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "dev": true
+ },
+ "node_modules/jest-util": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz",
+ "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "graceful-fs": "^4.2.9",
+ "picomatch": "^2.2.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz",
+ "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==",
+ "dev": true,
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.0.0",
+ "jest-get-type": "^29.6.3",
+ "leven": "^3.1.0",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/camelcase": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
+ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/jest-watcher": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz",
+ "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==",
+ "dev": true,
+ "dependencies": {
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.0.0",
+ "emittery": "^0.13.1",
+ "jest-util": "^29.7.0",
+ "string-length": "^4.0.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-worker": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz",
+ "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*",
+ "jest-util": "^29.7.0",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-worker/node_modules/supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "dev": true,
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsdom": {
+ "version": "20.0.3",
+ "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-20.0.3.tgz",
+ "integrity": "sha512-SYhBvTh89tTfCD/CRdSOm13mOBa42iTaTyfyEWBdKcGdPxPtLFBXuHR8XHb33YNYaP+lLbmSvBTsnoesCNJEsQ==",
+ "dev": true,
+ "dependencies": {
+ "abab": "^2.0.6",
+ "acorn": "^8.8.1",
+ "acorn-globals": "^7.0.0",
+ "cssom": "^0.5.0",
+ "cssstyle": "^2.3.0",
+ "data-urls": "^3.0.2",
+ "decimal.js": "^10.4.2",
+ "domexception": "^4.0.0",
+ "escodegen": "^2.0.0",
+ "form-data": "^4.0.0",
+ "html-encoding-sniffer": "^3.0.0",
+ "http-proxy-agent": "^5.0.0",
+ "https-proxy-agent": "^5.0.1",
+ "is-potential-custom-element-name": "^1.0.1",
+ "nwsapi": "^2.2.2",
+ "parse5": "^7.1.1",
+ "saxes": "^6.0.0",
+ "symbol-tree": "^3.2.4",
+ "tough-cookie": "^4.1.2",
+ "w3c-xmlserializer": "^4.0.0",
+ "webidl-conversions": "^7.0.0",
+ "whatwg-encoding": "^2.0.0",
+ "whatwg-mimetype": "^3.0.0",
+ "whatwg-url": "^11.0.0",
+ "ws": "^8.11.0",
+ "xml-name-validator": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "peerDependencies": {
+ "canvas": "^2.5.0"
+ },
+ "peerDependenciesMeta": {
+ "canvas": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true,
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
+ "dev": true
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/kleur": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
+ "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/leven": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true
+ },
+ "node_modules/locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dev": true,
+ "dependencies": {
+ "p-locate": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/lodash.debounce": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
+ "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==",
+ "dev": true
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/make-dir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
+ "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
+ "dev": true,
+ "dependencies": {
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/make-dir/node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/make-dir/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/make-dir/node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "dev": true
+ },
+ "node_modules/makeerror": {
+ "version": "1.0.12",
+ "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz",
+ "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==",
+ "dev": true,
+ "dependencies": {
+ "tmpl": "1.0.5"
+ }
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
+ "dev": true
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
+ "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "dev": true,
+ "dependencies": {
+ "braces": "^3.0.2",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dev": true,
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
+ "dev": true
+ },
+ "node_modules/node-int64": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
+ "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==",
+ "dev": true
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.13",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz",
+ "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==",
+ "dev": true
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "dev": true,
+ "dependencies": {
+ "path-key": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/nwsapi": {
+ "version": "2.2.7",
+ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.7.tgz",
+ "integrity": "sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==",
+ "dev": true
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "dev": true,
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "dev": true,
+ "dependencies": {
+ "mimic-fn": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dev": true,
+ "dependencies": {
+ "p-limit": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/p-locate/node_modules/p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dev": true,
+ "dependencies": {
+ "p-try": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz",
+ "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==",
+ "dev": true,
+ "dependencies": {
+ "entities": "^4.4.0"
+ },
+ "funding": {
+ "url": "https://github.com/inikulin/parse5?sponsor=1"
+ }
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "dev": true
+ },
+ "node_modules/picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "dev": true
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pirates": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz",
+ "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "dev": true,
+ "dependencies": {
+ "find-up": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pretty-format": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
+ "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==",
+ "dev": true,
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "ansi-styles": "^5.0.0",
+ "react-is": "^18.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/pretty-format/node_modules/ansi-styles": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
+ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/prompts": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
+ "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==",
+ "dev": true,
+ "dependencies": {
+ "kleur": "^3.0.3",
+ "sisteransi": "^1.0.5"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/psl": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz",
+ "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==",
+ "dev": true
+ },
+ "node_modules/punycode": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz",
+ "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/pure-rand": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz",
+ "integrity": "sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/dubzzz"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/fast-check"
+ }
+ ]
+ },
+ "node_modules/querystringify": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
+ "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==",
+ "dev": true
+ },
+ "node_modules/react-is": {
+ "version": "18.2.0",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz",
+ "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==",
+ "dev": true
+ },
+ "node_modules/regenerate": {
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz",
+ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==",
+ "dev": true
+ },
+ "node_modules/regenerate-unicode-properties": {
+ "version": "10.1.1",
+ "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz",
+ "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==",
+ "dev": true,
+ "dependencies": {
+ "regenerate": "^1.4.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/regenerator-runtime": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz",
+ "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==",
+ "dev": true
+ },
+ "node_modules/regenerator-transform": {
+ "version": "0.15.2",
+ "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz",
+ "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/runtime": "^7.8.4"
+ }
+ },
+ "node_modules/regexpu-core": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz",
+ "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/regjsgen": "^0.8.0",
+ "regenerate": "^1.4.2",
+ "regenerate-unicode-properties": "^10.1.0",
+ "regjsparser": "^0.9.1",
+ "unicode-match-property-ecmascript": "^2.0.0",
+ "unicode-match-property-value-ecmascript": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/regjsparser": {
+ "version": "0.9.1",
+ "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz",
+ "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==",
+ "dev": true,
+ "dependencies": {
+ "jsesc": "~0.5.0"
+ },
+ "bin": {
+ "regjsparser": "bin/parser"
+ }
+ },
+ "node_modules/regjsparser/node_modules/jsesc": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
+ "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==",
+ "dev": true,
+ "bin": {
+ "jsesc": "bin/jsesc"
+ }
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==",
+ "dev": true
+ },
+ "node_modules/resolve": {
+ "version": "1.22.6",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.6.tgz",
+ "integrity": "sha512-njhxM7mV12JfufShqGy3Rz8j11RPdLy4xi15UurGJeoHLfJpVXKdh3ueuOqbYUcDZnffr6X739JBo5LzyahEsw==",
+ "dev": true,
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-cwd": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz",
+ "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==",
+ "dev": true,
+ "dependencies": {
+ "resolve-from": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
+ "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/resolve.exports": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz",
+ "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "dev": true
+ },
+ "node_modules/saxes": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz",
+ "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==",
+ "dev": true,
+ "dependencies": {
+ "xmlchars": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=v12.22.7"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "dev": true
+ },
+ "node_modules/sisteransi": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
+ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==",
+ "dev": true
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-support": {
+ "version": "0.5.13",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz",
+ "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==",
+ "dev": true,
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
+ "dev": true
+ },
+ "node_modules/stack-utils": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz",
+ "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==",
+ "dev": true,
+ "dependencies": {
+ "escape-string-regexp": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/string-length": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
+ "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==",
+ "dev": true,
+ "dependencies": {
+ "char-regex": "^1.0.2",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz",
+ "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/symbol-tree": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
+ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==",
+ "dev": true
+ },
+ "node_modules/test-exclude": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
+ "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==",
+ "dev": true,
+ "dependencies": {
+ "@istanbuljs/schema": "^0.1.2",
+ "glob": "^7.1.4",
+ "minimatch": "^3.0.4"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/tmpl": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
+ "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
+ "dev": true
+ },
+ "node_modules/to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/tough-cookie": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz",
+ "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==",
+ "dev": true,
+ "dependencies": {
+ "psl": "^1.1.33",
+ "punycode": "^2.1.1",
+ "universalify": "^0.2.0",
+ "url-parse": "^1.5.3"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz",
+ "integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==",
+ "dev": true,
+ "dependencies": {
+ "punycode": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/type-detect": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/unicode-canonical-property-names-ecmascript": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz",
+ "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-match-property-ecmascript": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz",
+ "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==",
+ "dev": true,
+ "dependencies": {
+ "unicode-canonical-property-names-ecmascript": "^2.0.0",
+ "unicode-property-aliases-ecmascript": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-match-property-value-ecmascript": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz",
+ "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-property-aliases-ecmascript": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz",
+ "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/universalify": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
+ "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.0.13",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz",
+ "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "escalade": "^3.1.1",
+ "picocolors": "^1.0.0"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/url-parse": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
+ "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
+ "dev": true,
+ "dependencies": {
+ "querystringify": "^2.1.1",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "node_modules/v8-to-istanbul": {
+ "version": "9.1.3",
+ "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.3.tgz",
+ "integrity": "sha512-9lDD+EVI2fjFsMWXc6dy5JJzBsVTcQ2fVkfBvncZ6xJWG9wtBhOldG+mHkSL0+V1K/xgZz0JDO5UT5hFwHUghg==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.12",
+ "@types/istanbul-lib-coverage": "^2.0.1",
+ "convert-source-map": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10.12.0"
+ }
+ },
+ "node_modules/w3c-xmlserializer": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz",
+ "integrity": "sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==",
+ "dev": true,
+ "dependencies": {
+ "xml-name-validator": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/walker": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz",
+ "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==",
+ "dev": true,
+ "dependencies": {
+ "makeerror": "1.0.12"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz",
+ "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/whatwg-encoding": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz",
+ "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==",
+ "dev": true,
+ "dependencies": {
+ "iconv-lite": "0.6.3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/whatwg-mimetype": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz",
+ "integrity": "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/whatwg-url": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz",
+ "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==",
+ "dev": true,
+ "dependencies": {
+ "tr46": "^3.0.0",
+ "webidl-conversions": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "dev": true
+ },
+ "node_modules/write-file-atomic": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz",
+ "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==",
+ "dev": true,
+ "dependencies": {
+ "imurmurhash": "^0.1.4",
+ "signal-exit": "^3.0.7"
+ },
+ "engines": {
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
+ }
+ },
+ "node_modules/ws": {
+ "version": "8.14.2",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz",
+ "integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==",
+ "dev": true,
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/xml-name-validator": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz",
+ "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/xmlchars": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz",
+ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==",
+ "dev": true
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true
+ },
+ "node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "dev": true,
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ }
+ }
+}
diff --git a/tests-ui/package.json b/tests-ui/package.json
new file mode 100644
index 00000000000..e7b60ad8e75
--- /dev/null
+++ b/tests-ui/package.json
@@ -0,0 +1,30 @@
+{
+ "name": "comfui-tests",
+ "version": "1.0.0",
+ "description": "UI tests",
+ "main": "index.js",
+ "scripts": {
+ "test": "jest",
+ "test:generate": "node setup.js"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/comfyanonymous/ComfyUI.git"
+ },
+ "keywords": [
+ "comfyui",
+ "test"
+ ],
+ "author": "comfyanonymous",
+ "license": "GPL-3.0",
+ "bugs": {
+ "url": "https://github.com/comfyanonymous/ComfyUI/issues"
+ },
+ "homepage": "https://github.com/comfyanonymous/ComfyUI#readme",
+ "devDependencies": {
+ "@babel/preset-env": "^7.22.20",
+ "@types/jest": "^29.5.5",
+ "jest": "^29.7.0",
+ "jest-environment-jsdom": "^29.7.0"
+ }
+}
diff --git a/tests-ui/setup.js b/tests-ui/setup.js
new file mode 100644
index 00000000000..8bbd9dcdf20
--- /dev/null
+++ b/tests-ui/setup.js
@@ -0,0 +1,88 @@
+const { spawn } = require("child_process");
+const { resolve } = require("path");
+const { existsSync, mkdirSync, writeFileSync } = require("fs");
+const http = require("http");
+
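+// Polls a running ComfyUI instance at http://127.0.0.1:8188/object_info, injects a
+// few fake checkpoint/VAE names into the response, and writes the result to
+// ./data/object_info.json for the UI tests. If nothing answers on the first
+// attempt, a local server is spawned with --cpu and polling retries for up to 30s.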
+async function setup() {
+ // Wait up to 30s for the ComfyUI server to start responding
+ let success = false;
+ let child;
+ for (let i = 0; i < 30; i++) {
+ try {
+ await new Promise((res, rej) => {
+ http
+ .get("http://127.0.0.1:8188/object_info", (resp) => {
+ let data = "";
+ resp.on("data", (chunk) => {
+ data += chunk;
+ });
+ resp.on("end", () => {
+ // Modify the response data to add some checkpoints
+ const objectInfo = JSON.parse(data);
+ objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"];
+ objectInfo.VAELoader.input.required.vae_name[0] = ["vae1.safetensors", "vae2.ckpt"];
+
+ data = JSON.stringify(objectInfo, undefined, "\t");
+
+ const outDir = resolve("./data");
+ if (!existsSync(outDir)) {
+ mkdirSync(outDir);
+ }
+
+ const outPath = resolve(outDir, "object_info.json");
+ console.log(`Writing ${Object.keys(objectInfo).length} nodes to ${outPath}`);
+ writeFileSync(outPath, data, {
+ encoding: "utf8",
+ });
+ res();
+ });
+ })
+ .on("error", rej);
+ });
+ success = true;
+ break;
+ } catch (error) {
+ console.log(i + "/30", error);
+ if (i === 0) {
+ // Start the server on first iteration if it fails to connect
+ console.log("Starting ComfyUI server...");
+
+ let python = resolve("../../python_embeded/python.exe");
+ let args;
+ let cwd;
+ if (existsSync(python)) {
+ args = ["-s", "ComfyUI/main.py"];
+ cwd = "../..";
+ } else {
+ python = "python";
+ args = ["main.py"];
+ cwd = "..";
+ }
+ args.push("--cpu");
+ console.log(python, ...args);
+ child = spawn(python, args, { cwd });
+ child.on("error", (err) => {
+ console.log(`Server error (${err})`);
+ i = 30;
+ });
+ child.on("exit", (code) => {
+ if (!success) {
+ console.log(`Server exited (${code})`);
+ i = 30;
+ }
+ });
+ }
+ await new Promise((r) => {
+ setTimeout(r, 1000);
+ });
+ }
+ }
+
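+ // Stop the spawned server (if we started one) now that polling is done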
+ child?.kill();
+
+ if (!success) {
+ throw new Error("Waiting for server failed...");
+ }
+}
+
+setup();
\ No newline at end of file
diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js
new file mode 100644
index 00000000000..ce54c11542c
--- /dev/null
+++ b/tests-ui/tests/groupNode.test.js
@@ -0,0 +1,818 @@
+// @ts-check
+///
+
+const { start, createDefaultWorkflow } = require("../utils");
+const lg = require("../utils/litegraph");
+
+describe("group node", () => {
+ beforeEach(() => {
+ lg.setup(global);
+ });
+
+ afterEach(() => {
+ lg.teardown(global);
+ });
+
+ /**
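+   * Selects the given nodes, invokes their "Convert to Group Node" menu entry with
+   * the supplied name, and checks the originals were removed before returning the
+   * new group node.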
+ *
+ * @param {*} app
+ * @param {*} graph
+ * @param {*} name
+ * @param {*} nodes
+ * @returns { Promise> }
+ */
+ async function convertToGroup(app, graph, name, nodes) {
+ // Select the nodes we are converting
+ for (const n of nodes) {
+ n.select(true);
+ }
+
+ expect(Object.keys(app.canvas.selected_nodes).sort((a, b) => +a - +b)).toEqual(
+ nodes.map((n) => n.id + "").sort((a, b) => +a - +b)
+ );
+
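+    // Stub window.prompt so the conversion receives the group name without user input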
+ global.prompt = jest.fn().mockImplementation(() => name);
+ const groupNode = await nodes[0].menu["Convert to Group Node"].call(false);
+
+ // Check group name was requested
+ expect(window.prompt).toHaveBeenCalled();
+
+ // Ensure old nodes are removed
+ for (const n of nodes) {
+ expect(n.isRemoved).toBeTruthy();
+ }
+
+ expect(groupNode.type).toEqual("workflow/" + name);
+
+ return graph.find(groupNode);
+ }
+
+ /**
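+   * Builds the expected API-format prompt for the default workflow. idMap remaps
+   * node IDs (both the top-level keys and the IDs referenced inside links) and
+   * valueMap overrides individual input values per node.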
+ * @param { Record | number[] } idMap
+ * @param { Record> } valueMap
+ */
+ function getOutput(idMap = {}, valueMap = {}) {
+ if (idMap instanceof Array) {
+ idMap = idMap.reduce((p, n) => {
+ p[n] = n + "";
+ return p;
+ }, {});
+ }
+ const expected = {
+ 1: { inputs: { ckpt_name: "model1.safetensors", ...valueMap?.[1] }, class_type: "CheckpointLoaderSimple" },
+ 2: { inputs: { text: "positive", clip: ["1", 1], ...valueMap?.[2] }, class_type: "CLIPTextEncode" },
+ 3: { inputs: { text: "negative", clip: ["1", 1], ...valueMap?.[3] }, class_type: "CLIPTextEncode" },
+ 4: { inputs: { width: 512, height: 512, batch_size: 1, ...valueMap?.[4] }, class_type: "EmptyLatentImage" },
+ 5: {
+ inputs: {
+ seed: 0,
+ steps: 20,
+ cfg: 8,
+ sampler_name: "euler",
+ scheduler: "normal",
+ denoise: 1,
+ model: ["1", 0],
+ positive: ["2", 0],
+ negative: ["3", 0],
+ latent_image: ["4", 0],
+ ...valueMap?.[5],
+ },
+ class_type: "KSampler",
+ },
+ 6: { inputs: { samples: ["5", 0], vae: ["1", 2], ...valueMap?.[6] }, class_type: "VAEDecode" },
+ 7: { inputs: { filename_prefix: "ComfyUI", images: ["6", 0], ...valueMap?.[7] }, class_type: "SaveImage" },
+ };
+
+ // Map old IDs to new at the top level
+ const mapped = {};
+ for (const oldId in idMap) {
+ mapped[idMap[oldId]] = expected[oldId];
+ delete expected[oldId];
+ }
+ Object.assign(mapped, expected);
+
+ // Map old IDs to new inside links
+ for (const k in mapped) {
+ for (const input in mapped[k].inputs) {
+ const v = mapped[k].inputs[input];
+ if (v instanceof Array) {
+ if (v[0] in idMap) {
+ v[0] = idMap[v[0]] + "";
+ }
+ }
+ }
+ }
+
+ return mapped;
+ }
+
+ test("can be created from selected nodes", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg, nodes.empty]);
+
+ // Ensure links are now to the group node
+ expect(group.inputs).toHaveLength(2);
+ expect(group.outputs).toHaveLength(3);
+
+ expect(group.inputs.map((i) => i.input.name)).toEqual(["clip", "CLIPTextEncode clip"]);
+ expect(group.outputs.map((i) => i.output.name)).toEqual(["LATENT", "CONDITIONING", "CLIPTextEncode CONDITIONING"]);
+
+ // ckpt clip to both clip inputs on the group
+ expect(nodes.ckpt.outputs.CLIP.connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [group.id, 0],
+ [group.id, 1],
+ ]);
+
+ // group conditioning to sampler
+ expect(group.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [nodes.sampler.id, 1],
+ ]);
+ // group conditioning 2 to sampler
+ expect(
+ group.outputs["CLIPTextEncode CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])
+ ).toEqual([[nodes.sampler.id, 2]]);
+ // group latent to sampler
+ expect(group.outputs["LATENT"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [nodes.sampler.id, 3],
+ ]);
+ });
+
+ test("maintains all output links on conversion", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const save2 = ez.SaveImage(...nodes.decode.outputs);
+ const save3 = ez.SaveImage(...nodes.decode.outputs);
+ // Ensure an output with multiple links maintains them on convert to group
+ const group = await convertToGroup(app, graph, "test", [nodes.sampler, nodes.decode]);
+ expect(group.outputs[0].connections.length).toBe(3);
+ expect(group.outputs[0].connections[0].targetNode.id).toBe(nodes.save.id);
+ expect(group.outputs[0].connections[1].targetNode.id).toBe(save2.id);
+ expect(group.outputs[0].connections[2].targetNode.id).toBe(save3.id);
+
+ // and they're still linked when converting back to nodes
+ const newNodes = group.menu["Convert to nodes"].call();
+ const decode = graph.find(newNodes.find((n) => n.type === "VAEDecode"));
+ expect(decode.outputs[0].connections.length).toBe(3);
+ expect(decode.outputs[0].connections[0].targetNode.id).toBe(nodes.save.id);
+ expect(decode.outputs[0].connections[1].targetNode.id).toBe(save2.id);
+ expect(decode.outputs[0].connections[2].targetNode.id).toBe(save3.id);
+ });
+ test("can be be converted back to nodes", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const toConvert = [nodes.pos, nodes.neg, nodes.empty, nodes.sampler];
+ const group = await convertToGroup(app, graph, "test", toConvert);
+
+ // Edit some values to ensure they are set back onto the converted nodes
+ expect(group.widgets["text"].value).toBe("positive");
+ group.widgets["text"].value = "pos";
+ expect(group.widgets["CLIPTextEncode text"].value).toBe("negative");
+ group.widgets["CLIPTextEncode text"].value = "neg";
+ expect(group.widgets["width"].value).toBe(512);
+ group.widgets["width"].value = 1024;
+ expect(group.widgets["sampler_name"].value).toBe("euler");
+ group.widgets["sampler_name"].value = "ddim";
+ expect(group.widgets["control_after_generate"].value).toBe("randomize");
+ group.widgets["control_after_generate"].value = "fixed";
+
+ /** @type { Array } */
+ group.menu["Convert to nodes"].call();
+
+ // ensure widget values are set
+ const pos = graph.find(nodes.pos.id);
+ expect(pos.node.type).toBe("CLIPTextEncode");
+ expect(pos.widgets["text"].value).toBe("pos");
+ const neg = graph.find(nodes.neg.id);
+ expect(neg.node.type).toBe("CLIPTextEncode");
+ expect(neg.widgets["text"].value).toBe("neg");
+ const empty = graph.find(nodes.empty.id);
+ expect(empty.node.type).toBe("EmptyLatentImage");
+ expect(empty.widgets["width"].value).toBe(1024);
+ const sampler = graph.find(nodes.sampler.id);
+ expect(sampler.node.type).toBe("KSampler");
+ expect(sampler.widgets["sampler_name"].value).toBe("ddim");
+ expect(sampler.widgets["control_after_generate"].value).toBe("fixed");
+
+ // validate links
+ expect(nodes.ckpt.outputs.CLIP.connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [pos.id, 0],
+ [neg.id, 0],
+ ]);
+
+ expect(pos.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [nodes.sampler.id, 1],
+ ]);
+
+ expect(neg.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [nodes.sampler.id, 2],
+ ]);
+
+ expect(empty.outputs["LATENT"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([
+ [nodes.sampler.id, 3],
+ ]);
+ });
+ test("it can embed reroutes as inputs", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+
+ // Add and connect a reroute to the clip text encodes
+ const reroute = ez.Reroute();
+ nodes.ckpt.outputs.CLIP.connectTo(reroute.inputs[0]);
+ reroute.outputs[0].connectTo(nodes.pos.inputs[0]);
+ reroute.outputs[0].connectTo(nodes.neg.inputs[0]);
+
+ // Convert to group and ensure we only have 1 input of the correct type
+ const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg, nodes.empty, reroute]);
+ expect(group.inputs).toHaveLength(1);
+ expect(group.inputs[0].input.type).toEqual("CLIP");
+
+ expect((await graph.toPrompt()).output).toEqual(getOutput());
+ });
+ test("it can embed reroutes as outputs", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+
+ // Add a reroute with no output so we output IMAGE even though it's used internally
+ const reroute = ez.Reroute();
+ nodes.decode.outputs.IMAGE.connectTo(reroute.inputs[0]);
+
+ // Convert to group and ensure there is an IMAGE output
+ const group = await convertToGroup(app, graph, "test", [nodes.decode, nodes.save, reroute]);
+ expect(group.outputs).toHaveLength(1);
+ expect(group.outputs[0].output.type).toEqual("IMAGE");
+ expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.decode.id, nodes.save.id]));
+ });
+ test("it can embed reroutes as pipes", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+
+ // Use reroutes as a pipe
+ const rerouteModel = ez.Reroute();
+ const rerouteClip = ez.Reroute();
+ const rerouteVae = ez.Reroute();
+ nodes.ckpt.outputs.MODEL.connectTo(rerouteModel.inputs[0]);
+ nodes.ckpt.outputs.CLIP.connectTo(rerouteClip.inputs[0]);
+ nodes.ckpt.outputs.VAE.connectTo(rerouteVae.inputs[0]);
+
+ const group = await convertToGroup(app, graph, "test", [rerouteModel, rerouteClip, rerouteVae]);
+
+ expect(group.outputs).toHaveLength(3);
+ expect(group.outputs.map((o) => o.output.type)).toEqual(["MODEL", "CLIP", "VAE"]);
+
+ expect(group.outputs).toHaveLength(3);
+ expect(group.outputs.map((o) => o.output.type)).toEqual(["MODEL", "CLIP", "VAE"]);
+
+ group.outputs[0].connectTo(nodes.sampler.inputs.model);
+ group.outputs[1].connectTo(nodes.pos.inputs.clip);
+ group.outputs[1].connectTo(nodes.neg.inputs.clip);
+ });
+ test("can handle reroutes used internally", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+
+ let reroutes = [];
+ let prevNode = nodes.ckpt;
+ for(let i = 0; i < 5; i++) {
+ const reroute = ez.Reroute();
+ prevNode.outputs[0].connectTo(reroute.inputs[0]);
+ prevNode = reroute;
+ reroutes.push(reroute);
+ }
+ prevNode.outputs[0].connectTo(nodes.sampler.inputs.model);
+
+ const group = await convertToGroup(app, graph, "test", [...reroutes, ...Object.values(nodes)]);
+ expect((await graph.toPrompt()).output).toEqual(getOutput());
+
+ group.menu["Convert to nodes"].call();
+ expect((await graph.toPrompt()).output).toEqual(getOutput());
+ });
+ test("creates with widget values from inner nodes", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+
+ nodes.ckpt.widgets.ckpt_name.value = "model2.ckpt";
+ nodes.pos.widgets.text.value = "hello";
+ nodes.neg.widgets.text.value = "world";
+ nodes.empty.widgets.width.value = 256;
+ nodes.empty.widgets.height.value = 1024;
+ nodes.sampler.widgets.seed.value = 1;
+ nodes.sampler.widgets.control_after_generate.value = "increment";
+ nodes.sampler.widgets.steps.value = 8;
+ nodes.sampler.widgets.cfg.value = 4.5;
+ nodes.sampler.widgets.sampler_name.value = "uni_pc";
+ nodes.sampler.widgets.scheduler.value = "karras";
+ nodes.sampler.widgets.denoise.value = 0.9;
+
+ const group = await convertToGroup(app, graph, "test", [
+ nodes.ckpt,
+ nodes.pos,
+ nodes.neg,
+ nodes.empty,
+ nodes.sampler,
+ ]);
+
+ expect(group.widgets["ckpt_name"].value).toEqual("model2.ckpt");
+ expect(group.widgets["text"].value).toEqual("hello");
+ expect(group.widgets["CLIPTextEncode text"].value).toEqual("world");
+ expect(group.widgets["width"].value).toEqual(256);
+ expect(group.widgets["height"].value).toEqual(1024);
+ expect(group.widgets["seed"].value).toEqual(1);
+ expect(group.widgets["control_after_generate"].value).toEqual("increment");
+ expect(group.widgets["steps"].value).toEqual(8);
+ expect(group.widgets["cfg"].value).toEqual(4.5);
+ expect(group.widgets["sampler_name"].value).toEqual("uni_pc");
+ expect(group.widgets["scheduler"].value).toEqual("karras");
+ expect(group.widgets["denoise"].value).toEqual(0.9);
+
+ expect((await graph.toPrompt()).output).toEqual(
+ getOutput([nodes.ckpt.id, nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id], {
+ [nodes.ckpt.id]: { ckpt_name: "model2.ckpt" },
+ [nodes.pos.id]: { text: "hello" },
+ [nodes.neg.id]: { text: "world" },
+ [nodes.empty.id]: { width: 256, height: 1024 },
+ [nodes.sampler.id]: {
+ seed: 1,
+ steps: 8,
+ cfg: 4.5,
+ sampler_name: "uni_pc",
+ scheduler: "karras",
+ denoise: 0.9,
+ },
+ })
+ );
+ });
+ test("group inputs can be reroutes", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]);
+
+ const reroute = ez.Reroute();
+ nodes.ckpt.outputs.CLIP.connectTo(reroute.inputs[0]);
+
+ reroute.outputs[0].connectTo(group.inputs[0]);
+ reroute.outputs[0].connectTo(group.inputs[1]);
+
+ expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.pos.id, nodes.neg.id]));
+ });
+ test("group outputs can be reroutes", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]);
+
+ const reroute1 = ez.Reroute();
+ const reroute2 = ez.Reroute();
+ group.outputs[0].connectTo(reroute1.inputs[0]);
+ group.outputs[1].connectTo(reroute2.inputs[0]);
+
+ reroute1.outputs[0].connectTo(nodes.sampler.inputs.positive);
+ reroute2.outputs[0].connectTo(nodes.sampler.inputs.negative);
+
+ expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.pos.id, nodes.neg.id]));
+ });
+ test("groups can connect to each other", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const group1 = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]);
+ const group2 = await convertToGroup(app, graph, "test2", [nodes.empty, nodes.sampler]);
+
+ group1.outputs[0].connectTo(group2.inputs["positive"]);
+ group1.outputs[1].connectTo(group2.inputs["negative"]);
+
+ expect((await graph.toPrompt()).output).toEqual(
+ getOutput([nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id])
+ );
+ });
+ test("displays generated image on group node", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ let group = await convertToGroup(app, graph, "test", [
+ nodes.pos,
+ nodes.neg,
+ nodes.empty,
+ nodes.sampler,
+ nodes.decode,
+ nodes.save,
+ ]);
+
+ const { api } = require("../../web/scripts/api");
+
+ api.dispatchEvent(new CustomEvent("execution_start", {}));
+ api.dispatchEvent(new CustomEvent("executing", { detail: `${nodes.save.id}` }));
+ // Event should be forwarded to group node id
+ expect(+app.runningNodeId).toEqual(group.id);
+ expect(group.node["imgs"]).toBeFalsy();
+ api.dispatchEvent(
+ new CustomEvent("executed", {
+ detail: {
+ node: `${nodes.save.id}`,
+ output: {
+ images: [
+ {
+ filename: "test.png",
+ type: "output",
+ },
+ ],
+ },
+ },
+ })
+ );
+
+ // Trigger paint
+ group.node.onDrawBackground?.(app.canvas.ctx, app.canvas.canvas);
+
+ expect(group.node["images"]).toEqual([
+ {
+ filename: "test.png",
+ type: "output",
+ },
+ ]);
+
+ // Reload
+ const workflow = JSON.stringify((await graph.toPrompt()).workflow);
+ await app.loadGraphData(JSON.parse(workflow));
+ group = graph.find(group);
+
+ // Trigger inner nodes to get created
+ group.node["getInnerNodes"]();
+
+ // Check it works for internal node ids
+ api.dispatchEvent(new CustomEvent("execution_start", {}));
+ api.dispatchEvent(new CustomEvent("executing", { detail: `${group.id}:5` }));
+ // Event should be forwarded to group node id
+ expect(+app.runningNodeId).toEqual(group.id);
+ expect(group.node["imgs"]).toBeFalsy();
+ api.dispatchEvent(
+ new CustomEvent("executed", {
+ detail: {
+ node: `${group.id}:5`,
+ output: {
+ images: [
+ {
+ filename: "test2.png",
+ type: "output",
+ },
+ ],
+ },
+ },
+ })
+ );
+
+ // Trigger paint
+ group.node.onDrawBackground?.(app.canvas.ctx, app.canvas.canvas);
+
+ expect(group.node["images"]).toEqual([
+ {
+ filename: "test2.png",
+ type: "output",
+ },
+ ]);
+ });
+ test("allows widgets to be converted to inputs", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]);
+ group.widgets[0].convertToInput();
+
+ const primitive = ez.PrimitiveNode();
+ primitive.outputs[0].connectTo(group.inputs["text"]);
+ primitive.widgets[0].value = "hello";
+
+ expect((await graph.toPrompt()).output).toEqual(
+ getOutput([nodes.pos.id, nodes.neg.id], {
+ [nodes.pos.id]: { text: "hello" },
+ })
+ );
+ });
+ test("can be copied", async () => {
+ const { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+
+ const group1 = await convertToGroup(app, graph, "test", [
+ nodes.pos,
+ nodes.neg,
+ nodes.empty,
+ nodes.sampler,
+ nodes.decode,
+ nodes.save,
+ ]);
+
+ group1.widgets["text"].value = "hello";
+ group1.widgets["width"].value = 256;
+ group1.widgets["seed"].value = 1;
+
+ // Clone the node
+ group1.menu.Clone.call();
+ expect(app.graph._nodes).toHaveLength(3);
+ const group2 = graph.find(app.graph._nodes[2]);
+ expect(group2.node.type).toEqual("workflow/test");
+ expect(group2.id).not.toEqual(group1.id);
+
+ // Reconnect ckpt
+ nodes.ckpt.outputs.MODEL.connectTo(group2.inputs["model"]);
+ nodes.ckpt.outputs.CLIP.connectTo(group2.inputs["clip"]);
+ nodes.ckpt.outputs.CLIP.connectTo(group2.inputs["CLIPTextEncode clip"]);
+ nodes.ckpt.outputs.VAE.connectTo(group2.inputs["vae"]);
+
+ group2.widgets["text"].value = "world";
+ group2.widgets["width"].value = 1024;
+ group2.widgets["seed"].value = 100;
+
+ let i = 0;
+ expect((await graph.toPrompt()).output).toEqual({
+ ...getOutput([nodes.empty.id, nodes.pos.id, nodes.neg.id, nodes.sampler.id, nodes.decode.id, nodes.save.id], {
+ [nodes.empty.id]: { width: 256 },
+ [nodes.pos.id]: { text: "hello" },
+ [nodes.sampler.id]: { seed: 1 },
+ }),
+ ...getOutput(
+ {
+ [nodes.empty.id]: `${group2.id}:${i++}`,
+ [nodes.pos.id]: `${group2.id}:${i++}`,
+ [nodes.neg.id]: `${group2.id}:${i++}`,
+ [nodes.sampler.id]: `${group2.id}:${i++}`,
+ [nodes.decode.id]: `${group2.id}:${i++}`,
+ [nodes.save.id]: `${group2.id}:${i++}`,
+ },
+ {
+ [nodes.empty.id]: { width: 1024 },
+ [nodes.pos.id]: { text: "world" },
+ [nodes.sampler.id]: { seed: 100 },
+ }
+ ),
+ });
+
+ graph.arrange();
+ });
+ test("is embedded in workflow", async () => {
+ let { ez, graph, app } = await start();
+ const nodes = createDefaultWorkflow(ez, graph);
+ let group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]);
+ const workflow = JSON.stringify((await graph.toPrompt()).workflow);
+
+ // Clear the environment
+ ({ ez, graph, app } = await start({
+ resetEnv: true,
+ }));
+ // Ensure the node isn't registered
+ expect(() => ez["workflow/test"]).toThrow();
+
+ // Reload the workflow
+ await app.loadGraphData(JSON.parse(workflow));
+
+ // Ensure the node is found
+ group = graph.find(group);
+
+ // Generate prompt and ensure it is as expected
+ expect((await graph.toPrompt()).output).toEqual(
+ getOutput({
+ [nodes.pos.id]: `${group.id}:0`,
+ [nodes.neg.id]: `${group.id}:1`,
+ })
+ );
+ });
+ test("shows missing node error on missing internal node when loading graph data", async () => {
+ const { graph } = await start();
+
+ const dialogShow = jest.spyOn(graph.app.ui.dialog, "show");
+ await graph.app.loadGraphData({
+ last_node_id: 3,
+ last_link_id: 1,
+ nodes: [
+ {
+ id: 3,
+ type: "workflow/testerror",
+ },
+ ],
+ links: [],
+ groups: [],
+ config: {},
+ extra: {
+ groupNodes: {
+ testerror: {
+ nodes: [
+ {
+ type: "NotKSampler",
+ },
+ {
+ type: "NotVAEDecode",
+ },
+ ],
+ },
+ },
+ },
+ });
+
+ expect(dialogShow).toBeCalledTimes(1);
+ const call = dialogShow.mock.calls[0][0].innerHTML;
+ expect(call).toContain("the following node types were not found");
+ expect(call).toContain("NotKSampler");
+ expect(call).toContain("NotVAEDecode");
+ expect(call).toContain("workflow/testerror");
+ });
+ test("maintains widget inputs on conversion back to nodes", async () => {
+ const { ez, graph, app } = await start();
+ let pos = ez.CLIPTextEncode({ text: "positive" });
+ pos.node.title = "Positive";
+ let neg = ez.CLIPTextEncode({ text: "negative" });
+ neg.node.title = "Negative";
+ pos.widgets.text.convertToInput();
+ neg.widgets.text.convertToInput();
+
+ let primitive = ez.PrimitiveNode();
+ primitive.outputs[0].connectTo(pos.inputs.text);
+ primitive.outputs[0].connectTo(neg.inputs.text);
+
+ const group = await convertToGroup(app, graph, "test", [pos, neg, primitive]);
+ // This will use a primitive widget named 'value'
+ expect(group.widgets.length).toBe(1);
+ expect(group.widgets["value"].value).toBe("positive");
+
+ const newNodes = group.menu["Convert to nodes"].call();
+ pos = graph.find(newNodes.find((n) => n.title === "Positive"));
+ neg = graph.find(newNodes.find((n) => n.title === "Negative"));
+ primitive = graph.find(newNodes.find((n) => n.type === "PrimitiveNode"));
+
+ expect(pos.inputs).toHaveLength(2);
+ expect(neg.inputs).toHaveLength(2);
+ expect(primitive.outputs[0].connections).toHaveLength(2);
+
+ expect((await graph.toPrompt()).output).toEqual({
+ 1: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" },
+ 2: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" },
+ });
+ });
+ test("adds widgets in node execution order", async () => {
+ const { ez, graph, app } = await start();
+ const scale = ez.LatentUpscale();
+ const save = ez.SaveImage();
+ const empty = ez.EmptyLatentImage();
+ const decode = ez.VAEDecode();
+
+ scale.outputs.LATENT.connectTo(decode.inputs.samples);
+ decode.outputs.IMAGE.connectTo(save.inputs.images);
+ empty.outputs.LATENT.connectTo(scale.inputs.samples);
+
+ const group = await convertToGroup(app, graph, "test", [scale, save, empty, decode]);
+ const widgets = group.widgets.map((w) => w.widget.name);
+ expect(widgets).toStrictEqual([
+ "width",
+ "height",
+ "batch_size",
+ "upscale_method",
+ "LatentUpscale width",
+ "LatentUpscale height",
+ "crop",
+ "filename_prefix",
+ ]);
+ });
+ test("adds output for external links when converting to group", async () => {
+ const { ez, graph, app } = await start();
+ const img = ez.EmptyLatentImage();
+ let decode = ez.VAEDecode(...img.outputs);
+ const preview1 = ez.PreviewImage(...decode.outputs);
+ const preview2 = ez.PreviewImage(...decode.outputs);
+
+ const group = await convertToGroup(app, graph, "test", [img, decode, preview1]);
+
+ // Ensure we have an output connected to the 2nd preview node
+ expect(group.outputs.length).toBe(1);
+ expect(group.outputs[0].connections.length).toBe(1);
+ expect(group.outputs[0].connections[0].targetNode.id).toBe(preview2.id);
+
+ // Convert back and ensure both previews are still connected
+ group.menu["Convert to nodes"].call();
+ decode = graph.find(decode);
+ expect(decode.outputs[0].connections.length).toBe(2);
+ expect(decode.outputs[0].connections[0].targetNode.id).toBe(preview1.id);
+ expect(decode.outputs[0].connections[1].targetNode.id).toBe(preview2.id);
+ });
+ test("adds output for external links when converting to group when nodes are not in execution order", async () => {
+ const { ez, graph, app } = await start();
+ const sampler = ez.KSampler();
+ const ckpt = ez.CheckpointLoaderSimple();
+ const empty = ez.EmptyLatentImage();
+ const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" });
+ const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" });
+ const decode1 = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE);
+ const save = ez.SaveImage(decode1.outputs.IMAGE);
+ ckpt.outputs.MODEL.connectTo(sampler.inputs.model);
+ pos.outputs.CONDITIONING.connectTo(sampler.inputs.positive);
+ neg.outputs.CONDITIONING.connectTo(sampler.inputs.negative);
+ empty.outputs.LATENT.connectTo(sampler.inputs.latent_image);
+
+ const encode = ez.VAEEncode(decode1.outputs.IMAGE);
+ const vae = ez.VAELoader();
+ const decode2 = ez.VAEDecode(encode.outputs.LATENT, vae.outputs.VAE);
+ const preview = ez.PreviewImage(decode2.outputs.IMAGE);
+ vae.outputs.VAE.connectTo(encode.inputs.vae);
+
+ const group = await convertToGroup(app, graph, "test", [vae, decode1, encode, sampler]);
+
+ expect(group.outputs.length).toBe(3);
+ expect(group.outputs[0].output.name).toBe("VAE");
+ expect(group.outputs[0].output.type).toBe("VAE");
+ expect(group.outputs[1].output.name).toBe("IMAGE");
+ expect(group.outputs[1].output.type).toBe("IMAGE");
+ expect(group.outputs[2].output.name).toBe("LATENT");
+ expect(group.outputs[2].output.type).toBe("LATENT");
+
+ expect(group.outputs[0].connections.length).toBe(1);
+ expect(group.outputs[0].connections[0].targetNode.id).toBe(decode2.id);
+ expect(group.outputs[0].connections[0].targetInput.index).toBe(1);
+
+ expect(group.outputs[1].connections.length).toBe(1);
+ expect(group.outputs[1].connections[0].targetNode.id).toBe(save.id);
+ expect(group.outputs[1].connections[0].targetInput.index).toBe(0);
+
+ expect(group.outputs[2].connections.length).toBe(1);
+ expect(group.outputs[2].connections[0].targetNode.id).toBe(decode2.id);
+ expect(group.outputs[2].connections[0].targetInput.index).toBe(0);
+
+ expect((await graph.toPrompt()).output).toEqual({
+ ...getOutput({ 1: ckpt.id, 2: pos.id, 3: neg.id, 4: empty.id, 5: sampler.id, 6: decode1.id, 7: save.id }),
+ [vae.id]: { inputs: { vae_name: "vae1.safetensors" }, class_type: vae.node.type },
+ [encode.id]: { inputs: { pixels: ["6", 0], vae: [vae.id + "", 0] }, class_type: encode.node.type },
+ [decode2.id]: { inputs: { samples: [encode.id + "", 0], vae: [vae.id + "", 0] }, class_type: decode2.node.type },
+ [preview.id]: { inputs: { images: [decode2.id + "", 0] }, class_type: preview.node.type },
+ });
+ });
+ test("works with IMAGEUPLOAD widget", async () => {
+ const { ez, graph, app } = await start();
+ const img = ez.LoadImage();
+ const preview1 = ez.PreviewImage(img.outputs[0]);
+
+ const group = await convertToGroup(app, graph, "test", [img, preview1]);
+ const widget = group.widgets["upload"];
+ expect(widget).toBeTruthy();
+ expect(widget.widget.type).toBe("button");
+ });
+ test("internal primitive populates widgets for all linked inputs", async () => {
+ const { ez, graph, app } = await start();
+ const img = ez.LoadImage();
+ const scale1 = ez.ImageScale(img.outputs[0]);
+ const scale2 = ez.ImageScale(img.outputs[0]);
+ ez.PreviewImage(scale1.outputs[0]);
+ ez.PreviewImage(scale2.outputs[0]);
+
+ scale1.widgets.width.convertToInput();
+ scale2.widgets.height.convertToInput();
+
+ const primitive = ez.PrimitiveNode();
+ primitive.outputs[0].connectTo(scale1.inputs.width);
+ primitive.outputs[0].connectTo(scale2.inputs.height);
+
+ const group = await convertToGroup(app, graph, "test", [img, primitive, scale1, scale2]);
+ group.widgets.value.value = 100;
+ expect((await graph.toPrompt()).output).toEqual({
+ 1: {
+ inputs: { image: img.widgets.image.value, upload: "image" },
+ class_type: "LoadImage",
+ },
+ 2: {
+ inputs: { upscale_method: "nearest-exact", width: 100, height: 512, crop: "disabled", image: ["1", 0] },
+ class_type: "ImageScale",
+ },
+ 3: {
+ inputs: { upscale_method: "nearest-exact", width: 512, height: 100, crop: "disabled", image: ["1", 0] },
+ class_type: "ImageScale",
+ },
+ 4: { inputs: { images: ["2", 0] }, class_type: "PreviewImage" },
+ 5: { inputs: { images: ["3", 0] }, class_type: "PreviewImage" },
+ });
+ });
+ test("primitive control widgets values are copied on convert", async () => {
+ const { ez, graph, app } = await start();
+ const sampler = ez.KSampler();
+ sampler.widgets.seed.convertToInput();
+ sampler.widgets.sampler_name.convertToInput();
+
+ let p1 = ez.PrimitiveNode();
+ let p2 = ez.PrimitiveNode();
+ p1.outputs[0].connectTo(sampler.inputs.seed);
+ p2.outputs[0].connectTo(sampler.inputs.sampler_name);
+
+ p1.widgets.control_after_generate.value = "increment";
+ p2.widgets.control_after_generate.value = "decrement";
+ p2.widgets.control_filter_list.value = "/.*/";
+
+ p2.node.title = "p2";
+
+ const group = await convertToGroup(app, graph, "test", [sampler, p1, p2]);
+ expect(group.widgets.control_after_generate.value).toBe("increment");
+ expect(group.widgets["p2 control_after_generate"].value).toBe("decrement");
+ expect(group.widgets["p2 control_filter_list"].value).toBe("/.*/");
+
+ group.widgets.control_after_generate.value = "fixed";
+ group.widgets["p2 control_after_generate"].value = "randomize";
+ group.widgets["p2 control_filter_list"].value = "/.+/";
+
+ group.menu["Convert to nodes"].call();
+ p1 = graph.find(p1);
+ p2 = graph.find(p2);
+
+ expect(p1.widgets.control_after_generate.value).toBe("fixed");
+ expect(p2.widgets.control_after_generate.value).toBe("randomize");
+ expect(p2.widgets.control_filter_list.value).toBe("/.+/");
+ });
+});
diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js
new file mode 100644
index 00000000000..8e191adf043
--- /dev/null
+++ b/tests-ui/tests/widgetInputs.test.js
@@ -0,0 +1,395 @@
+// @ts-check
+///
+
+const { start, makeNodeDef, checkBeforeAndAfterReload, assertNotNullOrUndefined } = require("../utils");
+const lg = require("../utils/litegraph");
+
+/**
+ * @typedef { import("../utils/ezgraph") } Ez
+ * @typedef { ReturnType["ez"] } EzNodeFactory
+ */
+
+/**
+ * @param { EzNodeFactory } ez
+ * @param { InstanceType<Ez["EzGraph"]> } graph
+ * @param { InstanceType<Ez["EzInput"]> } input
+ * @param { string } widgetType
+ * @param { number } controlWidgetCount
+ * @returns
+ */
+async function connectPrimitiveAndReload(ez, graph, input, widgetType, controlWidgetCount = 0) {
+ // Connect to a primitive and ensure it's still connected after reload
+ let primitive = ez.PrimitiveNode();
+ primitive.outputs[0].connectTo(input);
+
+ await checkBeforeAndAfterReload(graph, async () => {
+ primitive = graph.find(primitive);
+ let { connections } = primitive.outputs[0];
+ expect(connections).toHaveLength(1);
+ expect(connections[0].targetNode.id).toBe(input.node.node.id);
+
+ // Ensure widget is correct type
+ const valueWidget = primitive.widgets.value;
+ expect(valueWidget.widget.type).toBe(widgetType);
+
+ // Check if control_after_generate should be added
+ if (controlWidgetCount) {
+ const controlWidget = primitive.widgets.control_after_generate;
+ expect(controlWidget.widget.type).toBe("combo");
+ if(widgetType === "combo") {
+ const filterWidget = primitive.widgets.control_filter_list;
+ expect(filterWidget.widget.type).toBe("string");
+ }
+ }
+
+ // Ensure we don't have other widgets
+ expect(primitive.node.widgets).toHaveLength(1 + controlWidgetCount);
+ });
+
+ return primitive;
+}
+
+describe("widget inputs", () => {
+ beforeEach(() => {
+ lg.setup(global);
+ });
+
+ afterEach(() => {
+ lg.teardown(global);
+ });
+
+ [
+ { name: "int", type: "INT", widget: "number", control: 1 },
+ { name: "float", type: "FLOAT", widget: "number", control: 1 },
+ { name: "text", type: "STRING" },
+ {
+ name: "customtext",
+ type: "STRING",
+ opt: { multiline: true },
+ },
+ { name: "toggle", type: "BOOLEAN" },
+ { name: "combo", type: ["a", "b", "c"], control: 2 },
+ ].forEach((c) => {
+ test(`widget conversion + primitive works on ${c.name}`, async () => {
+ const { ez, graph } = await start({
+ mockNodeDefs: makeNodeDef("TestNode", { [c.name]: [c.type, c.opt ?? {}] }),
+ });
+
+ // Create test node and convert to input
+ const n = ez.TestNode();
+ const w = n.widgets[c.name];
+ w.convertToInput();
+ expect(w.isConvertedToInput).toBeTruthy();
+ const input = w.getConvertedInput();
+ expect(input).toBeTruthy();
+
+ // @ts-ignore : input is valid here
+ await connectPrimitiveAndReload(ez, graph, input, c.widget ?? c.name, c.control);
+ });
+ });
+
+ test("converted widget works after reload", async () => {
+ const { ez, graph } = await start();
+ let n = ez.CheckpointLoaderSimple();
+
+ const inputCount = n.inputs.length;
+
+ // Convert ckpt name to an input
+ n.widgets.ckpt_name.convertToInput();
+ expect(n.widgets.ckpt_name.isConvertedToInput).toBeTruthy();
+ expect(n.inputs.ckpt_name).toBeTruthy();
+ expect(n.inputs.length).toEqual(inputCount + 1);
+
+ // Convert back to widget and ensure input is removed
+ n.widgets.ckpt_name.convertToWidget();
+ expect(n.widgets.ckpt_name.isConvertedToInput).toBeFalsy();
+ expect(n.inputs.ckpt_name).toBeFalsy();
+ expect(n.inputs.length).toEqual(inputCount);
+
+ // Convert again and reload the graph to ensure it maintains state
+ n.widgets.ckpt_name.convertToInput();
+ expect(n.inputs.length).toEqual(inputCount + 1);
+
+ const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", 2);
+
+ // Disconnect & reconnect
+ primitive.outputs[0].connections[0].disconnect();
+ let { connections } = primitive.outputs[0];
+ expect(connections).toHaveLength(0);
+
+ primitive.outputs[0].connectTo(n.inputs.ckpt_name);
+ ({ connections } = primitive.outputs[0]);
+ expect(connections).toHaveLength(1);
+ expect(connections[0].targetNode.id).toBe(n.node.id);
+
+ // Convert back to widget and ensure input is removed
+ n.widgets.ckpt_name.convertToWidget();
+ expect(n.widgets.ckpt_name.isConvertedToInput).toBeFalsy();
+ expect(n.inputs.ckpt_name).toBeFalsy();
+ expect(n.inputs.length).toEqual(inputCount);
+ });
+
+ test("converted widget works on clone", async () => {
+ const { graph, ez } = await start();
+ let n = ez.CheckpointLoaderSimple();
+
+ // Convert the widget to an input
+ n.widgets.ckpt_name.convertToInput();
+ expect(n.widgets.ckpt_name.isConvertedToInput).toBeTruthy();
+
+ // Clone the node
+ n.menu["Clone"].call();
+ expect(graph.nodes).toHaveLength(2);
+ const clone = graph.nodes[1];
+ expect(clone.id).not.toEqual(n.id);
+
+ // Ensure the clone has an input
+ expect(clone.widgets.ckpt_name.isConvertedToInput).toBeTruthy();
+ expect(clone.inputs.ckpt_name).toBeTruthy();
+
+ // Ensure primitive connects to both nodes
+ let primitive = ez.PrimitiveNode();
+ primitive.outputs[0].connectTo(n.inputs.ckpt_name);
+ primitive.outputs[0].connectTo(clone.inputs.ckpt_name);
+ expect(primitive.outputs[0].connections).toHaveLength(2);
+
+ // Convert back to widget and ensure input is removed
+ clone.widgets.ckpt_name.convertToWidget();
+ expect(clone.widgets.ckpt_name.isConvertedToInput).toBeFalsy();
+ expect(clone.inputs.ckpt_name).toBeFalsy();
+ });
+
+ test("shows missing node error on custom node with converted input", async () => {
+ const { graph } = await start();
+
+ const dialogShow = jest.spyOn(graph.app.ui.dialog, "show");
+
+ await graph.app.loadGraphData({
+ last_node_id: 3,
+ last_link_id: 4,
+ nodes: [
+ {
+ id: 1,
+ type: "TestNode",
+ pos: [41.87329101561909, 389.7381480823742],
+ size: { 0: 220, 1: 374 },
+ flags: {},
+ order: 1,
+ mode: 0,
+ inputs: [{ name: "test", type: "FLOAT", link: 4, widget: { name: "test" }, slot_index: 0 }],
+ outputs: [],
+ properties: { "Node name for S&R": "TestNode" },
+ widgets_values: [1],
+ },
+ {
+ id: 3,
+ type: "PrimitiveNode",
+ pos: [-312, 433],
+ size: { 0: 210, 1: 82 },
+ flags: {},
+ order: 0,
+ mode: 0,
+ outputs: [{ links: [4], widget: { name: "test" } }],
+ title: "test",
+ properties: {},
+ },
+ ],
+ links: [[4, 3, 0, 1, 6, "FLOAT"]],
+ groups: [],
+ config: {},
+ extra: {},
+ version: 0.4,
+ });
+
+ expect(dialogShow).toBeCalledTimes(1);
+ expect(dialogShow.mock.calls[0][0].innerHTML).toContain("the following node types were not found");
+ expect(dialogShow.mock.calls[0][0].innerHTML).toContain("TestNode");
+ });
+
+ test("defaultInput widgets can be converted back to inputs", async () => {
+ const { graph, ez } = await start({
+ mockNodeDefs: makeNodeDef("TestNode", { example: ["INT", { defaultInput: true }] }),
+ });
+
+ // Create test node and ensure it starts as an input
+ let n = ez.TestNode();
+ let w = n.widgets.example;
+ expect(w.isConvertedToInput).toBeTruthy();
+ let input = w.getConvertedInput();
+ expect(input).toBeTruthy();
+
+ // Ensure it can be converted back to a widget
+ w.convertToWidget();
+ expect(w.isConvertedToInput).toBeFalsy();
+ expect(n.inputs.length).toEqual(0);
+ // and back to an input again
+ w.convertToInput();
+ expect(w.isConvertedToInput).toBeTruthy();
+ input = w.getConvertedInput();
+
+ // Reload and ensure it still only has 1 converted widget
+ if (!assertNotNullOrUndefined(input)) return;
+
+ await connectPrimitiveAndReload(ez, graph, input, "number", 1);
+ n = graph.find(n);
+ expect(n.widgets).toHaveLength(1);
+ w = n.widgets.example;
+ expect(w.isConvertedToInput).toBeTruthy();
+
+ // Convert back to widget and ensure it is still a widget after reload
+ w.convertToWidget();
+ await graph.reload();
+ n = graph.find(n);
+ expect(n.widgets).toHaveLength(1);
+ expect(n.widgets[0].isConvertedToInput).toBeFalsy();
+ expect(n.inputs.length).toEqual(0);
+ });
+
+ test("forceInput widgets can not be converted back to inputs", async () => {
+ const { graph, ez } = await start({
+ mockNodeDefs: makeNodeDef("TestNode", { example: ["INT", { forceInput: true }] }),
+ });
+
+ // Create test node and ensure it starts as an input
+ let n = ez.TestNode();
+ let w = n.widgets.example;
+ expect(w.isConvertedToInput).toBeTruthy();
+ const input = w.getConvertedInput();
+ expect(input).toBeTruthy();
+
+ // Convert to widget should error
+ expect(() => w.convertToWidget()).toThrow();
+
+ // Reload and ensure it still only has 1 converted widget
+ if (assertNotNullOrUndefined(input)) {
+ await connectPrimitiveAndReload(ez, graph, input, "number", 1);
+ n = graph.find(n);
+ expect(n.widgets).toHaveLength(1);
+ expect(n.widgets.example.isConvertedToInput).toBeTruthy();
+ }
+ });
+
+ test("primitive can connect to matching combos on converted widgets", async () => {
+ const { ez } = await start({
+ mockNodeDefs: {
+ ...makeNodeDef("TestNode1", { example: [["A", "B", "C"], { forceInput: true }] }),
+ ...makeNodeDef("TestNode2", { example: [["A", "B", "C"], { forceInput: true }] }),
+ },
+ });
+
+ const n1 = ez.TestNode1();
+ const n2 = ez.TestNode2();
+ const p = ez.PrimitiveNode();
+ p.outputs[0].connectTo(n1.inputs[0]);
+ p.outputs[0].connectTo(n2.inputs[0]);
+ expect(p.outputs[0].connections).toHaveLength(2);
+ const valueWidget = p.widgets.value;
+ expect(valueWidget.widget.type).toBe("combo");
+ expect(valueWidget.widget.options.values).toEqual(["A", "B", "C"]);
+ });
+
+ test("primitive can not connect to non matching combos on converted widgets", async () => {
+ const { ez } = await start({
+ mockNodeDefs: {
+ ...makeNodeDef("TestNode1", { example: [["A", "B", "C"], { forceInput: true }] }),
+ ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true }] }),
+ },
+ });
+
+ const n1 = ez.TestNode1();
+ const n2 = ez.TestNode2();
+ const p = ez.PrimitiveNode();
+ p.outputs[0].connectTo(n1.inputs[0]);
+ expect(() => p.outputs[0].connectTo(n2.inputs[0])).toThrow();
+ expect(p.outputs[0].connections).toHaveLength(1);
+ });
+
+ test("combo output can not connect to non matching combos list input", async () => {
+ const { ez } = await start({
+ mockNodeDefs: {
+ ...makeNodeDef("TestNode1", {}, [["A", "B"]]),
+ ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true}] }),
+ ...makeNodeDef("TestNode3", { example: [["A", "B", "C"], { forceInput: true}] }),
+ },
+ });
+
+ const n1 = ez.TestNode1();
+ const n2 = ez.TestNode2();
+ const n3 = ez.TestNode3();
+
+ n1.outputs[0].connectTo(n2.inputs[0]);
+ expect(() => n1.outputs[0].connectTo(n3.inputs[0])).toThrow();
+ });
+
+ test("combo primitive can filter list when control_after_generate called", async () => {
+ const { ez } = await start({
+ mockNodeDefs: {
+ ...makeNodeDef("TestNode1", { example: [["A", "B", "C", "D", "AA", "BB", "CC", "DD", "AAA", "BBB"], {}] }),
+ },
+ });
+
+ const n1 = ez.TestNode1();
+ n1.widgets.example.convertToInput();
+ const p = ez.PrimitiveNode();
+ p.outputs[0].connectTo(n1.inputs[0]);
+
+ const value = p.widgets.value;
+ const control = p.widgets.control_after_generate.widget;
+ const filter = p.widgets.control_filter_list;
+
+ expect(p.widgets.length).toBe(3);
+ control.value = "increment";
+ expect(value.value).toBe("A");
+
+ // Manually trigger after queue when set to increment
+ control["afterQueued"]();
+ expect(value.value).toBe("B");
+
+ // Filter to items containing D
+ filter.value = "D";
+ control["afterQueued"]();
+ expect(value.value).toBe("D");
+ control["afterQueued"]();
+ expect(value.value).toBe("DD");
+
+ // Check decrement
+ value.value = "BBB";
+ control.value = "decrement";
+ filter.value = "B";
+ control["afterQueued"]();
+ expect(value.value).toBe("BB");
+ control["afterQueued"]();
+ expect(value.value).toBe("B");
+
+ // Check regex works
+ value.value = "BBB";
+ filter.value = "/[AB]|^C$/";
+ control["afterQueued"]();
+ expect(value.value).toBe("AAA");
+ control["afterQueued"]();
+ expect(value.value).toBe("BB");
+ control["afterQueued"]();
+ expect(value.value).toBe("AA");
+ control["afterQueued"]();
+ expect(value.value).toBe("C");
+ control["afterQueued"]();
+ expect(value.value).toBe("B");
+ control["afterQueued"]();
+ expect(value.value).toBe("A");
+
+ // Check random
+ control.value = "randomize";
+ filter.value = "/D/";
+ for(let i = 0; i < 100; i++) {
+ control["afterQueued"]();
+ expect(value.value === "D" || value.value === "DD").toBeTruthy();
+ }
+
+ // Ensure the filter doesn't apply when set to fixed
+ control.value = "fixed";
+ value.value = "B";
+ filter.value = "C";
+ control["afterQueued"]();
+ expect(value.value).toBe("B");
+ });
+});
diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js
new file mode 100644
index 00000000000..898b82db051
--- /dev/null
+++ b/tests-ui/utils/ezgraph.js
@@ -0,0 +1,439 @@
+// @ts-check
+///
+
+/**
+ * @typedef { import("../../web/scripts/app")["app"] } app
+ * @typedef { import("../../web/types/litegraph") } LG
+ * @typedef { import("../../web/types/litegraph").IWidget } IWidget
+ * @typedef { import("../../web/types/litegraph").ContextMenuItem } ContextMenuItem
+ * @typedef { import("../../web/types/litegraph").INodeInputSlot } INodeInputSlot
+ * @typedef { import("../../web/types/litegraph").INodeOutputSlot } INodeOutputSlot
+ * @typedef { InstanceType<LG["LGraphNode"]> & { widgets?: Array<IWidget> } } LGNode
+ * @typedef { (...args: EzOutput[] | [...EzOutput[], Record<string, unknown>]) => EzNode } EzNodeFactory
+ */
+
+export class EzConnection {
+ /** @type { app } */
+ app;
+ /** @type { InstanceType<LG["LLink"]> } */
+ link;
+
+ get originNode() {
+ return new EzNode(this.app, this.app.graph.getNodeById(this.link.origin_id));
+ }
+
+ get originOutput() {
+ return this.originNode.outputs[this.link.origin_slot];
+ }
+
+ get targetNode() {
+ return new EzNode(this.app, this.app.graph.getNodeById(this.link.target_id));
+ }
+
+ get targetInput() {
+ return this.targetNode.inputs[this.link.target_slot];
+ }
+
+ /**
+ * @param { app } app
+ * @param { InstanceType } link
+ */
+ constructor(app, link) {
+ this.app = app;
+ this.link = link;
+ }
+
+ disconnect() {
+ this.targetInput.disconnect();
+ }
+}
+
+export class EzSlot {
+ /** @type { EzNode } */
+ node;
+ /** @type { number } */
+ index;
+
+ /**
+ * @param { EzNode } node
+ * @param { number } index
+ */
+ constructor(node, index) {
+ this.node = node;
+ this.index = index;
+ }
+}
+
+export class EzInput extends EzSlot {
+ /** @type { INodeInputSlot } */
+ input;
+
+ /**
+ * @param { EzNode } node
+ * @param { number } index
+ * @param { INodeInputSlot } input
+ */
+ constructor(node, index, input) {
+ super(node, index);
+ this.input = input;
+ }
+
+ disconnect() {
+ this.node.node.disconnectInput(this.index);
+ }
+}
+
+export class EzOutput extends EzSlot {
+ /** @type { INodeOutputSlot } */
+ output;
+
+ /**
+ * @param { EzNode } node
+ * @param { number } index
+ * @param { INodeOutputSlot } output
+ */
+ constructor(node, index, output) {
+ super(node, index);
+ this.output = output;
+ }
+
+ get connections() {
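+ // Wrap each outgoing link id on this slot in an EzConnection for assertions.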
+ return (this.node.node.outputs?.[this.index]?.links ?? []).map(
+ (l) => new EzConnection(this.node.app, this.node.app.graph.links[l])
+ );
+ }
+
+ /**
+ * @param { EzInput } input
+ */
+ connectTo(input) {
+ if (!input) throw new Error("Invalid input");
+
+ /**
+ * @type { LG["LLink"] | null }
+ */
+ const link = this.node.node.connect(this.index, input.node.node, input.index);
+ if (!link) {
+ const inp = input.input;
+ const inName = inp.name || inp.label || inp.type;
+ throw new Error(
+ `Connecting from ${input.node.node.type}[${inName}#${input.index}] -> ${this.node.node.type}[${
+ this.output.name ?? this.output.type
+ }#${this.index}] failed.`
+ );
+ }
+ return link;
+ }
+}
+
+export class EzNodeMenuItem {
+ /** @type { EzNode } */
+ node;
+ /** @type { number } */
+ index;
+ /** @type { ContextMenuItem } */
+ item;
+
+ /**
+ * @param { EzNode } node
+ * @param { number } index
+ * @param { ContextMenuItem } item
+ */
+ constructor(node, index, item) {
+ this.node = node;
+ this.index = index;
+ this.item = item;
+ }
+
+ call(selectNode = true) {
+ if (!this.item?.callback) throw new Error(`Menu Item ${this.item?.content ?? "[null]"} has no callback.`);
+ if (selectNode) {
+ this.node.select();
+ }
+ return this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node);
+ }
+}
+
+export class EzWidget {
+ /** @type { EzNode } */
+ node;
+ /** @type { number } */
+ index;
+ /** @type { IWidget } */
+ widget;
+
+ /**
+ * @param { EzNode } node
+ * @param { number } index
+ * @param { IWidget } widget
+ */
+ constructor(node, index, widget) {
+ this.node = node;
+ this.index = index;
+ this.widget = widget;
+ }
+
+ get value() {
+ return this.widget.value;
+ }
+
+ set value(v) {
+ this.widget.value = v;
+ }
+
+ get isConvertedToInput() {
+ // @ts-ignore : this type is valid for converted widgets
+ return this.widget.type === "converted-widget";
+ }
+
+ getConvertedInput() {
+ if (!this.isConvertedToInput) throw new Error(`Widget ${this.widget.name} is not converted to input.`);
+
+ return this.node.inputs.find((inp) => inp.input["widget"]?.name === this.widget.name);
+ }
+
+ convertToWidget() {
+ if (!this.isConvertedToInput)
+ throw new Error(`Widget ${this.widget.name} cannot be converted as it is already a widget.`);
+ this.node.menu[`Convert ${this.widget.name} to widget`].call();
+ }
+
+ convertToInput() {
+ if (this.isConvertedToInput)
+ throw new Error(`Widget ${this.widget.name} cannot be converted as it is already an input.`);
+ this.node.menu[`Convert ${this.widget.name} to input`].call();
+ }
+}
+
+export class EzNode {
+ /** @type { app } */
+ app;
+ /** @type { LGNode } */
+ node;
+
+ /**
+ * @param { app } app
+ * @param { LGNode } node
+ */
+ constructor(app, node) {
+ this.app = app;
+ this.node = node;
+ }
+
+ get id() {
+ return this.node.id;
+ }
+
+ get inputs() {
+ return this.#makeLookupArray("inputs", "name", EzInput);
+ }
+
+ get outputs() {
+ return this.#makeLookupArray("outputs", "name", EzOutput);
+ }
+
+ get widgets() {
+ return this.#makeLookupArray("widgets", "name", EzWidget);
+ }
+
+ get menu() {
+ return this.#makeLookupArray(() => this.app.canvas.getNodeMenuOptions(this.node), "content", EzNodeMenuItem);
+ }
+
+ get isRemoved() {
+ return !this.app.graph.getNodeById(this.id);
+ }
+
+ select(addToSelection = false) {
+ this.app.canvas.selectNode(this.node, addToSelection);
+ }
+
+ // /**
+ // * @template { "inputs" | "outputs" } T
+ // * @param { T } type
+ // * @returns { Record & (type extends "inputs" ? EzInput [] : EzOutput[]) }
+ // */
+ // #getSlotItems(type) {
+ // // @ts-ignore : these items are correct
+ // return (this.node[type] ?? []).reduce((p, s, i) => {
+ // if (s.name in p) {
+ // throw new Error(`Unable to store input ${s.name} on array as name conflicts.`);
+ // }
+ // // @ts-ignore
+ // p.push((p[s.name] = new (type === "inputs" ? EzInput : EzOutput)(this, i, s)));
+ // return p;
+ // }, Object.assign([], { $: this }));
+ // }
+
+ /**
+ * @template { { new(node: EzNode, index: number, obj: any): any } } T
+ * @param { "inputs" | "outputs" | "widgets" | (() => Array) } nodeProperty
+ * @param { string } nameProperty
+ * @param { T } ctor
+ * @returns { Record<string, InstanceType<T>> & Array<InstanceType<T>> }
+ */
+ #makeLookupArray(nodeProperty, nameProperty, ctor) {
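+ // Build an array of wrapper instances that is also keyed by each item's name,
+ // so callers can use either node.inputs[0] or node.inputs.clip interchangeably.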
+ const items = typeof nodeProperty === "function" ? nodeProperty() : this.node[nodeProperty];
+ // @ts-ignore
+ return (items ?? []).reduce((p, s, i) => {
+ if (!s) return p;
+
+ const name = s[nameProperty];
+ const item = new ctor(this, i, s);
+ // @ts-ignore
+ p.push(item);
+ if (name) {
+ // @ts-ignore
+ if (name in p) {
+ throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`);
+ }
+ }
+ // @ts-ignore
+ p[name] = item;
+ return p;
+ }, Object.assign([], { $: this }));
+ }
+}
+
+export class EzGraph {
+ /** @type { app } */
+ app;
+
+ /**
+ * @param { app } app
+ */
+ constructor(app) {
+ this.app = app;
+ }
+
+ get nodes() {
+ return this.app.graph._nodes.map((n) => new EzNode(this.app, n));
+ }
+
+ clear() {
+ this.app.graph.clear();
+ }
+
+ arrange() {
+ this.app.graph.arrange();
+ }
+
+ stringify() {
+ return JSON.stringify(this.app.graph.serialize(), undefined, "\t");
+ }
+
+ /**
+ * @param { number | LGNode | EzNode } obj
+ * @returns { EzNode }
+ */
+ find(obj) {
+ let match;
+ let id;
+ if (typeof obj === "number") {
+ id = obj;
+ } else {
+ id = obj.id;
+ }
+
+ match = this.app.graph.getNodeById(id);
+
+ if (!match) {
+ throw new Error(`Unable to find node with ID ${id}.`);
+ }
+
+ return new EzNode(this.app, match);
+ }
+
+ /**
+ * @returns { Promise }
+ */
+ reload() {
+ const graph = JSON.parse(JSON.stringify(this.app.graph.serialize()));
+ return new Promise((r) => {
+ this.app.graph.clear();
+ setTimeout(async () => {
+ await this.app.loadGraphData(graph);
+ r();
+ }, 10);
+ });
+ }
+
+ /**
+ * @returns { Promise<{
+ * workflow: {},
+ * output: Record<string, { inputs?: Record<string, unknown>, class_type?: string }>
+ * }> }
+ */
+ toPrompt() {
+ // @ts-ignore
+ return this.app.graphToPrompt();
+ }
+}
+
+export const Ez = {
+ /**
+ * Quickly build and interact with a ComfyUI graph
+ * @example
+ * const { ez, graph } = Ez.graph(app);
+ * graph.clear();
+ * const [model, clip, vae] = ez.CheckpointLoaderSimple().outputs;
+ * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }).outputs;
+ * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }).outputs;
+ * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage().outputs).outputs;
+ * const [image] = ez.VAEDecode(latent, vae).outputs;
+ * const saveNode = ez.SaveImage(image);
+ * console.log(saveNode);
+ * graph.arrange();
+ * @param { app } app
+ * @param { LG["LiteGraph"] } LiteGraph
+ * @param { LG["LGraphCanvas"] } LGraphCanvas
+ * @param { boolean } clearGraph
+ * @returns { { graph: EzGraph, ez: Record<string, EzNodeFactory> } }
+ */
+ graph(app, LiteGraph = window["LiteGraph"], LGraphCanvas = window["LGraphCanvas"], clearGraph = true) {
+ // Always set the active canvas so things work
+ LGraphCanvas.active_canvas = app.canvas;
+
+ if (clearGraph) {
+ app.graph.clear();
+ }
+
+ // @ts-ignore : this proxy handles utility methods & node creation
+ const factory = new Proxy(
+ {},
+ {
+ get(_, p) {
+ if (typeof p !== "string") throw new Error("Invalid node");
+ const node = LiteGraph.createNode(p);
+ if (!node) throw new Error(`Unknown node "${p}"`);
+ app.graph.add(node);
+
+ /**
+ * @param {Parameters} args
+ */
+ return function (...args) {
+ const ezNode = new EzNode(app, node);
+ const inputs = ezNode.inputs;
+
+ let slot = 0;
+ for (const arg of args) {
+ if (arg instanceof EzOutput) {
+ arg.connectTo(inputs[slot++]);
+ } else {
+ for (const k in arg) {
+ ezNode.widgets[k].value = arg[k];
+ }
+ }
+ }
+
+ return ezNode;
+ };
+ },
+ }
+ );
+
+ return { graph: new EzGraph(app), ez: factory };
+ },
+};
diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js
new file mode 100644
index 00000000000..eeccdb3d921
--- /dev/null
+++ b/tests-ui/utils/index.js
@@ -0,0 +1,105 @@
+const { mockApi } = require("./setup");
+const { Ez } = require("./ezgraph");
+const lg = require("./litegraph");
+
+/**
+ *
+ * @param { Parameters<typeof mockApi>[0] & { resetEnv?: boolean } } config
+ * @returns
+ */
+export async function start(config = undefined) {
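+ // When resetEnv is set, reset the jest module registry and mocks and
+ // re-initialize the litegraph globals so a fresh app instance is created.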
+ if(config?.resetEnv) {
+ jest.resetModules();
+ jest.resetAllMocks();
+ lg.setup(global);
+ }
+
+ mockApi(config);
+ const { app } = require("../../web/scripts/app");
+ await app.setup();
+ return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app };
+}
+
+/**
+ * @param { ReturnType["graph"] } graph
+ * @param { (hasReloaded: boolean) => (Promise | void) } cb
+ */
+export async function checkBeforeAndAfterReload(graph, cb) {
+ await cb(false);
+ await graph.reload();
+ await cb(true);
+}
+
+/**
+ * @param { string } name
+ * @param { Record<string, string | [string | string[], Record<string, unknown>]> } input
+ * @param { (string | string[])[] | Record<string, string | string[]> } output
+ * @returns { Record<string, import("../../web/types/comfy").ComfyObjectInfo> }
+ */
+export function makeNodeDef(name, input, output = {}) {
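+ // Build a minimal node definition in the shape returned by api.getNodeDefs():
+ // required inputs plus parallel output / output_name / output_is_list arrays.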
+ const nodeDef = {
+ name,
+ category: "test",
+ output: [],
+ output_name: [],
+ output_is_list: [],
+ input: {
+ required: {},
+ },
+ };
+ for (const k in input) {
+ nodeDef.input.required[k] = typeof input[k] === "string" ? [input[k], {}] : [...input[k]];
+ }
+ if (output instanceof Array) {
+ output = output.reduce((p, c) => {
+ p[c] = c;
+ return p;
+ }, {});
+ }
+ for (const k in output) {
+ nodeDef.output.push(output[k]);
+ nodeDef.output_name.push(k);
+ nodeDef.output_is_list.push(false);
+ }
+
+ return { [name]: nodeDef };
+}
+
+/**
+ * @template { any } T
+ * @param { T } x
+ * @returns { x is Exclude }
+ */
+export function assertNotNullOrUndefined(x) {
+ expect(x).not.toEqual(null);
+ expect(x).not.toEqual(undefined);
+ return true;
+}
+
+/**
+ *
+ * @param { ReturnType["ez"] } ez
+ * @param { ReturnType["graph"] } graph
+ */
+export function createDefaultWorkflow(ez, graph) {
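+ // Build the default txt2img graph used throughout the tests:
+ // CheckpointLoaderSimple -> CLIPTextEncode (pos/neg) -> KSampler -> VAEDecode -> SaveImage.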
+ graph.clear();
+ const ckpt = ez.CheckpointLoaderSimple();
+
+ const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" });
+ const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" });
+
+ const empty = ez.EmptyLatentImage();
+ const sampler = ez.KSampler(
+ ckpt.outputs.MODEL,
+ pos.outputs.CONDITIONING,
+ neg.outputs.CONDITIONING,
+ empty.outputs.LATENT
+ );
+
+ const decode = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE);
+ const save = ez.SaveImage(decode.outputs.IMAGE);
+ graph.arrange();
+
+ return { ckpt, pos, neg, empty, sampler, decode, save };
+}
diff --git a/tests-ui/utils/litegraph.js b/tests-ui/utils/litegraph.js
new file mode 100644
index 00000000000..777f8c3ba13
--- /dev/null
+++ b/tests-ui/utils/litegraph.js
@@ -0,0 +1,36 @@
+const fs = require("fs");
+const path = require("path");
+const { nop } = require("../utils/nopProxy");
+
+function forEachKey(cb) {
+ for (const k of [
+ "LiteGraph",
+ "LGraph",
+ "LLink",
+ "LGraphNode",
+ "LGraphGroup",
+ "DragAndScale",
+ "LGraphCanvas",
+ "ContextMenu",
+ ]) {
+ cb(k);
+ }
+}
+
+export function setup(ctx) {
+ const lg = fs.readFileSync(path.resolve("../web/lib/litegraph.core.js"), "utf-8");
+ const globalTemp = {};
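+ // Evaluate litegraph.core.js with a no-op console so library logging stays
+ // silent in tests; the globals it defines are captured on globalTemp below.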
+ (function (console) {
+ eval(lg);
+ }).call(globalTemp, nop);
+
+ forEachKey((k) => (ctx[k] = globalTemp[k]));
+ require(path.resolve("../web/lib/litegraph.extensions.js"));
+}
+
+export function teardown(ctx) {
+ forEachKey((k) => delete ctx[k]);
+
+ // Clear document after each run
+ document.getElementsByTagName("html")[0].innerHTML = "";
+}
diff --git a/tests-ui/utils/nopProxy.js b/tests-ui/utils/nopProxy.js
new file mode 100644
index 00000000000..2502d9d03d6
--- /dev/null
+++ b/tests-ui/utils/nopProxy.js
@@ -0,0 +1,6 @@
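+// A proxy that silently absorbs any property access, assignment, call, or
+// construction; used to stub globals (e.g. console) when evaluating litegraph.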
+export const nop = new Proxy(function () {}, {
+ get: () => nop,
+ set: () => true,
+ apply: () => nop,
+ construct: () => nop,
+});
diff --git a/tests-ui/utils/setup.js b/tests-ui/utils/setup.js
new file mode 100644
index 00000000000..dd150214a34
--- /dev/null
+++ b/tests-ui/utils/setup.js
@@ -0,0 +1,49 @@
+require("../../web/scripts/api");
+
+const fs = require("fs");
+const path = require("path");
+function* walkSync(dir) {
+ const files = fs.readdirSync(dir, { withFileTypes: true });
+ for (const file of files) {
+ if (file.isDirectory()) {
+ yield* walkSync(path.join(dir, file.name));
+ } else {
+ yield path.join(dir, file.name);
+ }
+ }
+}
+
+/**
+ * @typedef { import("../../web/types/comfy").ComfyObjectInfo } ComfyObjectInfo
+ */
+
+/**
+ * @param { { mockExtensions?: string[], mockNodeDefs?: Record<string, ComfyObjectInfo> } } config
+ */
+export function mockApi({ mockExtensions, mockNodeDefs } = {}) {
+ if (!mockExtensions) {
+ mockExtensions = Array.from(walkSync(path.resolve("../web/extensions/core")))
+ .filter((x) => x.endsWith(".js"))
+ .map((x) => path.relative(path.resolve("../web"), x));
+ }
+ if (!mockNodeDefs) {
+ mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json")));
+ }
+
+ const events = new EventTarget();
+ const mockApi = {
+ addEventListener: events.addEventListener.bind(events),
+ removeEventListener: events.removeEventListener.bind(events),
+ dispatchEvent: events.dispatchEvent.bind(events),
+ getSystemStats: jest.fn(),
+ getExtensions: jest.fn(() => mockExtensions),
+ getNodeDefs: jest.fn(() => mockNodeDefs),
+ init: jest.fn(),
+ apiURL: jest.fn((x) => "../../web/" + x),
+ };
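+ // Replace the real api module so every require of ../../web/scripts/api in
+ // app code and tests resolves to this mock instead of a running server.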
+ jest.mock("../../web/scripts/api", () => ({
+ get api() {
+ return mockApi;
+ },
+ }));
+}
diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js
index 3695b08e27f..b8d83613d4b 100644
--- a/web/extensions/core/colorPalette.js
+++ b/web/extensions/core/colorPalette.js
@@ -174,6 +174,213 @@ const colorPalettes = {
"tr-odd-bg-color": "#073642",
}
},
+ },
+ "arc": {
+ "id": "arc",
+ "name": "Arc",
+ "colors": {
+ "node_slot": {
+ "BOOLEAN": "",
+ "CLIP": "#eacb8b",
+ "CLIP_VISION": "#A8DADC",
+ "CLIP_VISION_OUTPUT": "#ad7452",
+ "CONDITIONING": "#cf876f",
+ "CONTROL_NET": "#00d78d",
+ "CONTROL_NET_WEIGHTS": "",
+ "FLOAT": "",
+ "GLIGEN": "",
+ "IMAGE": "#80a1c0",
+ "IMAGEUPLOAD": "",
+ "INT": "",
+ "LATENT": "#b38ead",
+ "LATENT_KEYFRAME": "",
+ "MASK": "#a3bd8d",
+ "MODEL": "#8978a7",
+ "SAMPLER": "",
+ "SIGMAS": "",
+ "STRING": "",
+ "STYLE_MODEL": "#C2FFAE",
+ "T2I_ADAPTER_WEIGHTS": "",
+ "TAESD": "#DCC274",
+ "TIMESTEP_KEYFRAME": "",
+ "UPSCALE_MODEL": "",
+ "VAE": "#be616b"
+ },
+ "litegraph_base": {
+ "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAACXBIWXMAAAsTAAALEwEAmpwYAAABcklEQVR4nO3YMUoDARgF4RfxBqZI6/0vZqFn0MYtrLIQMFN8U6V4LAtD+Jm9XG/v30OGl2e/AP7yevz4+vx45nvgF/+QGITEICQGITEIiUFIjNNC3q43u3/YnRJyPOzeQ+0e220nhRzReC8e7R7bbdvl+Jal1Bs46jEIiUFIDEJiEBKDkBhKPbZT6qHdptRTu02p53DUYxASg5AYhMQgJAYhMZR6bKfUQ7tNqad2m1LP4ajHICQGITEIiUFIDEJiKPXYTqmHdptST+02pZ7DUY9BSAxCYhASg5AYhMRQ6rGdUg/tNqWe2m1KPYejHoOQGITEICQGITEIiaHUYzulHtptSj2125R6Dkc9BiExCIlBSAxCYhASQ6nHdko9tNuUemq3KfUcjnoMQmIQEoOQGITEICSGUo/tlHpotyn11G5T6jkc9RiExCAkBiExCIlBSAylHtsp9dBuU+qp3abUczjqMQiJQUgMQmIQEoOQGITE+AHFISNQrFTGuwAAAABJRU5ErkJggg==",
+ "CLEAR_BACKGROUND_COLOR": "#2b2f38",
+ "NODE_TITLE_COLOR": "#b2b7bd",
+ "NODE_SELECTED_TITLE_COLOR": "#FFF",
+ "NODE_TEXT_SIZE": 14,
+ "NODE_TEXT_COLOR": "#AAA",
+ "NODE_SUBTEXT_SIZE": 12,
+ "NODE_DEFAULT_COLOR": "#2b2f38",
+ "NODE_DEFAULT_BGCOLOR": "#242730",
+ "NODE_DEFAULT_BOXCOLOR": "#6e7581",
+ "NODE_DEFAULT_SHAPE": "box",
+ "NODE_BOX_OUTLINE_COLOR": "#FFF",
+ "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)",
+ "DEFAULT_GROUP_FONT": 22,
+ "WIDGET_BGCOLOR": "#2b2f38",
+ "WIDGET_OUTLINE_COLOR": "#6e7581",
+ "WIDGET_TEXT_COLOR": "#DDD",
+ "WIDGET_SECONDARY_TEXT_COLOR": "#b2b7bd",
+ "LINK_COLOR": "#9A9",
+ "EVENT_LINK_COLOR": "#A86",
+ "CONNECTING_LINK_COLOR": "#AFA"
+ },
+ "comfy_base": {
+ "fg-color": "#fff",
+ "bg-color": "#2b2f38",
+ "comfy-menu-bg": "#242730",
+ "comfy-input-bg": "#2b2f38",
+ "input-text": "#ddd",
+ "descrip-text": "#b2b7bd",
+ "drag-text": "#ccc",
+ "error-text": "#ff4444",
+ "border-color": "#6e7581",
+ "tr-even-bg-color": "#2b2f38",
+ "tr-odd-bg-color": "#242730"
+ }
+ },
+ },
+ "nord": {
+ "id": "nord",
+ "name": "Nord",
+ "colors": {
+ "node_slot": {
+ "BOOLEAN": "",
+ "CLIP": "#eacb8b",
+ "CLIP_VISION": "#A8DADC",
+ "CLIP_VISION_OUTPUT": "#ad7452",
+ "CONDITIONING": "#cf876f",
+ "CONTROL_NET": "#00d78d",
+ "CONTROL_NET_WEIGHTS": "",
+ "FLOAT": "",
+ "GLIGEN": "",
+ "IMAGE": "#80a1c0",
+ "IMAGEUPLOAD": "",
+ "INT": "",
+ "LATENT": "#b38ead",
+ "LATENT_KEYFRAME": "",
+ "MASK": "#a3bd8d",
+ "MODEL": "#8978a7",
+ "SAMPLER": "",
+ "SIGMAS": "",
+ "STRING": "",
+ "STYLE_MODEL": "#C2FFAE",
+ "T2I_ADAPTER_WEIGHTS": "",
+ "TAESD": "#DCC274",
+ "TIMESTEP_KEYFRAME": "",
+ "UPSCALE_MODEL": "",
+ "VAE": "#be616b"
+ },
+ "litegraph_base": {
+ "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFu2lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOjUwNDFhMmZjLTEzNzQtMTk0ZC1hZWY4LTYxMzM1MTVmNjUwMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJjcmVhdGVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo1MDQxYTJmYy0xMzc0LTE5NGQtYWVmOC02MTMzNTE1ZjY1MDAiIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz73jWg/AAAAyUlEQVR42u3WKwoAIBRFQRdiMb1idv9Lsxn9gEFw4Dbb8JCTojbbXEJwjJVL2HKwYMGCBQuWLbDmjr+9zrBGjHl1WVcvy2DBggULFizTWQpewSt4HzwsgwULFiwFr7MUvMtS8D54WLBgGSxYCl7BK3iXZbBgwYIFC5bpLAWv4BW8Dx6WwYIFC5aC11kK3mUpeB88LFiwDBYsBa/gFbzLMliwYMGCBct0loJX8AreBw/LYMGCBUvB6ywF77IUvA8eFixYBgsWrNfWAZPltufdad+1AAAAAElFTkSuQmCC",
+ "CLEAR_BACKGROUND_COLOR": "#212732",
+ "NODE_TITLE_COLOR": "#999",
+ "NODE_SELECTED_TITLE_COLOR": "#e5eaf0",
+ "NODE_TEXT_SIZE": 14,
+ "NODE_TEXT_COLOR": "#bcc2c8",
+ "NODE_SUBTEXT_SIZE": 12,
+ "NODE_DEFAULT_COLOR": "#2e3440",
+ "NODE_DEFAULT_BGCOLOR": "#161b22",
+ "NODE_DEFAULT_BOXCOLOR": "#545d70",
+ "NODE_DEFAULT_SHAPE": "box",
+ "NODE_BOX_OUTLINE_COLOR": "#e5eaf0",
+ "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)",
+ "DEFAULT_GROUP_FONT": 24,
+ "WIDGET_BGCOLOR": "#2e3440",
+ "WIDGET_OUTLINE_COLOR": "#545d70",
+ "WIDGET_TEXT_COLOR": "#bcc2c8",
+ "WIDGET_SECONDARY_TEXT_COLOR": "#999",
+ "LINK_COLOR": "#9A9",
+ "EVENT_LINK_COLOR": "#A86",
+ "CONNECTING_LINK_COLOR": "#AFA"
+ },
+ "comfy_base": {
+ "fg-color": "#e5eaf0",
+ "bg-color": "#2e3440",
+ "comfy-menu-bg": "#161b22",
+ "comfy-input-bg": "#2e3440",
+ "input-text": "#bcc2c8",
+ "descrip-text": "#999",
+ "drag-text": "#ccc",
+ "error-text": "#ff4444",
+ "border-color": "#545d70",
+ "tr-even-bg-color": "#2e3440",
+ "tr-odd-bg-color": "#161b22"
+ }
+ },
+ },
+ "github": {
+ "id": "github",
+ "name": "Github",
+ "colors": {
+ "node_slot": {
+ "BOOLEAN": "",
+ "CLIP": "#eacb8b",
+ "CLIP_VISION": "#A8DADC",
+ "CLIP_VISION_OUTPUT": "#ad7452",
+ "CONDITIONING": "#cf876f",
+ "CONTROL_NET": "#00d78d",
+ "CONTROL_NET_WEIGHTS": "",
+ "FLOAT": "",
+ "GLIGEN": "",
+ "IMAGE": "#80a1c0",
+ "IMAGEUPLOAD": "",
+ "INT": "",
+ "LATENT": "#b38ead",
+ "LATENT_KEYFRAME": "",
+ "MASK": "#a3bd8d",
+ "MODEL": "#8978a7",
+ "SAMPLER": "",
+ "SIGMAS": "",
+ "STRING": "",
+ "STYLE_MODEL": "#C2FFAE",
+ "T2I_ADAPTER_WEIGHTS": "",
+ "TAESD": "#DCC274",
+ "TIMESTEP_KEYFRAME": "",
+ "UPSCALE_MODEL": "",
+ "VAE": "#be616b"
+ },
+ "litegraph_base": {
+ "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAGlmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOmIyYzRhNjA5LWJmYTctYTg0MC1iOGFlLTk3MzE2ZjM1ZGIyNyIgeG1wTU06RG9jdW1lbnRJRD0iYWRvYmU6ZG9jaWQ6cGhvdG9zaG9wOjk0ZmNlZGU4LTE1MTctZmQ0MC04ZGU3LWYzOTgxM2E3ODk5ZiIgeG1wTU06T3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6MjMxYjEwYjAtYjRmYi0wMjRlLWIxMmUtMzA1MzAzY2QwN2M4IiBzdEV2dDp3aGVuPSIyMDIzLTExLTEzVDAwOjE4OjAyKzAxOjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgMjUuMSAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjQ4OWY1NzlmLTJkNjUtZWQ0Zi04OTg0LTA4NGE2MGE1ZTMzNSIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xNVQwMjowNDo1OSswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDpiMmM0YTYwOS1iZmE3LWE4NDAtYjhhZS05NzMxNmYzNWRiMjciIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4OTe6GAAAAx0lEQVR42u3WMQoAIQxFwRzJys77X8vSLiRgITif7bYbgrwYc/mKXyBoY4VVBgsWLFiwYFmOlTv+9jfDOjHmr8u6eVkGCxYsWLBgmc5S8ApewXvgYRksWLBgKXidpeBdloL3wMOCBctgwVLwCl7BuyyDBQsWLFiwTGcpeAWv4D3wsAwWLFiwFLzOUvAuS8F74GHBgmWwYCl4Ba/gXZbBggULFixYprMUvIJX8B54WAYLFixYCl5nKXiXpeA98LBgwTJYsGC9tg1o8f4TTtqzNQAAAABJRU5ErkJggg==",
+ "CLEAR_BACKGROUND_COLOR": "#040506",
+ "NODE_TITLE_COLOR": "#999",
+ "NODE_SELECTED_TITLE_COLOR": "#e5eaf0",
+ "NODE_TEXT_SIZE": 14,
+ "NODE_TEXT_COLOR": "#bcc2c8",
+ "NODE_SUBTEXT_SIZE": 12,
+ "NODE_DEFAULT_COLOR": "#161b22",
+ "NODE_DEFAULT_BGCOLOR": "#13171d",
+ "NODE_DEFAULT_BOXCOLOR": "#30363d",
+ "NODE_DEFAULT_SHAPE": "box",
+ "NODE_BOX_OUTLINE_COLOR": "#e5eaf0",
+ "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)",
+ "DEFAULT_GROUP_FONT": 24,
+ "WIDGET_BGCOLOR": "#161b22",
+ "WIDGET_OUTLINE_COLOR": "#30363d",
+ "WIDGET_TEXT_COLOR": "#bcc2c8",
+ "WIDGET_SECONDARY_TEXT_COLOR": "#999",
+ "LINK_COLOR": "#9A9",
+ "EVENT_LINK_COLOR": "#A86",
+ "CONNECTING_LINK_COLOR": "#AFA"
+ },
+ "comfy_base": {
+ "fg-color": "#e5eaf0",
+ "bg-color": "#161b22",
+ "comfy-menu-bg": "#13171d",
+ "comfy-input-bg": "#161b22",
+ "input-text": "#bcc2c8",
+ "descrip-text": "#999",
+ "drag-text": "#ccc",
+ "error-text": "#ff4444",
+ "border-color": "#30363d",
+ "tr-even-bg-color": "#161b22",
+ "tr-odd-bg-color": "#13171d"
+ }
+ },
}
};
diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js
index 152cd7043de..0a305391a4e 100644
--- a/web/extensions/core/contextMenuFilter.js
+++ b/web/extensions/core/contextMenuFilter.js
@@ -25,7 +25,7 @@ const ext = {
requestAnimationFrame(() => {
const currentNode = LGraphCanvas.active_canvas.current_node;
const clickedComboValue = currentNode.widgets
- .filter(w => w.type === "combo" && w.options.values.length === values.length)
+ ?.filter(w => w.type === "combo" && w.options.values.length === values.length)
.find(w => w.options.values.every((v, i) => v === values[i]))
?.value;
diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js
new file mode 100644
index 00000000000..450b4f5f35c
--- /dev/null
+++ b/web/extensions/core/groupNode.js
@@ -0,0 +1,1054 @@
+import { app } from "../../scripts/app.js";
+import { api } from "../../scripts/api.js";
+import { getWidgetType } from "../../scripts/widgets.js";
+import { mergeIfValid } from "./widgetInputs.js";
+
+const GROUP = Symbol();
+
+const Workflow = {
+ InUse: {
+ Free: 0,
+ Registered: 1,
+ InWorkflow: 2,
+ },
+ isInUseGroupNode(name) {
+ const id = `workflow/${name}`;
+ // Check if already registered/in use in this workflow
+ if (app.graph.extra?.groupNodes?.[name]) {
+ if (app.graph._nodes.find((n) => n.type === id)) {
+ return Workflow.InUse.InWorkflow;
+ } else {
+ return Workflow.InUse.Registered;
+ }
+ }
+ return Workflow.InUse.Free;
+ },
+ storeGroupNode(name, data) {
+ let extra = app.graph.extra;
+ if (!extra) app.graph.extra = extra = {};
+ let groupNodes = extra.groupNodes;
+ if (!groupNodes) extra.groupNodes = groupNodes = {};
+ groupNodes[name] = data;
+ },
+};
+
+class GroupNodeBuilder {
+ constructor(nodes) {
+ this.nodes = nodes;
+ }
+
+ build() {
+ const name = this.getName();
+ if (!name) return;
+
+ // Sort the nodes so they are in execution order
+ // this allows for widgets to be in the correct order when reconstructing
+ this.sortNodes();
+
+ this.nodeData = this.getNodeData();
+ Workflow.storeGroupNode(name, this.nodeData);
+
+ return { name, nodeData: this.nodeData };
+ }
+
+ getName() {
+ const name = prompt("Enter group name");
+ if (!name) return;
+ const used = Workflow.isInUseGroupNode(name);
+ switch (used) {
+ case Workflow.InUse.InWorkflow:
+ alert(
+ "An in use group node with this name already exists embedded in this workflow, please remove any instances or use a new name."
+ );
+ return;
+ case Workflow.InUse.Registered:
+ if (
+ !confirm(
+ "An group node with this name already exists embedded in this workflow, are you sure you want to overwrite it?"
+ )
+ ) {
+ return;
+ }
+ break;
+ }
+ return name;
+ }
+
+ sortNodes() {
+ // Gets the builder's nodes in graph execution order
+ const nodesInOrder = app.graph.computeExecutionOrder(false);
+ this.nodes = this.nodes
+ .map((node) => ({ index: nodesInOrder.indexOf(node), node }))
+ .sort((a, b) => a.index - b.index || a.node.id - b.node.id)
+ .map(({ node }) => node);
+ }
+
+ getNodeData() {
+ const storeLinkTypes = (config) => {
+ // Store link types for dynamically typed nodes e.g. reroutes
+ for (const link of config.links) {
+ const origin = app.graph.getNodeById(link[4]);
+ const type = origin.outputs[link[1]].type;
+ link.push(type);
+ }
+ };
+
+ const storeExternalLinks = (config) => {
+ // Store any external links to the group in the config so when rebuilding we add extra slots
+ config.external = [];
+ for (let i = 0; i < this.nodes.length; i++) {
+ const node = this.nodes[i];
+ if (!node.outputs?.length) continue;
+ for (let slot = 0; slot < node.outputs.length; slot++) {
+ let hasExternal = false;
+ const output = node.outputs[slot];
+ let type = output.type;
+ if (!output.links?.length) continue;
+ for (const l of output.links) {
+ const link = app.graph.links[l];
+ if (!link) continue;
+ if (type === "*") type = link.type;
+
+ if (!app.canvas.selected_nodes[link.target_id]) {
+ hasExternal = true;
+ break;
+ }
+ }
+ if (hasExternal) {
+ config.external.push([i, slot, type]);
+ }
+ }
+ }
+ };
+
+ // Use the built in copyToClipboard function to generate the node data we need
+ const backup = localStorage.getItem("litegrapheditor_clipboard");
+ try {
+ app.canvas.copyToClipboard(this.nodes);
+ const config = JSON.parse(localStorage.getItem("litegrapheditor_clipboard"));
+
+ storeLinkTypes(config);
+ storeExternalLinks(config);
+
+ return config;
+ } finally {
+ localStorage.setItem("litegrapheditor_clipboard", backup);
+ }
+ }
+}
+
+export class GroupNodeConfig {
+ constructor(name, nodeData) {
+ this.name = name;
+ this.nodeData = nodeData;
+ this.getLinks();
+
+ this.inputCount = 0;
+ this.oldToNewOutputMap = {};
+ this.newToOldOutputMap = {};
+ this.oldToNewInputMap = {};
+ this.oldToNewWidgetMap = {};
+ this.newToOldWidgetMap = {};
+ this.primitiveDefs = {};
+ this.widgetToPrimitive = {};
+ this.primitiveToWidget = {};
+ }
+
+ async registerType(source = "workflow") {
+ this.nodeDef = {
+ output: [],
+ output_name: [],
+ output_is_list: [],
+ name: source + "/" + this.name,
+ display_name: this.name,
+ category: "group nodes" + ("/" + source),
+ input: { required: {} },
+
+ [GROUP]: this,
+ };
+
+ this.inputs = [];
+ const seenInputs = {};
+ const seenOutputs = {};
+ for (let i = 0; i < this.nodeData.nodes.length; i++) {
+ const node = this.nodeData.nodes[i];
+ node.index = i;
+ this.processNode(node, seenInputs, seenOutputs);
+ }
+ await app.registerNodeDef("workflow/" + this.name, this.nodeDef);
+ }
+
+ getLinks() {
+ this.linksFrom = {};
+ this.linksTo = {};
+ this.externalFrom = {};
+
+ // Extract links for easy lookup
+ for (const l of this.nodeData.links) {
+ const [sourceNodeId, sourceNodeSlot, targetNodeId, targetNodeSlot] = l;
+
+ // Skip links outside the copy config
+ if (sourceNodeId == null) continue;
+
+ if (!this.linksFrom[sourceNodeId]) {
+ this.linksFrom[sourceNodeId] = {};
+ }
+ this.linksFrom[sourceNodeId][sourceNodeSlot] = l;
+
+ if (!this.linksTo[targetNodeId]) {
+ this.linksTo[targetNodeId] = {};
+ }
+ this.linksTo[targetNodeId][targetNodeSlot] = l;
+ }
+
+ if (this.nodeData.external) {
+ for (const ext of this.nodeData.external) {
+ if (!this.externalFrom[ext[0]]) {
+ this.externalFrom[ext[0]] = { [ext[1]]: ext[2] };
+ } else {
+ this.externalFrom[ext[0]][ext[1]] = ext[2];
+ }
+ }
+ }
+ }
+
+ processNode(node, seenInputs, seenOutputs) {
+ const def = this.getNodeDef(node);
+ if (!def) return;
+
+ const inputs = { ...def.input?.required, ...def.input?.optional };
+
+ this.inputs.push(this.processNodeInputs(node, seenInputs, inputs));
+ if (def.output?.length) this.processNodeOutputs(node, seenOutputs, def);
+ }
+
+ getNodeDef(node) {
+ const def = globalDefs[node.type];
+ if (def) return def;
+
+ const linksFrom = this.linksFrom[node.index];
+ if (node.type === "PrimitiveNode") {
+ // Skip as it's not linked
+ if (!linksFrom) return;
+
+ let type = linksFrom["0"][5];
+ if (type === "COMBO") {
+ // Use the array items
+ const source = node.outputs[0].widget.name;
+ const fromTypeName = this.nodeData.nodes[linksFrom["0"][2]].type;
+ const fromType = globalDefs[fromTypeName];
+ const input = fromType.input.required[source] ?? fromType.input.optional[source];
+ type = input[0];
+ }
+
+ const def = (this.primitiveDefs[node.index] = {
+ input: {
+ required: {
+ value: [type, {}],
+ },
+ },
+ output: [type],
+ output_name: [],
+ output_is_list: [],
+ });
+ return def;
+ } else if (node.type === "Reroute") {
+ const linksTo = this.linksTo[node.index];
+ if (linksTo && linksFrom && !this.externalFrom[node.index]?.[0]) {
+ // Being used internally
+ return null;
+ }
+
+ let rerouteType = "*";
+ if (linksFrom) {
+ const [, , id, slot] = linksFrom["0"];
+ rerouteType = this.nodeData.nodes[id].inputs[slot].type;
+ } else if (linksTo) {
+ const [id, slot] = linksTo["0"];
+ rerouteType = this.nodeData.nodes[id].outputs[slot].type;
+ } else {
+ // Reroute used as a pipe
+ for (const l of this.nodeData.links) {
+ if (l[2] === node.index) {
+ rerouteType = l[5];
+ break;
+ }
+ }
+ if (rerouteType === "*") {
+ // Check for an external link
+ const t = this.externalFrom[node.index]?.[0];
+ if (t) {
+ rerouteType = t;
+ }
+ }
+ }
+
+ return {
+ input: {
+ required: {
+ [rerouteType]: [rerouteType, {}],
+ },
+ },
+ output: [rerouteType],
+ output_name: [],
+ output_is_list: [],
+ };
+ }
+
+ console.warn("Skipping virtual node " + node.type + " when building group node " + this.name);
+ }
+
+ getInputConfig(node, inputName, seenInputs, config, extra) {
+ let name = node.inputs?.find((inp) => inp.name === inputName)?.label ?? inputName;
+ let prefix = "";
+ // Special handling for primitive to include the title if it is set rather than just "value"
+ if ((node.type === "PrimitiveNode" && node.title) || name in seenInputs) {
+ prefix = `${node.title ?? node.type} `;
+ name = `${prefix}${inputName}`;
+ if (name in seenInputs) {
+ name = `${prefix}${seenInputs[name]} ${inputName}`;
+ }
+ }
+ seenInputs[name] = (seenInputs[name] ?? 1) + 1;
+
+ if (inputName === "seed" || inputName === "noise_seed") {
+ if (!extra) extra = {};
+ extra.control_after_generate = `${prefix}control_after_generate`;
+ }
+ if (config[0] === "IMAGEUPLOAD") {
+ if (!extra) extra = {};
+ extra.widget = `${prefix}${config[1]?.widget ?? "image"}`;
+ }
+
+ if (extra) {
+ config = [config[0], { ...config[1], ...extra }];
+ }
+
+ return { name, config };
+ }
+
+ processWidgetInputs(inputs, node, inputNames, seenInputs) {
+ const slots = [];
+ const converted = new Map();
+ const widgetMap = (this.oldToNewWidgetMap[node.index] = {});
+ for (const inputName of inputNames) {
+ let widgetType = getWidgetType(inputs[inputName], inputName);
+ if (widgetType) {
+ const convertedIndex = node.inputs?.findIndex(
+ (inp) => inp.name === inputName && inp.widget?.name === inputName
+ );
+ if (convertedIndex > -1) {
+ // This widget has been converted to an input
+ // We need to store this in the correct position so link ids line up
+ converted.set(convertedIndex, inputName);
+ widgetMap[inputName] = null;
+ } else {
+ // Normal widget
+ const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]);
+ this.nodeDef.input.required[name] = config;
+ widgetMap[inputName] = name;
+ this.newToOldWidgetMap[name] = { node, inputName };
+ }
+ } else {
+ // Normal input
+ slots.push(inputName);
+ }
+ }
+ return { converted, slots };
+ }
+
+ checkPrimitiveConnection(link, inputName, inputs) {
+ const sourceNode = this.nodeData.nodes[link[0]];
+ if (sourceNode.type === "PrimitiveNode") {
+ // Merge link configurations
+ const [sourceNodeId, _, targetNodeId, __] = link;
+ const primitiveDef = this.primitiveDefs[sourceNodeId];
+ const targetWidget = inputs[inputName];
+ const primitiveConfig = primitiveDef.input.required.value;
+ const output = { widget: primitiveConfig };
+ const config = mergeIfValid(output, targetWidget, false, null, primitiveConfig);
+ primitiveConfig[1] = config?.customConfig ?? inputs[inputName][1] ? { ...inputs[inputName][1] } : {};
+
+ let name = this.oldToNewWidgetMap[sourceNodeId]["value"];
+ name = name.substr(0, name.length - 6);
+ primitiveConfig[1].control_after_generate = true;
+ primitiveConfig[1].control_prefix = name;
+
+ let toPrimitive = this.widgetToPrimitive[targetNodeId];
+ if (!toPrimitive) {
+ toPrimitive = this.widgetToPrimitive[targetNodeId] = {};
+ }
+ if (toPrimitive[inputName]) {
+ toPrimitive[inputName].push(sourceNodeId);
+ }
+ toPrimitive[inputName] = sourceNodeId;
+
+ let toWidget = this.primitiveToWidget[sourceNodeId];
+ if (!toWidget) {
+ toWidget = this.primitiveToWidget[sourceNodeId] = [];
+ }
+ toWidget.push({ nodeId: targetNodeId, inputName });
+ }
+ }
+
+ processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs) {
+ for (let i = 0; i < slots.length; i++) {
+ const inputName = slots[i];
+ if (linksTo[i]) {
+ this.checkPrimitiveConnection(linksTo[i], inputName, inputs);
+ // This input is linked so we can skip it
+ continue;
+ }
+
+ const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]);
+ this.nodeDef.input.required[name] = config;
+ inputMap[i] = this.inputCount++;
+ }
+ }
+
+ processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs) {
+ // Add converted widgets sorted into their index order (ordered as they were converted) so link ids match up
+ const convertedSlots = [...converted.keys()].sort().map((k) => converted.get(k));
+ for (let i = 0; i < convertedSlots.length; i++) {
+ const inputName = convertedSlots[i];
+ if (linksTo[slots.length + i]) {
+ this.checkPrimitiveConnection(linksTo[slots.length + i], inputName, inputs);
+ // This input is linked so we can skip it
+ continue;
+ }
+
+ const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName], {
+ defaultInput: true,
+ });
+ this.nodeDef.input.required[name] = config;
+ inputMap[slots.length + i] = this.inputCount++;
+ }
+ }
+
+ processNodeInputs(node, seenInputs, inputs) {
+ const inputMapping = [];
+
+ const inputNames = Object.keys(inputs);
+ if (!inputNames.length) return;
+
+ const { converted, slots } = this.processWidgetInputs(inputs, node, inputNames, seenInputs);
+ const linksTo = this.linksTo[node.index] ?? {};
+ const inputMap = (this.oldToNewInputMap[node.index] = {});
+ this.processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs);
+ this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs);
+
+ return inputMapping;
+ }
+
+ processNodeOutputs(node, seenOutputs, def) {
+ const oldToNew = (this.oldToNewOutputMap[node.index] = {});
+
+ // Add outputs
+ for (let outputId = 0; outputId < def.output.length; outputId++) {
+ const linksFrom = this.linksFrom[node.index];
+ if (linksFrom?.[outputId] && !this.externalFrom[node.index]?.[outputId]) {
+ // This output is linked internally so we can skip it
+ continue;
+ }
+
+ oldToNew[outputId] = this.nodeDef.output.length;
+ this.newToOldOutputMap[this.nodeDef.output.length] = { node, slot: outputId };
+ this.nodeDef.output.push(def.output[outputId]);
+ this.nodeDef.output_is_list.push(def.output_is_list[outputId]);
+
+ let label = def.output_name?.[outputId] ?? def.output[outputId];
+ const output = node.outputs.find((o) => o.name === label);
+ if (output?.label) {
+ label = output.label;
+ }
+ let name = label;
+ if (name in seenOutputs) {
+ const prefix = `${node.title ?? node.type} `;
+ name = `${prefix}${label}`;
+ if (name in seenOutputs) {
+ name = `${prefix}${node.index} ${label}`;
+ }
+ }
+ seenOutputs[name] = 1;
+
+ this.nodeDef.output_name.push(name);
+ }
+ }
+
+ static async registerFromWorkflow(groupNodes, missingNodeTypes) {
+ for (const g in groupNodes) {
+ const groupData = groupNodes[g];
+
+ let hasMissing = false;
+ for (const n of groupData.nodes) {
+ // Find missing node types
+ if (!(n.type in LiteGraph.registered_node_types)) {
+ missingNodeTypes.push(n.type);
+ hasMissing = true;
+ }
+ }
+
+ if (hasMissing) continue;
+
+ const config = new GroupNodeConfig(g, groupData);
+ await config.registerType();
+ }
+ }
+}
+
+export class GroupNodeHandler {
+ node;
+ groupData;
+
+ constructor(node) {
+ this.node = node;
+ this.groupData = node.constructor?.nodeData?.[GROUP];
+
+ this.node.setInnerNodes = (innerNodes) => {
+ this.innerNodes = innerNodes;
+
+ for (let innerNodeIndex = 0; innerNodeIndex < this.innerNodes.length; innerNodeIndex++) {
+ const innerNode = this.innerNodes[innerNodeIndex];
+
+ for (const w of innerNode.widgets ?? []) {
+ if (w.type === "converted-widget") {
+ w.serializeValue = w.origSerializeValue;
+ }
+ }
+
+ innerNode.index = innerNodeIndex;
+ innerNode.getInputNode = (slot) => {
+ // Check if this input is internal or external
+ const externalSlot = this.groupData.oldToNewInputMap[innerNode.index]?.[slot];
+ if (externalSlot != null) {
+ return this.node.getInputNode(externalSlot);
+ }
+
+ // Internal link
+ const innerLink = this.groupData.linksTo[innerNode.index]?.[slot];
+ if (!innerLink) return null;
+
+ const inputNode = innerNodes[innerLink[0]];
+ // Primitives will already apply their values
+ if (inputNode.type === "PrimitiveNode") return null;
+
+ return inputNode;
+ };
+
+ innerNode.getInputLink = (slot) => {
+ const externalSlot = this.groupData.oldToNewInputMap[innerNode.index]?.[slot];
+ if (externalSlot != null) {
+ // The inner node is connected via the group node inputs
+ const linkId = this.node.inputs[externalSlot].link;
+ let link = app.graph.links[linkId];
+
+ // Use the outer link, but update the target to the inner node
+ link = {
+ ...link,
+ target_id: innerNode.id,
+ target_slot: +slot,
+ };
+ return link;
+ }
+
+ let link = this.groupData.linksTo[innerNode.index]?.[slot];
+ if (!link) return null;
+ // Use the inner link, but update the origin node to be inner node id
+ link = {
+ origin_id: innerNodes[link[0]].id,
+ origin_slot: link[1],
+ target_id: innerNode.id,
+ target_slot: +slot,
+ };
+ return link;
+ };
+ }
+ };
+
+ this.node.updateLink = (link) => {
+ // Replace the group node reference with the internal node
+ link = { ...link };
+ const output = this.groupData.newToOldOutputMap[link.origin_slot];
+ let innerNode = this.innerNodes[output.node.index];
+ let l;
+ while (innerNode.type === "Reroute") {
+ l = innerNode.getInputLink(0);
+ innerNode = innerNode.getInputNode(0);
+ }
+
+ link.origin_id = innerNode.id;
+ link.origin_slot = l?.origin_slot ?? output.slot;
+ return link;
+ };
+
+ this.node.getInnerNodes = () => {
+ if (!this.innerNodes) {
+ this.node.setInnerNodes(
+ this.groupData.nodeData.nodes.map((n, i) => {
+ const innerNode = LiteGraph.createNode(n.type);
+ innerNode.configure(n);
+ innerNode.id = `${this.node.id}:${i}`;
+ return innerNode;
+ })
+ );
+ }
+
+ this.updateInnerWidgets();
+
+ return this.innerNodes;
+ };
+
+ this.node.convertToNodes = () => {
+ const addInnerNodes = () => {
+ const backup = localStorage.getItem("litegrapheditor_clipboard");
+ // Clone the node data so we don't mutate it for other nodes
+ const c = { ...this.groupData.nodeData };
+ c.nodes = [...c.nodes];
+ const innerNodes = this.node.getInnerNodes();
+ let ids = [];
+ for (let i = 0; i < c.nodes.length; i++) {
+ let id = innerNodes?.[i]?.id;
+ // Use existing IDs if they are set on the inner nodes
+ if (id == null || isNaN(id)) {
+ id = undefined;
+ } else {
+ ids.push(id);
+ }
+ c.nodes[i] = { ...c.nodes[i], id };
+ }
+ localStorage.setItem("litegrapheditor_clipboard", JSON.stringify(c));
+ app.canvas.pasteFromClipboard();
+ localStorage.setItem("litegrapheditor_clipboard", backup);
+
+ const [x, y] = this.node.pos;
+ let top;
+ let left;
+ // Configure nodes with current widget data
+ const selectedIds = ids.length ? ids : Object.keys(app.canvas.selected_nodes);
+ const newNodes = [];
+ for (let i = 0; i < selectedIds.length; i++) {
+ const id = selectedIds[i];
+ const newNode = app.graph.getNodeById(id);
+ const innerNode = innerNodes[i];
+ newNodes.push(newNode);
+
+ if (left == null || newNode.pos[0] < left) {
+ left = newNode.pos[0];
+ }
+ if (top == null || newNode.pos[1] < top) {
+ top = newNode.pos[1];
+ }
+
+ const map = this.groupData.oldToNewWidgetMap[innerNode.index];
+ if (map) {
+ const widgets = Object.keys(map);
+
+ for (const oldName of widgets) {
+ const newName = map[oldName];
+ if (!newName) continue;
+
+ const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName);
+ if (widgetIndex === -1) continue;
+
+ // Populate the main and any linked widgets
+ if (innerNode.type === "PrimitiveNode") {
+ for (let i = 0; i < newNode.widgets.length; i++) {
+ newNode.widgets[i].value = this.node.widgets[widgetIndex + i].value;
+ }
+ } else {
+ const outerWidget = this.node.widgets[widgetIndex];
+ const newWidget = newNode.widgets.find((w) => w.name === oldName);
+ if (!newWidget) continue;
+
+ newWidget.value = outerWidget.value;
+ for (let w = 0; w < outerWidget.linkedWidgets?.length; w++) {
+ newWidget.linkedWidgets[w].value = outerWidget.linkedWidgets[w].value;
+ }
+ }
+ }
+ }
+ }
+
+ // Shift each node
+ for (const newNode of newNodes) {
+ newNode.pos = [newNode.pos[0] - (left - x), newNode.pos[1] - (top - y)];
+ }
+
+ return { newNodes, selectedIds };
+ };
+
+ const reconnectInputs = (selectedIds) => {
+ for (const innerNodeIndex in this.groupData.oldToNewInputMap) {
+ const id = selectedIds[innerNodeIndex];
+ const newNode = app.graph.getNodeById(id);
+ const map = this.groupData.oldToNewInputMap[innerNodeIndex];
+ for (const innerInputId in map) {
+ const groupSlotId = map[innerInputId];
+ if (groupSlotId == null) continue;
+ const slot = node.inputs[groupSlotId];
+ if (slot.link == null) continue;
+ const link = app.graph.links[slot.link];
+ // connect this node output to the input of another node
+ const originNode = app.graph.getNodeById(link.origin_id);
+ originNode.connect(link.origin_slot, newNode, +innerInputId);
+ }
+ }
+ };
+
+ const reconnectOutputs = () => {
+ for (let groupOutputId = 0; groupOutputId < node.outputs?.length; groupOutputId++) {
+ const output = node.outputs[groupOutputId];
+ if (!output.links) continue;
+ const links = [...output.links];
+ for (const l of links) {
+ const slot = this.groupData.newToOldOutputMap[groupOutputId];
+ const link = app.graph.links[l];
+ const targetNode = app.graph.getNodeById(link.target_id);
+ const newNode = app.graph.getNodeById(selectedIds[slot.node.index]);
+ newNode.connect(slot.slot, targetNode, link.target_slot);
+ }
+ }
+ };
+
+ const { newNodes, selectedIds } = addInnerNodes();
+ reconnectInputs(selectedIds);
+ reconnectOutputs(selectedIds);
+ app.graph.remove(this.node);
+
+ return newNodes;
+ };
+
+ const getExtraMenuOptions = this.node.getExtraMenuOptions;
+ this.node.getExtraMenuOptions = function (_, options) {
+ getExtraMenuOptions?.apply(this, arguments);
+
+ let optionIndex = options.findIndex((o) => o.content === "Outputs");
+ if (optionIndex === -1) optionIndex = options.length;
+ else optionIndex++;
+ options.splice(optionIndex, 0, null, {
+ content: "Convert to nodes",
+ callback: () => {
+ return this.convertToNodes();
+ },
+ });
+ };
+
+ // Draw custom collapse icon to identify this as a group
+ const onDrawTitleBox = this.node.onDrawTitleBox;
+ this.node.onDrawTitleBox = function (ctx, height, size, scale) {
+ onDrawTitleBox?.apply(this, arguments);
+
+ const fill = ctx.fillStyle;
+ ctx.beginPath();
+ ctx.rect(11, -height + 11, 2, 2);
+ ctx.rect(14, -height + 11, 2, 2);
+ ctx.rect(17, -height + 11, 2, 2);
+ ctx.rect(11, -height + 14, 2, 2);
+ ctx.rect(14, -height + 14, 2, 2);
+ ctx.rect(17, -height + 14, 2, 2);
+ ctx.rect(11, -height + 17, 2, 2);
+ ctx.rect(14, -height + 17, 2, 2);
+ ctx.rect(17, -height + 17, 2, 2);
+
+ ctx.fillStyle = this.boxcolor || LiteGraph.NODE_DEFAULT_BOXCOLOR;
+ ctx.fill();
+ ctx.fillStyle = fill;
+ };
+
+ // Draw progress label
+ const onDrawForeground = node.onDrawForeground;
+ const groupData = this.groupData.nodeData;
+ node.onDrawForeground = function (ctx) {
+ const r = onDrawForeground?.apply?.(this, arguments);
+ if (+app.runningNodeId === this.id && this.runningInternalNodeId !== null) {
+ const n = groupData.nodes[this.runningInternalNodeId];
+ const message = `Running ${n.title || n.type} (${this.runningInternalNodeId}/${groupData.nodes.length})`;
+ ctx.save();
+ ctx.font = "12px sans-serif";
+ const sz = ctx.measureText(message);
+ ctx.fillStyle = node.boxcolor || LiteGraph.NODE_DEFAULT_BOXCOLOR;
+ ctx.beginPath();
+ ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5);
+ ctx.fill();
+
+ ctx.fillStyle = "#fff";
+ ctx.fillText(message, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6);
+ ctx.restore();
+ }
+ };
+
+ // Flag this node as needing to be reset
+ const onExecutionStart = this.node.onExecutionStart;
+ this.node.onExecutionStart = function () {
+ this.resetExecution = true;
+ return onExecutionStart?.apply(this, arguments);
+ };
+
+ function handleEvent(type, getId, getEvent) {
+ const handler = ({ detail }) => {
+ const id = getId(detail);
+ if (!id) return;
+ const node = app.graph.getNodeById(id);
+ if (node) return;
+
+ const innerNodeIndex = this.innerNodes?.findIndex((n) => n.id == id);
+ if (innerNodeIndex > -1) {
+ this.node.runningInternalNodeId = innerNodeIndex;
+ api.dispatchEvent(new CustomEvent(type, { detail: getEvent(detail, this.node.id + "", this.node) }));
+ }
+ };
+ api.addEventListener(type, handler);
+ return handler;
+ }
+
+ const executing = handleEvent.call(
+ this,
+ "executing",
+ (d) => d,
+ (d, id, node) => id
+ );
+
+ const executed = handleEvent.call(
+ this,
+ "executed",
+ (d) => d?.node,
+ (d, id, node) => ({ ...d, node: id, merge: !node.resetExecution })
+ );
+
+ const onRemoved = node.onRemoved;
+ this.node.onRemoved = function () {
+ onRemoved?.apply(this, arguments);
+ api.removeEventListener("executing", executing);
+ api.removeEventListener("executed", executed);
+ };
+ }
+
+ updateInnerWidgets() {
+ for (const newWidgetName in this.groupData.newToOldWidgetMap) {
+ const newWidget = this.node.widgets.find((w) => w.name === newWidgetName);
+ if (!newWidget) continue;
+
+ const newValue = newWidget.value;
+ const old = this.groupData.newToOldWidgetMap[newWidgetName];
+ let innerNode = this.innerNodes[old.node.index];
+
+ if (innerNode.type === "PrimitiveNode") {
+ innerNode.primitiveValue = newValue;
+ const primitiveLinked = this.groupData.primitiveToWidget[old.node.index];
+ for (const linked of primitiveLinked) {
+ const node = this.innerNodes[linked.nodeId];
+ const widget = node.widgets.find((w) => w.name === linked.inputName);
+
+ if (widget) {
+ widget.value = newValue;
+ }
+ }
+ continue;
+ }
+
+ const widget = innerNode.widgets?.find((w) => w.name === old.inputName);
+ if (widget) {
+ widget.value = newValue;
+ }
+ }
+ }
+
+ populatePrimitive(node, nodeId, oldName, i, linkedShift) {
+ // Converted widget, populate primitive if linked
+ const primitiveId = this.groupData.widgetToPrimitive[nodeId]?.[oldName];
+ if (primitiveId == null) return;
+ const targetWidgetName = this.groupData.oldToNewWidgetMap[primitiveId]["value"];
+ const targetWidgetIndex = this.node.widgets.findIndex((w) => w.name === targetWidgetName);
+ if (targetWidgetIndex > -1) {
+ const primitiveNode = this.innerNodes[primitiveId];
+ let len = primitiveNode.widgets.length;
+ if (len - 1 !== this.node.widgets[targetWidgetIndex].linkedWidgets?.length) {
+ // Fallback handling in case the primitive has a different number of widgets;
+ // we don't want to overwrite random widgets, better to leave them blank
+ len = 1;
+ }
+ for (let i = 0; i < len; i++) {
+ this.node.widgets[targetWidgetIndex + i].value = primitiveNode.widgets[i].value;
+ }
+ }
+ }
+
+ populateWidgets() {
+ for (let nodeId = 0; nodeId < this.groupData.nodeData.nodes.length; nodeId++) {
+ const node = this.groupData.nodeData.nodes[nodeId];
+
+ if (!node.widgets_values?.length) continue;
+
+ const map = this.groupData.oldToNewWidgetMap[nodeId];
+ const widgets = Object.keys(map);
+
+ let linkedShift = 0;
+ for (let i = 0; i < widgets.length; i++) {
+ const oldName = widgets[i];
+ const newName = map[oldName];
+ const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName);
+ const mainWidget = this.node.widgets[widgetIndex];
+ if (!newName) {
+ // New name will be null if its a converted widget
+ this.populatePrimitive(node, nodeId, oldName, i, linkedShift);
+
+ // Find the inner widget and shift by the number of linked widgets as they will have been removed too
+ const innerWidget = this.innerNodes[nodeId].widgets?.find((w) => w.name === oldName);
+ linkedShift += innerWidget.linkedWidgets?.length ?? 0;
+ continue;
+ }
+
+ if (widgetIndex === -1) {
+ continue;
+ }
+
+ // Populate the main and any linked widget
+ mainWidget.value = node.widgets_values[i + linkedShift];
+ for (let w = 0; w < mainWidget.linkedWidgets?.length; w++) {
+ this.node.widgets[widgetIndex + w + 1].value = node.widgets_values[i + ++linkedShift];
+ }
+ }
+ }
+ }
+
+ replaceNodes(nodes) {
+ let top;
+ let left;
+
+ for (let i = 0; i < nodes.length; i++) {
+ const node = nodes[i];
+ if (left == null || node.pos[0] < left) {
+ left = node.pos[0];
+ }
+ if (top == null || node.pos[1] < top) {
+ top = node.pos[1];
+ }
+
+ this.linkOutputs(node, i);
+ app.graph.remove(node);
+ }
+
+ this.linkInputs();
+ this.node.pos = [left, top];
+ }
+
+ linkOutputs(originalNode, nodeId) {
+ if (!originalNode.outputs) return;
+
+ for (const output of originalNode.outputs) {
+ if (!output.links) continue;
+ // Clone the links as they'll be changed if we reconnect
+ const links = [...output.links];
+ for (const l of links) {
+ const link = app.graph.links[l];
+ if (!link) continue;
+
+ const targetNode = app.graph.getNodeById(link.target_id);
+ const newSlot = this.groupData.oldToNewOutputMap[nodeId]?.[link.origin_slot];
+ if (newSlot != null) {
+ this.node.connect(newSlot, targetNode, link.target_slot);
+ }
+ }
+ }
+ }
+
+ linkInputs() {
+ for (const link of this.groupData.nodeData.links ?? []) {
+ const [, originSlot, targetId, targetSlot, actualOriginId] = link;
+ const originNode = app.graph.getNodeById(actualOriginId);
+ if (!originNode) continue; // this node is in the group
+ originNode.connect(originSlot, this.node.id, this.groupData.oldToNewInputMap[targetId][targetSlot]);
+ }
+ }
+
+ static getGroupData(node) {
+ return node.constructor?.nodeData?.[GROUP];
+ }
+
+ static isGroupNode(node) {
+ return !!node.constructor?.nodeData?.[GROUP];
+ }
+
+ static async fromNodes(nodes) {
+ // Process the nodes into the stored workflow group node data
+ const builder = new GroupNodeBuilder(nodes);
+ const res = builder.build();
+ if (!res) return;
+
+ const { name, nodeData } = res;
+
+ // Convert this data into a LG node definition and register it
+ const config = new GroupNodeConfig(name, nodeData);
+ await config.registerType();
+
+ const groupNode = LiteGraph.createNode(`workflow/${name}`);
+ // Reuse the existing nodes for this instance
+ groupNode.setInnerNodes(builder.nodes);
+ groupNode[GROUP].populateWidgets();
+ app.graph.add(groupNode);
+
+ // Remove all converted nodes and relink them
+ groupNode[GROUP].replaceNodes(builder.nodes);
+ return groupNode;
+ }
+}
+
+function addConvertToGroupOptions() {
+ function addOption(options, index) {
+ const selected = Object.values(app.canvas.selected_nodes ?? {});
+ const disabled = selected.length < 2 || selected.find((n) => GroupNodeHandler.isGroupNode(n));
+ options.splice(index + 1, null, {
+ content: `Convert to Group Node`,
+ disabled,
+ callback: async () => {
+ return await GroupNodeHandler.fromNodes(selected);
+ },
+ });
+ }
+
+ // Add to canvas
+ const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions;
+ LGraphCanvas.prototype.getCanvasMenuOptions = function () {
+ const options = getCanvasMenuOptions.apply(this, arguments);
+ const index = options.findIndex((o) => o?.content === "Add Group") + 1 || options.length;
+ addOption(options, index);
+ return options;
+ };
+
+ // Add to nodes
+ const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions;
+ LGraphCanvas.prototype.getNodeMenuOptions = function (node) {
+ const options = getNodeMenuOptions.apply(this, arguments);
+ if (!GroupNodeHandler.isGroupNode(node)) {
+ const index = options.findIndex((o) => o?.content === "Outputs") + 1 || options.length - 1;
+ addOption(options, index);
+ }
+ return options;
+ };
+}
+
+const id = "Comfy.GroupNode";
+let globalDefs;
+const ext = {
+ name: id,
+ setup() {
+ addConvertToGroupOptions();
+ },
+ async beforeConfigureGraph(graphData, missingNodeTypes) {
+ const nodes = graphData?.extra?.groupNodes;
+ if (nodes) {
+ await GroupNodeConfig.registerFromWorkflow(nodes, missingNodeTypes);
+ }
+ },
+ addCustomNodeDefs(defs) {
+ // Store this so we can mutate it later with group nodes
+ globalDefs = defs;
+ },
+ nodeCreated(node) {
+ if (GroupNodeHandler.isGroupNode(node)) {
+ node[GROUP] = new GroupNodeHandler(node);
+ }
+ },
+};
+
+app.registerExtension(ext);
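A minimal sketch (not part of the patch) of driving the group-node flow programmatically, using only the `GroupNodeHandler` API defined above; the selection check mirrors what `addConvertToGroupOptions` does in the menu callback:

```js
import { app } from "../../scripts/app.js";
import { GroupNodeHandler } from "./groupNode.js";

async function groupCurrentSelection() {
  const selected = Object.values(app.canvas.selected_nodes ?? {});
  if (selected.length < 2) return null;                          // nothing sensible to group
  const groupNode = await GroupNodeHandler.fromNodes(selected);  // prompts the user for a name
  // Later the node can be expanded back into its inner nodes:
  // groupNode.convertToNodes();
  return groupNode;
}
```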
diff --git a/web/extensions/core/groupOptions.js b/web/extensions/core/groupOptions.js
index 1d935e90aef..5dd21e73016 100644
--- a/web/extensions/core/groupOptions.js
+++ b/web/extensions/core/groupOptions.js
@@ -5,6 +5,61 @@ function setNodeMode(node, mode) {
node.graph.change();
}
+function addNodesToGroup(group, nodes=[]) {
+ var x1, y1, x2, y2;
+ var nx1, ny1, nx2, ny2;
+ var node;
+
+ x1 = y1 = x2 = y2 = -1;
+ nx1 = ny1 = nx2 = ny2 = -1;
+
+ for (var n of [group._nodes, nodes]) {
+ for (var i in n) {
+ node = n[i]
+
+ nx1 = node.pos[0]
+ ny1 = node.pos[1]
+ nx2 = node.pos[0] + node.size[0]
+ ny2 = node.pos[1] + node.size[1]
+
+ if (node.type != "Reroute") {
+ ny1 -= LiteGraph.NODE_TITLE_HEIGHT;
+ }
+
+ if (node.flags?.collapsed) {
+ ny2 = ny1 + LiteGraph.NODE_TITLE_HEIGHT;
+
+ if (node?._collapsed_width) {
+ nx2 = nx1 + Math.round(node._collapsed_width);
+ }
+ }
+
+ if (x1 == -1 || nx1 < x1) {
+ x1 = nx1;
+ }
+
+ if (y1 == -1 || ny1 < y1) {
+ y1 = ny1;
+ }
+
+ if (x2 == -1 || nx2 > x2) {
+ x2 = nx2;
+ }
+
+ if (y2 == -1 || ny2 > y2) {
+ y2 = ny2;
+ }
+ }
+ }
+
+ var padding = 10;
+
+ y1 = y1 - Math.round(group.font_size * 1.4);
+
+ group.pos = [x1 - padding, y1 - padding];
+ group.size = [x2 - x1 + padding * 2, y2 - y1 + padding * 2];
+}
+
app.registerExtension({
name: "Comfy.GroupOptions",
setup() {
@@ -14,6 +69,17 @@ app.registerExtension({
const options = orig.apply(this, arguments);
const group = this.graph.getGroupOnPos(this.graph_mouse[0], this.graph_mouse[1]);
if (!group) {
+ options.push({
+ content: "Add Group For Selected Nodes",
+ disabled: !Object.keys(app.canvas.selected_nodes || {}).length,
+ callback: () => {
+ var group = new LiteGraph.LGraphGroup();
+ addNodesToGroup(group, this.selected_nodes)
+ app.canvas.graph.add(group);
+ this.graph.change();
+ }
+ });
+
return options;
}
@@ -21,6 +87,15 @@ app.registerExtension({
group.recomputeInsideNodes();
const nodesInGroup = group._nodes;
+ options.push({
+ content: "Add Selected Nodes To Group",
+ disabled: !Object.keys(app.canvas.selected_nodes || {}).length,
+ callback: () => {
+ addNodesToGroup(group, this.selected_nodes)
+ this.graph.change();
+ }
+ });
+
// No nodes in group, return default options
if (nodesInGroup.length === 0) {
return options;
@@ -38,6 +113,23 @@ app.registerExtension({
}
}
+ options.push({
+ content: "Fit Group To Nodes",
+ callback: () => {
+ addNodesToGroup(group)
+ this.graph.change();
+ }
+ });
+
+ options.push({
+ content: "Select Nodes",
+ callback: () => {
+ this.selectNodes(nodesInGroup);
+ this.graph.change();
+ this.canvas.focus();
+ }
+ });
+
// Modes
// 0: Always
// 1: On Event
diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js
index 7059f826d74..2d4821742d1 100644
--- a/web/extensions/core/nodeTemplates.js
+++ b/web/extensions/core/nodeTemplates.js
@@ -1,5 +1,6 @@
import { app } from "../../scripts/app.js";
import { ComfyDialog, $el } from "../../scripts/ui.js";
+import { GroupNodeConfig, GroupNodeHandler } from "./groupNode.js";
// Adds the ability to save and add multiple nodes as a template
// To save:
@@ -14,6 +15,9 @@ import { ComfyDialog, $el } from "../../scripts/ui.js";
// To delete/rename:
// Right click the canvas
// Node templates -> Manage
+//
+// To rearrange:
+// Open the manage dialog and drag and drop elements using the "Name:" label as a handle
const id = "Comfy.NodeTemplates";
@@ -22,16 +26,42 @@ class ManageTemplates extends ComfyDialog {
super();
this.element.classList.add("comfy-manage-templates");
this.templates = this.load();
+ this.draggedEl = null;
+ this.saveVisualCue = null;
+ this.emptyImg = new Image();
+ this.emptyImg.src = 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=';
+
+ this.importInput = $el("input", {
+ type: "file",
+ accept: ".json",
+ multiple: true,
+ style: { display: "none" },
+ parent: document.body,
+ onchange: () => this.importAll(),
+ });
}
createButtons() {
const btns = super.createButtons();
- btns[0].textContent = "Cancel";
+ btns[0].textContent = "Close";
+ btns[0].onclick = (e) => {
+ clearTimeout(this.saveVisualCue);
+ this.close();
+ };
+ btns.unshift(
+ $el("button", {
+ type: "button",
+ textContent: "Export",
+ onclick: () => this.exportAll(),
+ })
+ );
btns.unshift(
$el("button", {
type: "button",
- textContent: "Save",
- onclick: () => this.save(),
+ textContent: "Import",
+ onclick: () => {
+ this.importInput.click();
+ },
})
);
return btns;
@@ -46,27 +76,54 @@ class ManageTemplates extends ComfyDialog {
}
}
- save() {
- // Find all visible inputs and save them as our new list
- const inputs = this.element.querySelectorAll("input");
- const updated = [];
-
- for (let i = 0; i < inputs.length; i++) {
- const input = inputs[i];
- if (input.parentElement.style.display !== "none") {
- const t = this.templates[i];
- t.name = input.value.trim() || input.getAttribute("data-name");
- updated.push(t);
+ store() {
+ localStorage.setItem(id, JSON.stringify(this.templates));
+ }
+
+ async importAll() {
+ for (const file of this.importInput.files) {
+ if (file.type === "application/json" || file.name.endsWith(".json")) {
+ const reader = new FileReader();
+ reader.onload = async () => {
+ var importFile = JSON.parse(reader.result);
+ if (importFile && importFile?.templates) {
+ for (const template of importFile.templates) {
+ if (template?.name && template?.data) {
+ this.templates.push(template);
+ }
+ }
+ this.store();
+ }
+ };
+ await reader.readAsText(file);
}
}
- this.templates = updated;
- this.store();
+ this.importInput.value = null;
+
this.close();
}
- store() {
- localStorage.setItem(id, JSON.stringify(this.templates));
+ exportAll() {
+ if (this.templates.length == 0) {
+ alert("No templates to export.");
+ return;
+ }
+
+ const json = JSON.stringify({ templates: this.templates }, null, 2); // convert the data to a JSON string
+ const blob = new Blob([json], { type: "application/json" });
+ const url = URL.createObjectURL(blob);
+ const a = $el("a", {
+ href: url,
+ download: "node_templates.json",
+ style: { display: "none" },
+ parent: document.body,
+ });
+ a.click();
+ setTimeout(function () {
+ a.remove();
+ window.URL.revokeObjectURL(url);
+ }, 0);
}
show() {
@@ -74,42 +131,155 @@ class ManageTemplates extends ComfyDialog {
super.show(
$el(
"div",
- {
- style: {
- display: "grid",
- gridTemplateColumns: "1fr auto",
- gap: "5px",
- },
- },
- this.templates.flatMap((t) => {
+ {},
+ this.templates.flatMap((t,i) => {
let nameInput;
return [
$el(
- "label",
+ "div",
{
- textContent: "Name: ",
+ dataset: { id: i },
+ className: "tempateManagerRow",
+ style: {
+ display: "grid",
+ gridTemplateColumns: "1fr auto",
+ border: "1px dashed transparent",
+ gap: "5px",
+ backgroundColor: "var(--comfy-menu-bg)"
+ },
+ ondragstart: (e) => {
+ this.draggedEl = e.currentTarget;
+ e.currentTarget.style.opacity = "0.6";
+ e.currentTarget.style.border = "1px dashed yellow";
+ e.dataTransfer.effectAllowed = 'move';
+ e.dataTransfer.setDragImage(this.emptyImg, 0, 0);
+ },
+ ondragend: (e) => {
+ e.target.style.opacity = "1";
+ e.currentTarget.style.border = "1px dashed transparent";
+ e.currentTarget.removeAttribute("draggable");
+
+ // rearrange the elements in the localStorage
+ this.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => {
+ var prev_i = el.dataset.id;
+
+ if ( el == this.draggedEl && prev_i != i ) {
+ [this.templates[i], this.templates[prev_i]] = [this.templates[prev_i], this.templates[i]];
+ }
+ el.dataset.id = i;
+ });
+ this.store();
+ },
+ ondragover: (e) => {
+ e.preventDefault();
+ if ( e.currentTarget == this.draggedEl )
+ return;
+
+ let rect = e.currentTarget.getBoundingClientRect();
+ if (e.clientY > rect.top + rect.height / 2) {
+ e.currentTarget.parentNode.insertBefore(this.draggedEl, e.currentTarget.nextSibling);
+ } else {
+ e.currentTarget.parentNode.insertBefore(this.draggedEl, e.currentTarget);
+ }
+ }
},
[
- $el("input", {
- value: t.name,
- dataset: { name: t.name },
- $: (el) => (nameInput = el),
- }),
+ $el(
+ "label",
+ {
+ textContent: "Name: ",
+ style: {
+ cursor: "grab",
+ },
+ onmousedown: (e) => {
+ // enable dragging only from the label
+ if (e.target.localName == 'label')
+ e.currentTarget.parentNode.draggable = 'true';
+ }
+ },
+ [
+ $el("input", {
+ value: t.name,
+ dataset: { name: t.name },
+ style: {
+ transitionProperty: 'background-color',
+ transitionDuration: '0s',
+ },
+ onchange: (e) => {
+ clearTimeout(this.saveVisualCue);
+ var el = e.target;
+ var row = el.parentNode.parentNode;
+ this.templates[row.dataset.id].name = el.value.trim() || 'untitled';
+ this.store();
+ el.style.backgroundColor = 'rgb(40, 95, 40)';
+ el.style.transitionDuration = '0s';
+ this.saveVisualCue = setTimeout(function () {
+ el.style.transitionDuration = '.7s';
+ el.style.backgroundColor = 'var(--comfy-input-bg)';
+ }, 15);
+ },
+ onkeypress: (e) => {
+ var el = e.target;
+ clearTimeout(this.saveVisualCue);
+ el.style.transitionDuration = '0s';
+ el.style.backgroundColor = 'var(--comfy-input-bg)';
+ },
+ $: (el) => (nameInput = el),
+ })
+ ]
+ ),
+ $el(
+ "div",
+ {},
+ [
+ $el("button", {
+ textContent: "Export",
+ style: {
+ fontSize: "12px",
+ fontWeight: "normal",
+ },
+ onclick: (e) => {
+ const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string
+ const blob = new Blob([json], {type: "application/json"});
+ const url = URL.createObjectURL(blob);
+ const a = $el("a", {
+ href: url,
+ download: (nameInput.value || t.name) + ".json",
+ style: {display: "none"},
+ parent: document.body,
+ });
+ a.click();
+ setTimeout(function () {
+ a.remove();
+ window.URL.revokeObjectURL(url);
+ }, 0);
+ },
+ }),
+ $el("button", {
+ textContent: "Delete",
+ style: {
+ fontSize: "12px",
+ color: "red",
+ fontWeight: "normal",
+ },
+ onclick: (e) => {
+ const item = e.target.parentNode.parentNode;
+ item.parentNode.removeChild(item);
+ this.templates.splice(item.dataset.id*1, 1);
+ this.store();
+ // update the row indexes; setTimeout ensures that the list is updated
+ var that = this;
+ setTimeout(function (){
+ that.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => {
+ el.dataset.id = i;
+ });
+ }, 0);
+ },
+ }),
+ ]
+ ),
]
- ),
- $el("button", {
- textContent: "Delete",
- style: {
- fontSize: "12px",
- color: "red",
- fontWeight: "normal",
- },
- onclick: (e) => {
- nameInput.value = "";
- e.target.style.display = "none";
- e.target.previousElementSibling.style.display = "none";
- },
- }),
+ )
];
})
)
@@ -122,11 +292,11 @@ app.registerExtension({
setup() {
const manage = new ManageTemplates();
- const clipboardAction = (cb) => {
+ const clipboardAction = async (cb) => {
// We use the clipboard functions but dont want to overwrite the current user clipboard
// Restore it after we've run our callback
const old = localStorage.getItem("litegrapheditor_clipboard");
- cb();
+ await cb();
localStorage.setItem("litegrapheditor_clipboard", old);
};
@@ -140,13 +310,31 @@ app.registerExtension({
disabled: !Object.keys(app.canvas.selected_nodes || {}).length,
callback: () => {
const name = prompt("Enter name");
- if (!name || !name.trim()) return;
+ if (!name?.trim()) return;
clipboardAction(() => {
app.canvas.copyToClipboard();
+ let data = localStorage.getItem("litegrapheditor_clipboard");
+ data = JSON.parse(data);
+ const nodeIds = Object.keys(app.canvas.selected_nodes);
+ for (let i = 0; i < nodeIds.length; i++) {
+ const node = app.graph.getNodeById(nodeIds[i]);
+ const nodeData = node?.constructor.nodeData;
+
+ let groupData = GroupNodeHandler.getGroupData(node);
+ if (groupData) {
+ groupData = groupData.nodeData;
+ if (!data.groupNodes) {
+ data.groupNodes = {};
+ }
+ data.groupNodes[nodeData.name] = groupData;
+ data.nodes[i].type = nodeData.name;
+ }
+ }
+
manage.templates.push({
name,
- data: localStorage.getItem("litegrapheditor_clipboard"),
+ data: JSON.stringify(data),
});
manage.store();
});
@@ -154,29 +342,31 @@ app.registerExtension({
});
// Map each template to a menu item
- const subItems = manage.templates.map((t) => ({
- content: t.name,
- callback: () => {
- clipboardAction(() => {
- localStorage.setItem("litegrapheditor_clipboard", t.data);
- app.canvas.pasteFromClipboard();
- });
- },
- }));
-
- if (subItems.length) {
- subItems.push(null, {
- content: "Manage",
- callback: () => manage.show(),
- });
-
- options.push({
- content: "Node Templates",
- submenu: {
- options: subItems,
+ const subItems = manage.templates.map((t) => {
+ return {
+ content: t.name,
+ callback: () => {
+ clipboardAction(async () => {
+ const data = JSON.parse(t.data);
+ await GroupNodeConfig.registerFromWorkflow(data.groupNodes, {});
+ localStorage.setItem("litegrapheditor_clipboard", t.data);
+ app.canvas.pasteFromClipboard();
+ });
},
- });
- }
+ };
+ });
+
+ subItems.push(null, {
+ content: "Manage",
+ callback: () => manage.show(),
+ });
+
+ options.push({
+ content: "Node Templates",
+ submenu: {
+ options: subItems,
+ },
+ });
return options;
};
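For reference, a hypothetical sketch (all values invented) of the JSON shape `exportAll()` writes and `importAll()` accepts: each template's `data` field is itself a JSON string of the clipboard graph, now optionally carrying a `groupNodes` map for embedded group nodes:

```js
const exportedFile = {
  templates: [
    {
      name: "example template",                 // hypothetical name
      data: JSON.stringify({
        nodes: [ /* serialized LiteGraph nodes */ ],
        links: [ /* serialized links */ ],
        groupNodes: { /* group name -> stored group node data, when present */ },
      }),
    },
  ],
};
```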
diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js
new file mode 100644
index 00000000000..c6613b0f02d
--- /dev/null
+++ b/web/extensions/core/undoRedo.js
@@ -0,0 +1,150 @@
+import { app } from "../../scripts/app.js";
+
+const MAX_HISTORY = 50;
+
+let undo = [];
+let redo = [];
+let activeState = null;
+let isOurLoad = false;
+function checkState() {
+ const currentState = app.graph.serialize();
+ if (!graphEqual(activeState, currentState)) {
+ undo.push(activeState);
+ if (undo.length > MAX_HISTORY) {
+ undo.shift();
+ }
+ activeState = clone(currentState);
+ redo.length = 0;
+ }
+}
+
+const loadGraphData = app.loadGraphData;
+app.loadGraphData = async function () {
+ const v = await loadGraphData.apply(this, arguments);
+ if (isOurLoad) {
+ isOurLoad = false;
+ } else {
+ checkState();
+ }
+ return v;
+};
+
+function clone(obj) {
+ try {
+ if (typeof structuredClone !== "undefined") {
+ return structuredClone(obj);
+ }
+ } catch (error) {
+ // structuredClone is stricter than using JSON.parse/stringify so fall back to that
+ }
+
+ return JSON.parse(JSON.stringify(obj));
+}
+
+function graphEqual(a, b, root = true) {
+ if (a === b) return true;
+
+ if (typeof a == "object" && a && typeof b == "object" && b) {
+ const keys = Object.getOwnPropertyNames(a);
+
+ if (keys.length != Object.getOwnPropertyNames(b).length) {
+ return false;
+ }
+
+ for (const key of keys) {
+ let av = a[key];
+ let bv = b[key];
+ if (root && key === "nodes") {
+ // Nodes need to be sorted as the order changes when selecting nodes
+ av = [...av].sort((a, b) => a.id - b.id);
+ bv = [...bv].sort((a, b) => a.id - b.id);
+ }
+ if (!graphEqual(av, bv, false)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+const undoRedo = async (e) => {
+ if (e.ctrlKey || e.metaKey) {
+ if (e.key === "y") {
+ const prevState = redo.pop();
+ if (prevState) {
+ undo.push(activeState);
+ isOurLoad = true;
+ await app.loadGraphData(prevState);
+ activeState = prevState;
+ }
+ return true;
+ } else if (e.key === "z") {
+ const prevState = undo.pop();
+ if (prevState) {
+ redo.push(activeState);
+ isOurLoad = true;
+ await app.loadGraphData(prevState);
+ activeState = prevState;
+ }
+ return true;
+ }
+ }
+};
+
+const bindInput = (activeEl) => {
+ if (activeEl?.tagName !== "CANVAS" && activeEl?.tagName !== "BODY") {
+ for (const evt of ["change", "input", "blur"]) {
+ if (`on${evt}` in activeEl) {
+ const listener = () => {
+ checkState();
+ activeEl.removeEventListener(evt, listener);
+ };
+ activeEl.addEventListener(evt, listener);
+ return true;
+ }
+ }
+ }
+};
+
+window.addEventListener(
+ "keydown",
+ (e) => {
+ requestAnimationFrame(async () => {
+ const activeEl = document.activeElement;
+ if (activeEl?.tagName === "INPUT" || activeEl?.type === "textarea") {
+ // Ignore events on inputs, they have their native history
+ return;
+ }
+
+ // Check if this is a Ctrl+Z / Ctrl+Y undo/redo shortcut
+ if (await undoRedo(e)) return;
+
+ // If our active element is some type of input then handle changes after they're done
+ if (bindInput(activeEl)) return;
+ checkState();
+ });
+ },
+ true
+);
+
+// Handle clicking DOM elements (e.g. widgets)
+window.addEventListener("mouseup", () => {
+ checkState();
+});
+
+// Handle litegraph clicks
+const processMouseUp = LGraphCanvas.prototype.processMouseUp;
+LGraphCanvas.prototype.processMouseUp = function (e) {
+ const v = processMouseUp.apply(this, arguments);
+ checkState();
+ return v;
+};
+const processMouseDown = LGraphCanvas.prototype.processMouseDown;
+LGraphCanvas.prototype.processMouseDown = function (e) {
+ const v = processMouseDown.apply(this, arguments);
+ checkState();
+ return v;
+};
diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index 606605f0a96..b6fa411f7e1 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -1,8 +1,15 @@
-import { ComfyWidgets, addValueControlWidget } from "../../scripts/widgets.js";
+import { ComfyWidgets, addValueControlWidgets } from "../../scripts/widgets.js";
import { app } from "../../scripts/app.js";
const CONVERTED_TYPE = "converted-widget";
const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"];
+const CONFIG = Symbol();
+const GET_CONFIG = Symbol();
+
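+// Look up a widget's original input definition (required or optional) from the node's nodeData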
+function getConfig(widgetName) {
+ const { nodeData } = this.constructor;
+ return nodeData?.input?.required[widgetName] ?? nodeData?.input?.optional?.[widgetName];
+}
function isConvertableWidget(widget, config) {
return (VALID_TYPES.includes(widget.type) || VALID_TYPES.includes(config[0])) && !widget.options?.forceInput;
@@ -55,12 +62,12 @@ function showWidget(widget) {
function convertToInput(node, widget, config) {
hideWidget(node, widget);
- const { linkType } = getWidgetType(config);
+ const { type } = getWidgetType(config);
// Add input and store widget config for creating on primitive node
const sz = node.size;
- node.addInput(widget.name, linkType, {
- widget: { name: widget.name, config },
+ node.addInput(widget.name, type, {
+ widget: { name: widget.name, [GET_CONFIG]: () => config },
});
for (const widget of node.widgets) {
@@ -87,12 +94,135 @@ function convertToWidget(node, widget) {
function getWidgetType(config) {
// Special handling for COMBO so we restrict links based on the entries
let type = config[0];
- let linkType = type;
if (type instanceof Array) {
type = "COMBO";
- linkType = linkType.join(",");
}
- return { type, linkType };
+ return { type };
+}
+
+
+function isValidCombo(combo, obj) {
+ // New input isn't a combo
+ if (!(obj instanceof Array)) {
+ console.log(`connection rejected: tried to connect combo to ${obj}`);
+ return false;
+ }
+ // New input combo has a different size
+ if (combo.length !== obj.length) {
+ console.log(`connection rejected: combo lists dont match`);
+ return false;
+ }
+ // New input combo has different elements
+ if (combo.find((v, i) => obj[i] !== v)) {
+ console.log(`connection rejected: combo lists dont match`);
+ return false;
+ }
+
+ return true;
+}
+
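+// Check that two widget configs are compatible; for numeric inputs, merge min/max/step into a combined config stored on the output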
+export function mergeIfValid(output, config2, forceUpdate, recreateWidget, config1) {
+ if (!config1) {
+ config1 = output.widget[CONFIG] ?? output.widget[GET_CONFIG]();
+ }
+
+ if (config1[0] instanceof Array) {
+ if (!isValidCombo(config1[0], config2[0])) return false;
+ } else if (config1[0] !== config2[0]) {
+ // Types don't match
+ console.log(`connection rejected: types dont match`, config1[0], config2[0]);
+ return false;
+ }
+
+ const keys = new Set([...Object.keys(config1[1] ?? {}), ...Object.keys(config2[1] ?? {})]);
+
+ let customConfig;
+ const getCustomConfig = () => {
+ if (!customConfig) {
+ if (typeof structuredClone === "undefined") {
+ customConfig = JSON.parse(JSON.stringify(config1[1] ?? {}));
+ } else {
+ customConfig = structuredClone(config1[1] ?? {});
+ }
+ }
+ return customConfig;
+ };
+
+ const isNumber = config1[0] === "INT" || config1[0] === "FLOAT";
+ for (const k of keys.values()) {
+ if (k !== "default" && k !== "forceInput" && k !== "defaultInput") {
+ let v1 = config1[1][k];
+ let v2 = config2[1]?.[k];
+
+ if (v1 === v2 || (!v1 && !v2)) continue;
+
+ if (isNumber) {
+ if (k === "min") {
+ const theirMax = config2[1]?.["max"];
+ if (theirMax != null && v1 > theirMax) {
+ console.log("connection rejected: min > max", v1, theirMax);
+ return false;
+ }
+ getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2);
+ continue;
+ } else if (k === "max") {
+ const theirMin = config2[1]?.["min"];
+ if (theirMin != null && v1 < theirMin) {
+ console.log("connection rejected: max < min", v1, theirMin);
+ return false;
+ }
+ getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.min(v1, v2);
+ continue;
+ } else if (k === "step") {
+ let step;
+ if (v1 == null) {
+ // No current step
+ step = v2;
+ } else if (v2 == null) {
+ // No new step
+ step = v1;
+ } else {
+ if (v1 < v2) {
+ // Ensure v1 is larger for the mod
+ const a = v2;
+ v2 = v1;
+ v1 = a;
+ }
+ if (v1 % v2) {
+ console.log("connection rejected: steps not divisible", "current:", v1, "new:", v2);
+ return false;
+ }
+
+ step = v1;
+ }
+
+ getCustomConfig()[k] = step;
+ continue;
+ }
+ }
+
+ console.log(`connection rejected: config ${k} values dont match`, v1, v2);
+ return false;
+ }
+ }
+
+ if (customConfig || forceUpdate) {
+ if (customConfig) {
+ output.widget[CONFIG] = [config1[0], customConfig];
+ }
+
+ const widget = recreateWidget?.call(this);
+ // When deleting a node this can be null
+ if (widget) {
+ const min = widget.options.min;
+ const max = widget.options.max;
+ if (min != null && widget.value < min) widget.value = min;
+ if (max != null && widget.value > max) widget.value = max;
+ widget.callback(widget.value);
+ }
+ }
+
+ return { customConfig };
}
app.registerExtension({
@@ -116,7 +246,7 @@ app.registerExtension({
callback: () => convertToWidget(this, w),
});
} else {
- const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}];
+ const config = getConfig.call(this, w.name) ?? [w.type, w.options || {}];
if (isConvertableWidget(w, config)) {
toInput.push({
content: `Convert ${w.name} to input`,
@@ -137,33 +267,67 @@ app.registerExtension({
return r;
};
- const origOnNodeCreated = nodeType.prototype.onNodeCreated
+ nodeType.prototype.onGraphConfigured = function () {
+ if (!this.inputs) return;
+
+ for (const input of this.inputs) {
+ if (input.widget) {
+ if (!input.widget[GET_CONFIG]) {
+ input.widget[GET_CONFIG] = () => getConfig.call(this, input.widget.name);
+ }
+
+ // Cleanup old widget config
+ if (input.widget.config) {
+ if (input.widget.config[0] instanceof Array) {
+ // If we are an old converted combo then replace the input type and the stored link data
+ input.type = "COMBO";
+
+ const link = app.graph.links[input.link];
+ if (link) {
+ link.type = input.type;
+ }
+ }
+ delete input.widget.config;
+ }
+
+ const w = this.widgets.find((w) => w.name === input.widget.name);
+ if (w) {
+ hideWidget(this, w);
+ } else {
+ convertToWidget(this, input);
+ }
+ }
+ }
+ };
+
+ const origOnNodeCreated = nodeType.prototype.onNodeCreated;
nodeType.prototype.onNodeCreated = function () {
const r = origOnNodeCreated ? origOnNodeCreated.apply(this) : undefined;
- if (this.widgets) {
+
+ // When node is created, convert any force/default inputs
+ if (!app.configuringGraph && this.widgets) {
for (const w of this.widgets) {
if (w?.options?.forceInput || w?.options?.defaultInput) {
- const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}];
+ const config = getConfig.call(this, w.name) ?? [w.type, w.options || {}];
convertToInput(this, w, config);
}
}
}
+
return r;
- }
+ };
- // On initial configure of nodes hide all converted widgets
const origOnConfigure = nodeType.prototype.onConfigure;
nodeType.prototype.onConfigure = function () {
const r = origOnConfigure ? origOnConfigure.apply(this, arguments) : undefined;
-
- if (this.inputs) {
+ if (!app.configuringGraph && this.inputs) {
+ // On copy + paste of nodes, ensure that widget configs are set up
for (const input of this.inputs) {
- if (input.widget && !input.widget.config[1]?.forceInput) {
+ if (input.widget && !input.widget[GET_CONFIG]) {
+ input.widget[GET_CONFIG] = () => getConfig.call(this, input.widget.name);
const w = this.widgets.find((w) => w.name === input.widget.name);
if (w) {
hideWidget(this, w);
- } else {
- convertToWidget(this, input)
}
}
}
@@ -190,7 +354,7 @@ app.registerExtension({
const input = this.inputs[slot];
if (!input.widget || !input[ignoreDblClick]) {
// Not a widget input or already handled input
- if (!(input.type in ComfyWidgets) && !(input.widget.config?.[0] instanceof Array)) {
+ if (!(input.type in ComfyWidgets) && !(input.widget[GET_CONFIG]?.()?.[0] instanceof Array)) {
return r; //also Not a ComfyWidgets input or combo (do nothing)
}
}
@@ -217,6 +381,28 @@ app.registerExtension({
return r;
};
+
+ // Prevent connecting COMBO lists to converted inputs that don't match types
+ const onConnectInput = nodeType.prototype.onConnectInput;
+ nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
+ const v = onConnectInput?.(this, arguments);
+ // Not a combo, ignore
+ if (type !== "COMBO") return v;
+ // Primitive output, allow that to handle
+ if (originNode.outputs[originSlot].widget) return v;
+
+ // Ensure target is also a combo
+ const targetCombo = this.inputs[targetSlot].widget?.[GET_CONFIG]?.()?.[0];
+ if (!targetCombo || !(targetCombo instanceof Array)) return v;
+
+ // Check they match
+ const originConfig = originNode.constructor?.nodeData?.output?.[originSlot];
+ if (!originConfig || !isValidCombo(targetCombo, originConfig)) {
+ return false;
+ }
+
+ return v;
+ };
},
registerCustomNodes() {
class PrimitiveNode {
@@ -226,7 +412,7 @@ app.registerExtension({
this.isVirtualNode = true;
}
- applyToGraph() {
+ applyToGraph(extraLinks = []) {
if (!this.outputs[0].links?.length) return;
function get_links(node) {
@@ -243,10 +429,9 @@ app.registerExtension({
return links;
}
- let links = get_links(this);
+ let links = [...get_links(this).map((l) => app.graph.links[l]), ...extraLinks];
// For each output link copy our value over the original widget value
- for (const l of links) {
- const linkInfo = app.graph.links[l];
+ for (const linkInfo of links) {
const node = this.graph.getNodeById(linkInfo.target_id);
const input = node.inputs[linkInfo.target_slot];
const widgetName = input.widget.name;
@@ -262,20 +447,55 @@ app.registerExtension({
}
}
+ refreshComboInNode() {
+ const widget = this.widgets?.[0];
+ if (widget?.type === "combo") {
+ widget.options.values = this.outputs[0].widget[GET_CONFIG]()[0];
+
+ if (!widget.options.values.includes(widget.value)) {
+ widget.value = widget.options.values[0];
+ widget.callback(widget.value);
+ }
+ }
+ }
+
+ onAfterGraphConfigured() {
+ if (this.outputs[0].links?.length && !this.widgets?.length) {
+ if (!this.#onFirstConnection()) return;
+
+ // Populate widget values from config data
+ if (this.widgets) {
+ for (let i = 0; i < this.widgets_values.length; i++) {
+ const w = this.widgets[i];
+ if (w) {
+ w.value = this.widgets_values[i];
+ }
+ }
+ }
+
+ // Merge values if required
+ this.#mergeWidgetConfig();
+ }
+ }
+
onConnectionsChange(_, index, connected) {
+ if (app.configuringGraph) {
+ // Don't run while the graph is still setting up
+ return;
+ }
+
+ const links = this.outputs[0].links;
if (connected) {
- if (this.outputs[0].links?.length) {
- if (!this.widgets?.length) {
- this.#onFirstConnection();
- }
- if (!this.widgets?.length && this.outputs[0].widget) {
- // On first load it often cant recreate the widget as the other node doesnt exist yet
- // Manually recreate it from the output info
- this.#createWidget(this.outputs[0].widget.config);
- }
+ if (links?.length && !this.widgets?.length) {
+ this.#onFirstConnection();
+ }
+ } else {
+ // We may have removed a link that caused the constraints to change
+ this.#mergeWidgetConfig();
+
+ if (!links?.length) {
+ this.#onLastDisconnect();
}
- } else if (!this.outputs[0].links?.length) {
- this.#onLastDisconnect();
}
}
@@ -288,11 +508,16 @@ app.registerExtension({
}
if (this.outputs[slot].links?.length) {
- return this.#isValidConnection(input);
+ const valid = this.#isValidConnection(input);
+ if (valid) {
+ // On connect of additional outputs, copy our value to their widget
+ this.applyToGraph([{ target_id: target_node.id, target_slot }]);
+ }
+ return valid;
}
}
- #onFirstConnection() {
+ #onFirstConnection(recreating) {
// First connection can fire before the graph is ready on initial load so random things can be missing
const linkId = this.outputs[0].links[0];
const link = this.graph.links[linkId];
@@ -304,26 +529,27 @@ app.registerExtension({
const input = theirNode.inputs[link.target_slot];
if (!input) return;
-
- var _widget;
+ let widget;
if (!input.widget) {
if (!(input.type in ComfyWidgets)) return;
- _widget = { "name": input.name, "config": [input.type, {}] }//fake widget
+ widget = { name: input.name, [GET_CONFIG]: () => [input.type, {}] }; //fake widget
} else {
- _widget = input.widget;
+ widget = input.widget;
}
- const widget = _widget;
- const { type, linkType } = getWidgetType(widget.config);
+ const config = widget[GET_CONFIG]?.();
+ if (!config) return;
+
+ const { type } = getWidgetType(config);
// Update our output to restrict to the widget type
- this.outputs[0].type = linkType;
+ this.outputs[0].type = type;
this.outputs[0].name = type;
this.outputs[0].widget = widget;
- this.#createWidget(widget.config, theirNode, widget.name);
+ this.#createWidget(widget[CONFIG] ?? config, theirNode, widget.name, recreating);
}
- #createWidget(inputData, node, widgetName) {
+ #createWidget(inputData, node, widgetName, recreating) {
let type = inputData[0];
if (type instanceof Array) {
@@ -334,7 +560,7 @@ app.registerExtension({
if (type in ComfyWidgets) {
widget = (ComfyWidgets[type](this, "value", inputData, app) || {}).widget;
} else {
- widget = this.addWidget(type, "value", null, () => { }, {});
+ widget = this.addWidget(type, "value", null, () => {}, {});
}
if (node?.widgets && widget) {
@@ -344,8 +570,16 @@ app.registerExtension({
}
}
- if (widget.type === "number" || widget.type === "combo") {
- addValueControlWidget(this, widget, "fixed");
+ if (!inputData?.[1]?.control_after_generate && (widget.type === "number" || widget.type === "combo")) {
+ let control_value = this.widgets_values?.[1];
+ if (!control_value) {
+ control_value = "fixed";
+ }
+ addValueControlWidgets(this, widget, control_value, undefined, inputData);
+ let filter = this.widgets_values?.[2];
+ if(filter && this.widgets.length === 3) {
+ this.widgets[2].value = filter;
+ }
}
// When our value changes, update other widgets to reflect our changes
@@ -358,60 +592,75 @@ app.registerExtension({
return r;
};
- // Grow our node if required
- const sz = this.computeSize();
- if (this.size[0] < sz[0]) {
- this.size[0] = sz[0];
- }
- if (this.size[1] < sz[1]) {
- this.size[1] = sz[1];
+ if (!recreating) {
+ // Grow our node if required
+ const sz = this.computeSize();
+ if (this.size[0] < sz[0]) {
+ this.size[0] = sz[0];
+ }
+ if (this.size[1] < sz[1]) {
+ this.size[1] = sz[1];
+ }
+
+ requestAnimationFrame(() => {
+ if (this.onResize) {
+ this.onResize(this.size);
+ }
+ });
}
+ }
- requestAnimationFrame(() => {
- if (this.onResize) {
- this.onResize(this.size);
- }
- });
+ #recreateWidget() {
+ const values = this.widgets.map((w) => w.value);
+ this.#removeWidgets();
+ this.#onFirstConnection(true);
+ for (let i = 0; i < this.widgets?.length; i++) this.widgets[i].value = values[i];
+ return this.widgets[0];
}
- #isValidConnection(input) {
- // Only allow connections where the configs match
- const config1 = this.outputs[0].widget.config;
- const config2 = input.widget.config;
-
- if (config1[0] instanceof Array) {
- // These checks shouldnt actually be necessary as the types should match
- // but double checking doesn't hurt
-
- // New input isnt a combo
- if (!(config2[0] instanceof Array)) return false;
- // New imput combo has a different size
- if (config1[0].length !== config2[0].length) return false;
- // New input combo has different elements
- if (config1[0].find((v, i) => config2[0][i] !== v)) return false;
- } else if (config1[0] !== config2[0]) {
- // Configs dont match
- return false;
+ #mergeWidgetConfig() {
+ // Merge widget configs if the node has multiple outputs
+ const output = this.outputs[0];
+ const links = output.links;
+
+ const hasConfig = !!output.widget[CONFIG];
+ if (hasConfig) {
+ delete output.widget[CONFIG];
}
- for (const k in config1[1]) {
- if (k !== "default" && k !== 'forceInput') {
- if (config1[1][k] !== config2[1][k]) {
- return false;
- }
+ if (links?.length < 2 && hasConfig) {
+ // Copy the widget options from the source
+ if (links.length) {
+ this.#recreateWidget();
}
+
+ return;
}
- return true;
+ const config1 = output.widget[GET_CONFIG]();
+ const isNumber = config1[0] === "INT" || config1[0] === "FLOAT";
+ if (!isNumber) return;
+
+ for (const linkId of links) {
+ const link = app.graph.links[linkId];
+ if (!link) continue; // Can be null when removing a node
+
+ const theirNode = app.graph.getNodeById(link.target_id);
+ const theirInput = theirNode.inputs[link.target_slot];
+
+ // Call #isValidConnection so it can merge the configs while validating
+ this.#isValidConnection(theirInput, hasConfig);
+ }
}
- #onLastDisconnect() {
- // We cant remove + re-add the output here as if you drag a link over the same link
- // it removes, then re-adds, causing it to break
- this.outputs[0].type = "*";
- this.outputs[0].name = "connect to widget input";
- delete this.outputs[0].widget;
+ #isValidConnection(input, forceUpdate) {
+ // Only allow connections where the configs match
+ const output = this.outputs[0];
+ const config2 = input.widget[GET_CONFIG]();
+ return !!mergeIfValid.call(this, output, config2, forceUpdate, this.#recreateWidget);
+ }
+ #removeWidgets() {
if (this.widgets) {
// Allow widgets to cleanup
for (const w of this.widgets) {
@@ -422,6 +671,16 @@ app.registerExtension({
this.widgets.length = 0;
}
}
+
+ #onLastDisconnect() {
+ // We can't remove and re-add the output here: dragging a link over the same link
+ // removes then re-adds it, causing it to break
+ this.outputs[0].type = "*";
+ this.outputs[0].name = "connect to widget input";
+ delete this.outputs[0].widget;
+
+ this.#removeWidgets();
+ }
}
LiteGraph.registerNodeType(
diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js
index f81c83a8a4c..f571edb30b8 100644
--- a/web/lib/litegraph.core.js
+++ b/web/lib/litegraph.core.js
@@ -2533,7 +2533,7 @@
var w = this.widgets[i];
if(!w)
continue;
- if(w.options && w.options.property && this.properties[ w.options.property ])
+ if(w.options && w.options.property && (this.properties[ w.options.property ] != undefined))
w.value = JSON.parse( JSON.stringify( this.properties[ w.options.property ] ) );
}
if (info.widgets_values) {
@@ -3796,7 +3796,7 @@
out = out || new Float32Array(4);
out[0] = this.pos[0] - 4;
out[1] = this.pos[1] - LiteGraph.NODE_TITLE_HEIGHT;
- out[2] = this.size[0] + 4;
+ out[2] = this.flags.collapsed ? (this._collapsed_width || LiteGraph.NODE_COLLAPSED_WIDTH) : this.size[0] + 4;
out[3] = this.flags.collapsed ? LiteGraph.NODE_TITLE_HEIGHT : this.size[1] + LiteGraph.NODE_TITLE_HEIGHT;
if (this.onBounding) {
@@ -5714,10 +5714,10 @@ LGraphNode.prototype.executeAction = function(action)
* @method enableWebGL
**/
LGraphCanvas.prototype.enableWebGL = function() {
- if (typeof GL === undefined) {
+ if (typeof GL === "undefined") {
throw "litegl.js must be included to use a WebGL canvas";
}
- if (typeof enableWebGLCanvas === undefined) {
+ if (typeof enableWebGLCanvas === "undefined") {
throw "webglCanvas.js must be included to use this feature";
}
@@ -7110,15 +7110,16 @@ LGraphNode.prototype.executeAction = function(action)
}
};
- LGraphCanvas.prototype.copyToClipboard = function() {
+ LGraphCanvas.prototype.copyToClipboard = function(nodes) {
var clipboard_info = {
nodes: [],
links: []
};
var index = 0;
var selected_nodes_array = [];
- for (var i in this.selected_nodes) {
- var node = this.selected_nodes[i];
+ if (!nodes) nodes = this.selected_nodes;
+ for (var i in nodes) {
+ var node = nodes[i];
if (node.clonable === false)
continue;
node._relative_id = index;
@@ -11702,7 +11703,7 @@ LGraphNode.prototype.executeAction = function(action)
default:
iS = 0; // try with first if no name set
}
- if (typeof options.node_from.outputs[iS] !== undefined){
+ if (typeof options.node_from.outputs[iS] !== "undefined"){
if (iS!==false && iS>-1){
options.node_from.connectByType( iS, node, options.node_from.outputs[iS].type );
}
@@ -11730,7 +11731,7 @@ LGraphNode.prototype.executeAction = function(action)
default:
iS = 0; // try with first if no name set
}
- if (typeof options.node_to.inputs[iS] !== undefined){
+ if (typeof options.node_to.inputs[iS] !== "undefined"){
if (iS!==false && iS>-1){
// try connection
options.node_to.connectByTypeOutput(iS,node,options.node_to.inputs[iS].type);
diff --git a/web/scripts/api.js b/web/scripts/api.js
index b1d245d73ff..9aa7528af04 100644
--- a/web/scripts/api.js
+++ b/web/scripts/api.js
@@ -254,9 +254,9 @@ class ComfyApi extends EventTarget {
* Gets the prompt execution history
* @returns Prompt history including node outputs
*/
- async getHistory() {
+ async getHistory(max_items=200) {
try {
- const res = await this.fetchApi("/history");
+ const res = await this.fetchApi(`/history?max_items=${max_items}`);
return { History: Object.values(await res.json()) };
} catch (error) {
console.error(error);
diff --git a/web/scripts/app.js b/web/scripts/app.js
index ba9a438516c..397439bf13c 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -1,9 +1,28 @@
import { ComfyLogging } from "./logging.js";
-import { ComfyWidgets } from "./widgets.js";
+import { ComfyWidgets, getWidgetType } from "./widgets.js";
import { ComfyUI, $el } from "./ui.js";
import { api } from "./api.js";
import { defaultGraph } from "./defaultGraph.js";
-import { getPngMetadata, importA1111, getLatentMetadata } from "./pnginfo.js";
+import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js";
+import { addDomClippingSetting } from "./domWidget.js";
+import { createImageHost, calculateImageGrid } from "./ui/imagePreview.js"
+
+export const ANIM_PREVIEW_WIDGET = "$$comfy_animation_preview"
+
+function sanitizeNodeName(string) {
+ let entityMap = {
+ '&': '',
+ '<': '',
+ '>': '',
+ '"': '',
+ "'": '',
+ '`': '',
+ '=': ''
+ };
+ return String(string).replace(/[&<>"'`=]/g, function fromEntityMap (s) {
+ return entityMap[s];
+ });
+}
/**
* @typedef {import("types/comfy").ComfyExtension} ComfyExtension
@@ -389,7 +408,9 @@ export class ComfyApp {
return shiftY;
}
- node.prototype.setSizeForImage = function () {
+ node.prototype.setSizeForImage = function (force) {
+ if(!force && this.animatedImages) return;
+
if (this.inputHeight) {
this.setSize(this.size);
return;
@@ -406,13 +427,20 @@ export class ComfyApp {
let imagesChanged = false
const output = app.nodeOutputs[this.id + ""];
- if (output && output.images) {
+ if (output?.images) {
+ this.animatedImages = output?.animated?.find(Boolean);
if (this.images !== output.images) {
this.images = output.images;
imagesChanged = true;
- imgURLs = imgURLs.concat(output.images.map(params => {
- return api.apiURL("/view?" + new URLSearchParams(params).toString() + app.getPreviewFormatParam());
- }))
+ imgURLs = imgURLs.concat(
+ output.images.map((params) => {
+ return api.apiURL(
+ "/view?" +
+ new URLSearchParams(params).toString() +
+ (this.animatedImages ? "" : app.getPreviewFormatParam())
+ );
+ })
+ );
}
}
@@ -450,8 +478,77 @@ export class ComfyApp {
}
}
- if (this.imgs && this.imgs.length) {
- const canvas = graph.list_of_graphcanvas[0];
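+ // Work out a grid of square cells (columns, rows, cell size) that fits n images into a w x h area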
+ function calculateGrid(w, h, n) {
+ let columns, rows, cellsize;
+
+ if (w > h) {
+ cellsize = h;
+ columns = Math.ceil(w / cellsize);
+ rows = Math.ceil(n / columns);
+ } else {
+ cellsize = w;
+ rows = Math.ceil(h / cellsize);
+ columns = Math.ceil(n / rows);
+ }
+
+ while (columns * rows < n) {
+ cellsize++;
+ if (w >= h) {
+ columns = Math.ceil(w / cellsize);
+ rows = Math.ceil(n / columns);
+ } else {
+ rows = Math.ceil(h / cellsize);
+ columns = Math.ceil(n / rows);
+ }
+ }
+
+ const cell_size = Math.min(w/columns, h/rows);
+ return {cell_size, columns, rows};
+ }
+
+ function is_all_same_aspect_ratio(imgs) {
+ // assume: imgs.length >= 2
+ let ratio = imgs[0].naturalWidth/imgs[0].naturalHeight;
+
+ for(let i=1; i w.name === ANIM_PREVIEW_WIDGET);
+
+ if(this.animatedImages) {
+ // Instead of using the canvas we'll use an IMG element
+ if(widgetIdx > -1) {
+ // Replace content
+ const widget = this.widgets[widgetIdx];
+ widget.options.host.updateImages(this.imgs);
+ } else {
+ const host = createImageHost(this);
+ this.setSizeForImage(true);
+ const widget = this.addDOMWidget(ANIM_PREVIEW_WIDGET, "img", host.el, {
+ host,
+ getHeight: host.getHeight,
+ onDraw: host.onDraw,
+ hideOnZoom: false
+ });
+ widget.serializeValue = () => undefined;
+ widget.options.host.updateImages(this.imgs);
+ }
+ return;
+ }
+
+ if (widgetIdx > -1) {
+ this.widgets[widgetIdx].onRemove?.();
+ this.widgets.splice(widgetIdx, 1);
+ }
+
+ const canvas = app.graph.list_of_graphcanvas[0];
const mouse = canvas.graph_mouse;
if (!canvas.pointer_is_down && this.pointerDown) {
if (mouse[0] === this.pointerDown.pos[0] && mouse[1] === this.pointerDown.pos[1]) {
@@ -460,45 +557,37 @@ export class ComfyApp {
this.pointerDown = null;
}
- let w = this.imgs[0].naturalWidth;
- let h = this.imgs[0].naturalHeight;
let imageIndex = this.imageIndex;
const numImages = this.imgs.length;
if (numImages === 1 && !imageIndex) {
this.imageIndex = imageIndex = 0;
}
- const shiftY = getImageTop(this);
+ const top = getImageTop(this);
+ var shiftY = top;
let dw = this.size[0];
let dh = this.size[1];
dh -= shiftY;
if (imageIndex == null) {
- let best = 0;
- let cellWidth;
- let cellHeight;
- let cols = 0;
- let shiftX = 0;
- for (let c = 1; c <= numImages; c++) {
- const rows = Math.ceil(numImages / c);
- const cW = dw / c;
- const cH = dh / rows;
- const scaleX = cW / w;
- const scaleY = cH / h;
-
- const scale = Math.min(scaleX, scaleY, 1);
- const imageW = w * scale;
- const imageH = h * scale;
- const area = imageW * imageH * numImages;
-
- if (area > best) {
- best = area;
- cellWidth = imageW;
- cellHeight = imageH;
- cols = c;
- shiftX = c * ((cW - imageW) / 2);
- }
+ var cellWidth, cellHeight, shiftX, cell_padding, cols;
+
+ const compact_mode = is_all_same_aspect_ratio(this.imgs);
+ if(!compact_mode) {
+ // use rectangle cell style and border line
+ cell_padding = 2;
+ const { cell_size, columns, rows } = calculateGrid(dw, dh, numImages);
+ cols = columns;
+
+ cellWidth = cell_size;
+ cellHeight = cell_size;
+ shiftX = (dw-cell_size*cols)/2;
+ shiftY = (dh-cell_size*rows)/2 + top;
+ }
+ else {
+ cell_padding = 0;
+ ({ cellWidth, cellHeight, cols, shiftX } = calculateImageGrid(this.imgs, dw, dh));
}
let anyHovered = false;
@@ -542,7 +631,14 @@ export class ComfyApp {
let imgWidth = ratio * img.width;
let imgX = col * cellWidth + shiftX + (cellWidth - imgWidth)/2;
- ctx.drawImage(img, imgX, imgY, imgWidth, imgHeight);
+ ctx.drawImage(img, imgX+cell_padding, imgY+cell_padding, imgWidth-cell_padding*2, imgHeight-cell_padding*2);
+ if(!compact_mode) {
+ // rectangle cell and border line style
+ ctx.strokeStyle = "#8F8F8F";
+ ctx.lineWidth = 1;
+ ctx.strokeRect(x+cell_padding, y+cell_padding, cellWidth-cell_padding*2, cellHeight-cell_padding*2);
+ }
+
ctx.filter = "none";
}
@@ -552,6 +648,9 @@ export class ComfyApp {
}
} else {
// Draw individual
+ let w = this.imgs[imageIndex].naturalWidth;
+ let h = this.imgs[imageIndex].naturalHeight;
+
const scaleX = dw / w;
const scaleY = dh / h;
const scale = Math.min(scaleX, scaleY, 1);
@@ -594,14 +693,14 @@ export class ComfyApp {
};
if (numImages > 1) {
- if (drawButton(x + w - 35, y + h - 35, 30, `${this.imageIndex + 1}/${numImages}`)) {
+ if (drawButton(dw - 40, dh + top - 40, 30, `${this.imageIndex + 1}/${numImages}`)) {
let i = this.imageIndex + 1 >= numImages ? 0 : this.imageIndex + 1;
if (!this.pointerDown || !this.pointerDown.index === i) {
this.pointerDown = { index: i, pos: [...mouse] };
}
}
- if (drawButton(x + w - 35, y + 5, 30, `x`)) {
+ if (drawButton(dw - 40, top + 10, 30, `x`)) {
if (!this.pointerDown || !this.pointerDown.index === null) {
this.pointerDown = { index: null, pos: [...mouse] };
}
@@ -680,7 +779,7 @@ export class ComfyApp {
* Adds a handler on paste that extracts and loads images or workflows from pasted JSON data
*/
#addPasteHandler() {
- document.addEventListener("paste", (e) => {
+ document.addEventListener("paste", async (e) => {
// ctrl+shift+v is used to paste nodes with connections
// this is handled by litegraph
if(this.shiftDown) return;
@@ -728,7 +827,7 @@ export class ComfyApp {
}
if (workflow && workflow.version && workflow.nodes && workflow.extra) {
- this.loadGraphData(workflow);
+ await this.loadGraphData(workflow);
}
else {
if (e.target.type === "text" || e.target.type === "textarea") {
@@ -861,6 +960,16 @@ export class ComfyApp {
block_default = true;
}
+ // Alt + C collapse/uncollapse
+ if (e.key === 'c' && e.altKey) {
+ if (this.selected_nodes) {
+ for (var i in this.selected_nodes) {
+ this.selected_nodes[i].collapse()
+ }
+ }
+ block_default = true;
+ }
+
// Ctrl+C Copy
if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) {
// Trigger onCopy
@@ -1068,7 +1177,19 @@ export class ComfyApp {
});
api.addEventListener("executed", ({ detail }) => {
- this.nodeOutputs[detail.node] = detail.output;
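+ // When an execution result is flagged as a merge, append array outputs to the existing entry instead of replacing it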
+ const output = this.nodeOutputs[detail.node];
+ if (detail.merge && output) {
+ for (const k in detail.output ?? {}) {
+ const v = output[k];
+ if (v instanceof Array) {
+ output[k] = v.concat(detail.output[k]);
+ } else {
+ output[k] = detail.output[k];
+ }
+ }
+ } else {
+ this.nodeOutputs[detail.node] = detail.output;
+ }
const node = this.graph.getNodeById(detail.node);
if (node) {
if (node.onExecuted)
@@ -1114,6 +1235,40 @@ export class ComfyApp {
});
}
+ #addConfigureHandler() {
+ const app = this;
+ const configure = LGraph.prototype.configure;
+ // Flag that the graph is configuring to prevent nodes from running checks while it's still loading
+ LGraph.prototype.configure = function () {
+ app.configuringGraph = true;
+ try {
+ return configure.apply(this, arguments);
+ } finally {
+ app.configuringGraph = false;
+ }
+ };
+ }
+
+ #addAfterConfigureHandler() {
+ const app = this;
+ const onConfigure = app.graph.onConfigure;
+ app.graph.onConfigure = function () {
+ // Fire callbacks before onConfigure; this is used by widget inputs to set up their config
+ for (const node of app.graph._nodes) {
+ node.onGraphConfigured?.();
+ }
+
+ const r = onConfigure?.apply(this, arguments);
+
+ // Fire after onConfigure; used by primitives to generate widgets using the input node's config
+ for (const node of app.graph._nodes) {
+ node.onAfterGraphConfigured?.();
+ }
+
+ return r;
+ };
+ }
+
/**
* Loads all extensions from the API into the window in parallel
*/
@@ -1145,10 +1300,16 @@ export class ComfyApp {
canvasEl.tabIndex = "1";
document.body.prepend(canvasEl);
+ addDomClippingSetting();
this.#addProcessMouseHandler();
this.#addProcessKeyHandler();
+ this.#addConfigureHandler();
+ this.#addApiUpdateHandlers();
this.graph = new LGraph();
+
+ this.#addAfterConfigureHandler();
+
const canvas = (this.canvas = new LGraphCanvas(canvasEl, this.graph));
this.ctx = canvasEl.getContext("2d");
@@ -1180,7 +1341,7 @@ export class ComfyApp {
const json = localStorage.getItem("workflow");
if (json) {
const workflow = JSON.parse(json);
- this.loadGraphData(workflow);
+ await this.loadGraphData(workflow);
restored = true;
}
} catch (err) {
@@ -1189,7 +1350,7 @@ export class ComfyApp {
// We failed to restore a workflow so load the default
if (!restored) {
- this.loadGraphData();
+ await this.loadGraphData();
}
// Save current workflow automatically
@@ -1197,7 +1358,6 @@ export class ComfyApp {
this.#addDrawNodeHandler();
this.#addDrawGroupsHandler();
- this.#addApiUpdateHandlers();
this.#addDropHandler();
this.#addCopyHandler();
this.#addPasteHandler();
@@ -1217,11 +1377,86 @@ export class ComfyApp {
await this.#invokeExtensionsAsync("registerCustomNodes");
}
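+ // Build a LiteGraph node class from a backend node definition and register it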
+ async registerNodeDef(nodeId, nodeData) {
+ const self = this;
+ const node = Object.assign(
+ function ComfyNode() {
+ var inputs = nodeData["input"]["required"];
+ if (nodeData["input"]["optional"] != undefined) {
+ inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"]);
+ }
+ const config = { minWidth: 1, minHeight: 1 };
+ for (const inputName in inputs) {
+ const inputData = inputs[inputName];
+ const type = inputData[0];
+ const extraInfo = {};
+
+ let widgetCreated = true;
+ const widgetType = getWidgetType(inputData, inputName);
+ if(widgetType) {
+ if(widgetType === "COMBO") {
+ Object.assign(config, self.widgets.COMBO(this, inputName, inputData, app) || {});
+ } else {
+ Object.assign(config, self.widgets[widgetType](this, inputName, inputData, app) || {});
+ }
+ } else {
+ // Node connection inputs
+ if (inputData[1]?.multiple) {
+ extraInfo.multiple = true;
+ extraInfo.shape = LiteGraph.GRID_SHAPE;
+ }
+ this.addInput(inputName, type, extraInfo);
+ widgetCreated = false;
+ }
+
+ if(widgetCreated && inputData[1]?.forceInput && config?.widget) {
+ if (!config.widget.options) config.widget.options = {};
+ config.widget.options.forceInput = inputData[1].forceInput;
+ }
+ if(widgetCreated && inputData[1]?.defaultInput && config?.widget) {
+ if (!config.widget.options) config.widget.options = {};
+ config.widget.options.defaultInput = inputData[1].defaultInput;
+ }
+ }
+
+ for (const o in nodeData["output"]) {
+ let output = nodeData["output"][o];
+ if(output instanceof Array) output = "COMBO";
+ const outputName = nodeData["output_name"][o] || output;
+ const outputShape = nodeData["output_is_list"][o] ? LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ;
+ this.addOutput(outputName, output, { shape: outputShape });
+ }
+
+ const s = this.computeSize();
+ s[0] = Math.max(config.minWidth, s[0] * 1.5);
+ s[1] = Math.max(config.minHeight, s[1]);
+ this.size = s;
+ this.serialize_widgets = true;
+
+ app.#invokeExtensionsAsync("nodeCreated", this);
+ },
+ {
+ title: nodeData.display_name || nodeData.name,
+ comfyClass: nodeData.name,
+ nodeData
+ }
+ );
+ node.prototype.comfyClass = nodeData.name;
+
+ this.#addNodeContextMenuHandler(node);
+ this.#addDrawBackgroundHandler(node, app);
+ this.#addNodeKeyHandler(node);
+
+ await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData);
+ LiteGraph.registerNodeType(nodeId, node);
+ node.category = nodeData.category;
+ }
+
async registerNodesFromDefs(defs) {
await this.#invokeExtensionsAsync("addCustomNodeDefs", defs);
// Generate list of known widgets
- const widgets = Object.assign(
+ this.widgets = Object.assign(
{},
ComfyWidgets,
...(await this.#invokeExtensionsAsync("getCustomWidgets")).filter(Boolean)
@@ -1229,109 +1464,96 @@ export class ComfyApp {
// Register a node for each definition
for (const nodeId in defs) {
- const nodeData = defs[nodeId];
- const node = Object.assign(
- function ComfyNode() {
- var inputs = nodeData["input"]["required"];
- if (nodeData["input"]["optional"] != undefined){
- inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"])
- }
- const config = { minWidth: 1, minHeight: 1 };
- for (const inputName in inputs) {
- const inputData = inputs[inputName];
- const type = inputData[0];
- const extraInfo = {};
-
- let widgetCreated = true;
- if (Array.isArray(type)) {
- // Enums
- Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {});
- } else if (`${type}:${inputName}` in widgets) {
- // Support custom widgets by Type:Name
- Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {});
- } else if (type in widgets) {
- // Standard type widgets
- Object.assign(config, widgets[type](this, inputName, inputData, app) || {});
- } else {
- // Node connection inputs
- if (inputData[1]?.multiple) {
- extraInfo.multiple = true;
- extraInfo.shape = LiteGraph.GRID_SHAPE;
- }
- this.addInput(inputName, type, extraInfo);
- widgetCreated = false;
- }
+ this.registerNodeDef(nodeId, defs[nodeId]);
+ }
+ }
- if(widgetCreated && inputData[1]?.forceInput && config?.widget) {
- if (!config.widget.options) config.widget.options = {};
- config.widget.options.forceInput = inputData[1].forceInput;
- }
- if(widgetCreated && inputData[1]?.defaultInput && config?.widget) {
- if (!config.widget.options) config.widget.options = {};
- config.widget.options.defaultInput = inputData[1].defaultInput;
- }
- }
+ loadTemplateData(templateData) {
+ if (!templateData?.templates) {
+ return;
+ }
- for (const o in nodeData["output"]) {
- const output = nodeData["output"][o];
- const outputName = nodeData["output_name"][o] || output;
- const outputShape = nodeData["output_is_list"][o] ? LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ;
- this.addOutput(outputName, output, { shape: outputShape });
- }
+ const old = localStorage.getItem("litegrapheditor_clipboard");
- const s = this.computeSize();
- s[0] = Math.max(config.minWidth, s[0] * 1.5);
- s[1] = Math.max(config.minHeight, s[1]);
- this.size = s;
- this.serialize_widgets = true;
-
- app.#invokeExtensionsAsync("nodeCreated", this);
- },
- {
- title: nodeData.display_name || nodeData.name,
- comfyClass: nodeData.name,
- }
- );
- node.prototype.comfyClass = nodeData.name;
+ var maxY, nodeBottom, node;
- this.#addNodeContextMenuHandler(node);
- this.#addDrawBackgroundHandler(node, app);
- this.#addNodeKeyHandler(node);
+ for (const template of templateData.templates) {
+ if (!template?.data) {
+ continue;
+ }
- await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData);
- LiteGraph.registerNodeType(nodeId, node);
- node.category = nodeData.category;
+ localStorage.setItem("litegrapheditor_clipboard", template.data);
+ app.canvas.pasteFromClipboard();
+
+ // Move mouse position down to paste the next template below
+
+ maxY = false;
+
+ for (const i in app.canvas.selected_nodes) {
+ node = app.canvas.selected_nodes[i];
+
+ nodeBottom = node.pos[1] + node.size[1];
+
+ if (maxY === false || nodeBottom > maxY) {
+ maxY = nodeBottom;
+ }
+ }
+
+ app.canvas.graph_mouse[1] = maxY + 50;
}
+
+ localStorage.setItem("litegrapheditor_clipboard", old);
+ }
+
+ showMissingNodesError(missingNodeTypes, hasAddedNodes = true) {
+ this.ui.dialog.show(
+ $el("div", [
+ $el("span", { textContent: "When loading the graph, the following node types were not found: " }),
+ $el(
+ "ul",
+ Array.from(new Set(missingNodeTypes)).map((t) => $el("li", { textContent: t }))
+ ),
+ ...(hasAddedNodes ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] : []),
+ ])
+ );
+ this.logging.addEntry("Comfy.App", "warn", {
+ MissingNodes: missingNodeTypes,
+ });
}
/**
* Populates the graph with the specified workflow data
* @param {*} graphData A serialized graph object
*/
- loadGraphData(graphData) {
+ async loadGraphData(graphData) {
this.clean();
let reset_invalid_values = false;
if (!graphData) {
- if (typeof structuredClone === "undefined")
- {
- graphData = JSON.parse(JSON.stringify(defaultGraph));
- }else
- {
- graphData = structuredClone(defaultGraph);
- }
+ graphData = defaultGraph;
reset_invalid_values = true;
}
+ if (typeof structuredClone === "undefined")
+ {
+ graphData = JSON.parse(JSON.stringify(graphData));
+ }else
+ {
+ graphData = structuredClone(graphData);
+ }
+
const missingNodeTypes = [];
+ await this.#invokeExtensionsAsync("beforeConfigureGraph", graphData, missingNodeTypes);
for (let n of graphData.nodes) {
// Patch T2IAdapterLoader to ControlNetLoader since they are the same node now
if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader";
if (n.type == "ConditioningAverage ") n.type = "ConditioningAverage"; //typo fix
+ if (n.type == "SDV_img2vid_Conditioning") n.type = "SVD_img2vid_Conditioning"; //typo fix
// Find missing node types
if (!(n.type in LiteGraph.registered_node_types)) {
missingNodeTypes.push(n.type);
+ n.type = sanitizeNodeName(n.type);
}
}
@@ -1421,14 +1643,7 @@ export class ComfyApp {
}
if (missingNodeTypes.length) {
- this.ui.dialog.show(
- `When loading the graph, the following node types were not found: <ul>${Array.from(new Set(missingNodeTypes)).map(
- (t) => `<li>${t}</li>`
- ).join("")}</ul>Nodes that have failed to load will show as red on the graph.`
- );
- this.logging.addEntry("Comfy.App", "warn", {
- MissingNodes: missingNodeTypes,
- });
+ this.showMissingNodesError(missingNodeTypes);
}
}
@@ -1437,86 +1652,98 @@ export class ComfyApp {
* @returns The workflow and node links
*/
async graphToPrompt() {
+ for (const outerNode of this.graph.computeExecutionOrder(false)) {
+ const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode];
+ for (const node of innerNodes) {
+ if (node.isVirtualNode) {
+ // Don't serialize frontend only nodes but let them make changes
+ if (node.applyToGraph) {
+ node.applyToGraph();
+ }
+ }
+ }
+ }
+
const workflow = this.graph.serialize();
const output = {};
// Process nodes in order of execution
- for (const node of this.graph.computeExecutionOrder(false)) {
- const n = workflow.nodes.find((n) => n.id === node.id);
-
- if (node.isVirtualNode) {
- // Don't serialize frontend only nodes but let them make changes
- if (node.applyToGraph) {
- node.applyToGraph(workflow);
+ for (const outerNode of this.graph.computeExecutionOrder(false)) {
+ const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode];
+ for (const node of innerNodes) {
+ if (node.isVirtualNode) {
+ continue;
}
- continue;
- }
- if (node.mode === 2 || node.mode === 4) {
- // Don't serialize muted nodes
- continue;
- }
+ if (node.mode === 2 || node.mode === 4) {
+ // Don't serialize muted nodes
+ continue;
+ }
- const inputs = {};
- const widgets = node.widgets;
+ const inputs = {};
+ const widgets = node.widgets;
- // Store all widget values
- if (widgets) {
- for (const i in widgets) {
- const widget = widgets[i];
- if (!widget.options || widget.options.serialize !== false) {
- inputs[widget.name] = widget.serializeValue ? await widget.serializeValue(n, i) : widget.value;
+ // Store all widget values
+ if (widgets) {
+ for (const i in widgets) {
+ const widget = widgets[i];
+ if (!widget.options || widget.options.serialize !== false) {
+ inputs[widget.name] = widget.serializeValue ? await widget.serializeValue(node, i) : widget.value;
+ }
}
}
- }
- // Store all node links
- for (let i in node.inputs) {
- let parent = node.getInputNode(i);
- if (parent) {
- let link = node.getInputLink(i);
- while (parent.mode === 4 || parent.isVirtualNode) {
- let found = false;
- if (parent.isVirtualNode) {
- link = parent.getInputLink(link.origin_slot);
- if (link) {
- parent = parent.getInputNode(link.target_slot);
- if (parent) {
- found = true;
+ // Store all node links
+ for (let i in node.inputs) {
+ let parent = node.getInputNode(i);
+ if (parent) {
+ let link = node.getInputLink(i);
+ while (parent.mode === 4 || parent.isVirtualNode) {
+ let found = false;
+ if (parent.isVirtualNode) {
+ link = parent.getInputLink(link.origin_slot);
+ if (link) {
+ parent = parent.getInputNode(link.target_slot);
+ if (parent) {
+ found = true;
+ }
}
- }
- } else if (link && parent.mode === 4) {
- let all_inputs = [link.origin_slot];
- if (parent.inputs) {
- all_inputs = all_inputs.concat(Object.keys(parent.inputs))
- for (let parent_input in all_inputs) {
- parent_input = all_inputs[parent_input];
- if (parent.inputs[parent_input].type === node.inputs[i].type) {
- link = parent.getInputLink(parent_input);
- if (link) {
- parent = parent.getInputNode(parent_input);
+ } else if (link && parent.mode === 4) {
+ let all_inputs = [link.origin_slot];
+ if (parent.inputs) {
+ all_inputs = all_inputs.concat(Object.keys(parent.inputs))
+ for (let parent_input in all_inputs) {
+ parent_input = all_inputs[parent_input];
+ if (parent.inputs[parent_input]?.type === node.inputs[i].type) {
+ link = parent.getInputLink(parent_input);
+ if (link) {
+ parent = parent.getInputNode(parent_input);
+ }
+ found = true;
+ break;
}
- found = true;
- break;
}
}
}
- }
- if (!found) {
- break;
+ if (!found) {
+ break;
+ }
}
- }
- if (link) {
- inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)];
+ if (link) {
+ if (parent?.updateLink) {
+ link = parent.updateLink(link);
+ }
+ inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)];
+ }
}
}
- }
- output[String(node.id)] = {
- inputs,
- class_type: node.comfyClass,
- };
+ output[String(node.id)] = {
+ inputs,
+ class_type: node.comfyClass,
+ };
+ }
}
// Remove inputs connected to removed nodes
@@ -1636,25 +1863,86 @@ export class ComfyApp {
const pngInfo = await getPngMetadata(file);
if (pngInfo) {
if (pngInfo.workflow) {
- this.loadGraphData(JSON.parse(pngInfo.workflow));
+ await this.loadGraphData(JSON.parse(pngInfo.workflow));
} else if (pngInfo.parameters) {
importA1111(this.graph, pngInfo.parameters);
}
}
+ } else if (file.type === "image/webp") {
+ const pngInfo = await getWebpMetadata(file);
+ if (pngInfo) {
+ if (pngInfo.workflow) {
+ this.loadGraphData(JSON.parse(pngInfo.workflow));
+ } else if (pngInfo.Workflow) {
+ this.loadGraphData(JSON.parse(pngInfo.Workflow)); // Support loading workflows from that webp custom node.
+ }
+ }
} else if (file.type === "application/json" || file.name?.endsWith(".json")) {
const reader = new FileReader();
- reader.onload = () => {
- this.loadGraphData(JSON.parse(reader.result));
+ reader.onload = async () => {
+ const jsonContent = JSON.parse(reader.result);
+ if (jsonContent?.templates) {
+ this.loadTemplateData(jsonContent);
+ } else if(this.isApiJson(jsonContent)) {
+ this.loadApiJson(jsonContent);
+ } else {
+ await this.loadGraphData(jsonContent);
+ }
};
reader.readAsText(file);
} else if (file.name?.endsWith(".latent") || file.name?.endsWith(".safetensors")) {
const info = await getLatentMetadata(file);
if (info.workflow) {
- this.loadGraphData(JSON.parse(info.workflow));
+ await this.loadGraphData(JSON.parse(info.workflow));
}
}
}
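+ // API-format workflows map node ids to objects that each declare a class_type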
+ isApiJson(data) {
+ return Object.values(data).every((v) => v.class_type);
+ }
+
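+ // Rebuild a graph from API-format JSON: create a node per entry, then wire array inputs as links and apply other inputs as widget values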
+ loadApiJson(apiData) {
+ const missingNodeTypes = Object.values(apiData).filter((n) => !LiteGraph.registered_node_types[n.class_type]);
+ if (missingNodeTypes.length) {
+ this.showMissingNodesError(missingNodeTypes.map(t => t.class_type), false);
+ return;
+ }
+
+ const ids = Object.keys(apiData);
+ app.graph.clear();
+ for (const id of ids) {
+ const data = apiData[id];
+ const node = LiteGraph.createNode(data.class_type);
+ node.id = isNaN(+id) ? id : +id;
+ graph.add(node);
+ }
+
+ for (const id of ids) {
+ const data = apiData[id];
+ const node = app.graph.getNodeById(id);
+ for (const input in data.inputs ?? {}) {
+ const value = data.inputs[input];
+ if (value instanceof Array) {
+ const [fromId, fromSlot] = value;
+ const fromNode = app.graph.getNodeById(fromId);
+ const toSlot = node.inputs?.findIndex((inp) => inp.name === input);
+ if (toSlot !== -1) {
+ fromNode.connect(fromSlot, node, toSlot);
+ }
+ } else {
+ const widget = node.widgets?.find((w) => w.name === input);
+ if (widget) {
+ widget.value = value;
+ widget.callback?.(value);
+ }
+ }
+ }
+ }
+
+ app.graph.arrange();
+ }
+
/**
* Registers a Comfy web extension with the app
* @param {ComfyExtension} extension
@@ -1675,13 +1963,21 @@ export class ComfyApp {
async refreshComboInNodes() {
const defs = await api.getNodeDefs();
+ for(const nodeId in LiteGraph.registered_node_types) {
+ const node = LiteGraph.registered_node_types[nodeId];
+ const nodeDef = defs[nodeId];
+ if(!nodeDef) continue;
+
+ node.nodeData = nodeDef;
+ }
+
for(let nodeNum in this.graph._nodes) {
const node = this.graph._nodes[nodeNum];
-
const def = defs[node.type];
- // HOTFIX: The current patch is designed to prevent the rest of the code from breaking due to primitive nodes,
- // and additional work is needed to consider the primitive logic in the refresh logic.
+ // Allow primitive nodes to handle refresh
+ node.refreshComboInNode?.(defs);
+
if(!def)
continue;
diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js
new file mode 100644
index 00000000000..37d26f3c5ef
--- /dev/null
+++ b/web/scripts/domWidget.js
@@ -0,0 +1,322 @@
+import { app, ANIM_PREVIEW_WIDGET } from "./app.js";
+
+const SIZE = Symbol();
+
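+// Return the intersection of two {x, y, width, height} rects as [x, y, width, height], or null if they don't overlap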
+function intersect(a, b) {
+ const x = Math.max(a.x, b.x);
+ const num1 = Math.min(a.x + a.width, b.x + b.width);
+ const y = Math.max(a.y, b.y);
+ const num2 = Math.min(a.y + a.height, b.y + b.height);
+ if (num1 >= x && num2 >= y) return [x, y, num1 - x, num2 - y];
+ else return null;
+}
+
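+// Build a CSS clip-path that cuts the selected node's bounding box out of this element so the node renders above the DOM widget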
+function getClipPath(node, element, elRect) {
+ const selectedNode = Object.values(app.canvas.selected_nodes)[0];
+ if (selectedNode && selectedNode !== node) {
+ const MARGIN = 7;
+ const scale = app.canvas.ds.scale;
+
+ const bounding = selectedNode.getBounding();
+ const intersection = intersect(
+ { x: elRect.x / scale, y: elRect.y / scale, width: elRect.width / scale, height: elRect.height / scale },
+ {
+ x: selectedNode.pos[0] + app.canvas.ds.offset[0] - MARGIN,
+ y: selectedNode.pos[1] + app.canvas.ds.offset[1] - LiteGraph.NODE_TITLE_HEIGHT - MARGIN,
+ width: bounding[2] + MARGIN + MARGIN,
+ height: bounding[3] + MARGIN + MARGIN,
+ }
+ );
+
+ if (!intersection) {
+ return "";
+ }
+
+ const widgetRect = element.getBoundingClientRect();
+ const clipX = intersection[0] - widgetRect.x / scale + "px";
+ const clipY = intersection[1] - widgetRect.y / scale + "px";
+ const clipWidth = intersection[2] + "px";
+ const clipHeight = intersection[3] + "px";
+ const path = `polygon(0% 0%, 0% 100%, ${clipX} 100%, ${clipX} ${clipY}, calc(${clipX} + ${clipWidth}) ${clipY}, calc(${clipX} + ${clipWidth}) calc(${clipY} + ${clipHeight}), ${clipX} calc(${clipY} + ${clipHeight}), ${clipX} 100%, 100% 100%, 100% 0%)`;
+ return path;
+ }
+ return "";
+}
+
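+// Compute per-widget heights for a node: fixed widgets keep their size, then remaining space is shared between DOM widgets based on min/preferred heights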
+function computeSize(size) {
+ if (this.widgets?.[0]?.last_y == null) return;
+
+ let y = this.widgets[0].last_y;
+ let freeSpace = size[1] - y;
+
+ let widgetHeight = 0;
+ let dom = [];
+ for (const w of this.widgets) {
+ if (w.type === "converted-widget") {
+ // Ignore
+ delete w.computedHeight;
+ } else if (w.computeSize) {
+ widgetHeight += w.computeSize()[1] + 4;
+ } else if (w.element) {
+ // Extract DOM widget size info
+ const styles = getComputedStyle(w.element);
+ let minHeight = w.options.getMinHeight?.() ?? parseInt(styles.getPropertyValue("--comfy-widget-min-height"));
+ let maxHeight = w.options.getMaxHeight?.() ?? parseInt(styles.getPropertyValue("--comfy-widget-max-height"));
+
+ let prefHeight = w.options.getHeight?.() ?? styles.getPropertyValue("--comfy-widget-height");
+ if (prefHeight.endsWith?.("%")) {
+ prefHeight = size[1] * (parseFloat(prefHeight.substring(0, prefHeight.length - 1)) / 100);
+ } else {
+ prefHeight = parseInt(prefHeight);
+ if (isNaN(minHeight)) {
+ minHeight = prefHeight;
+ }
+ }
+ if (isNaN(minHeight)) {
+ minHeight = 50;
+ }
+ if (!isNaN(maxHeight)) {
+ if (!isNaN(prefHeight)) {
+ prefHeight = Math.min(prefHeight, maxHeight);
+ } else {
+ prefHeight = maxHeight;
+ }
+ }
+ dom.push({
+ minHeight,
+ prefHeight,
+ w,
+ });
+ } else {
+ widgetHeight += LiteGraph.NODE_WIDGET_HEIGHT + 4;
+ }
+ }
+
+ freeSpace -= widgetHeight;
+
+ // Calculate sizes with all widgets at their min height
+ const prefGrow = []; // Nodes that want to grow to their preferred size
+ const canGrow = []; // Nodes that can grow to auto size
+ let growBy = 0;
+ for (const d of dom) {
+ freeSpace -= d.minHeight;
+ if (isNaN(d.prefHeight)) {
+ canGrow.push(d);
+ d.w.computedHeight = d.minHeight;
+ } else {
+ const diff = d.prefHeight - d.minHeight;
+ if (diff > 0) {
+ prefGrow.push(d);
+ growBy += diff;
+ d.diff = diff;
+ } else {
+ d.w.computedHeight = d.minHeight;
+ }
+ }
+ }
+
+ if (this.imgs && !this.widgets.find((w) => w.name === ANIM_PREVIEW_WIDGET)) {
+ // Allocate space for image
+ freeSpace -= 220;
+ }
+
+ if (freeSpace < 0) {
+ // Not enough space for all widgets so we need to grow
+ size[1] -= freeSpace;
+ this.graph.setDirtyCanvas(true);
+ } else {
+ // Share the space between each
+ const growDiff = freeSpace - growBy;
+ if (growDiff > 0) {
+ // All pref sizes can be fulfilled
+ freeSpace = growDiff;
+ for (const d of prefGrow) {
+ d.w.computedHeight = d.prefHeight;
+ }
+ } else {
+ // We need to grow evenly
+ const shared = -growDiff / prefGrow.length;
+ for (const d of prefGrow) {
+ d.w.computedHeight = d.prefHeight - shared;
+ }
+ freeSpace = 0;
+ }
+
+ if (freeSpace > 0 && canGrow.length) {
+ // Grow any that are auto height
+ const shared = freeSpace / canGrow.length;
+ for (const d of canGrow) {
+ d.w.computedHeight += shared;
+ }
+ }
+ }
+
+ // Position each of the widgets
+ for (const w of this.widgets) {
+ w.y = y;
+ if (w.computedHeight) {
+ y += w.computedHeight;
+ } else if (w.computeSize) {
+ y += w.computeSize()[1] + 4;
+ } else {
+ y += LiteGraph.NODE_WIDGET_HEIGHT + 4;
+ }
+ }
+}
+
+// Override the compute visible nodes function to allow us to hide/show DOM elements when the node goes offscreen
+const elementWidgets = new Set();
+const computeVisibleNodes = LGraphCanvas.prototype.computeVisibleNodes;
+LGraphCanvas.prototype.computeVisibleNodes = function () {
+ const visibleNodes = computeVisibleNodes.apply(this, arguments);
+ for (const node of app.graph._nodes) {
+ if (elementWidgets.has(node)) {
+ const hidden = visibleNodes.indexOf(node) === -1;
+ for (const w of node.widgets) {
+ if (w.element) {
+ w.element.hidden = hidden;
+ if (hidden) {
+ w.options.onHide?.(w);
+ }
+ }
+ }
+ }
+ }
+
+ return visibleNodes;
+};
+
+let enableDomClipping = true;
+
+export function addDomClippingSetting() {
+ app.ui.settings.addSetting({
+ id: "Comfy.DOMClippingEnabled",
+ name: "Enable DOM element clipping (enabling may reduce performance)",
+ type: "boolean",
+ defaultValue: enableDomClipping,
+ onChange(value) {
+ enableDomClipping = !!value;
+ },
+ });
+}
+
+LGraphNode.prototype.addDOMWidget = function (name, type, element, options) {
+ options = { hideOnZoom: true, selectOn: ["focus", "click"], ...options };
+
+ if (!element.parentElement) {
+ document.body.append(element);
+ }
+
+ let mouseDownHandler;
+ if (element.blur) {
+ mouseDownHandler = (event) => {
+ if (!element.contains(event.target)) {
+ element.blur();
+ }
+ };
+ document.addEventListener("mousedown", mouseDownHandler);
+ }
+
+ const widget = {
+ type,
+ name,
+ get value() {
+ return options.getValue?.() ?? undefined;
+ },
+ set value(v) {
+ options.setValue?.(v);
+ widget.callback?.(widget.value);
+ },
+ draw: function (ctx, node, widgetWidth, y, widgetHeight) {
+ if (widget.computedHeight == null) {
+ computeSize.call(node, node.size);
+ }
+
+ const hidden =
+ node.flags?.collapsed ||
+ (!!options.hideOnZoom && app.canvas.ds.scale < 0.5) ||
+ widget.computedHeight <= 0 ||
+ widget.type === "converted-widget";
+ element.hidden = hidden;
+ element.style.display = hidden ? "none" : null;
+ if (hidden) {
+ widget.options.onHide?.(widget);
+ return;
+ }
+
+ const margin = 10;
+ const elRect = ctx.canvas.getBoundingClientRect();
+ const transform = new DOMMatrix()
+ .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height)
+ .multiplySelf(ctx.getTransform())
+ .translateSelf(margin, margin + y);
+
+ const scale = new DOMMatrix().scaleSelf(transform.a, transform.d);
+
+ Object.assign(element.style, {
+ transformOrigin: "0 0",
+ transform: scale,
+ left: `${transform.a + transform.e}px`,
+ top: `${transform.d + transform.f}px`,
+ width: `${widgetWidth - margin * 2}px`,
+ height: `${(widget.computedHeight ?? 50) - margin * 2}px`,
+ position: "absolute",
+ zIndex: app.graph._nodes.indexOf(node),
+ });
+
+ if (enableDomClipping) {
+ element.style.clipPath = getClipPath(node, element, elRect);
+ element.style.willChange = "clip-path";
+ }
+
+ this.options.onDraw?.(widget);
+ },
+ element,
+ options,
+ onRemove() {
+ if (mouseDownHandler) {
+ document.removeEventListener("mousedown", mouseDownHandler);
+ }
+ element.remove();
+ },
+ };
+
+ for (const evt of options.selectOn) {
+ element.addEventListener(evt, () => {
+ app.canvas.selectNode(this);
+ app.canvas.bringToFront(this);
+ });
+ }
+
+ this.addCustomWidget(widget);
+ elementWidgets.add(this);
+
+ const collapse = this.collapse;
+ this.collapse = function () {
+ collapse.apply(this, arguments);
+ if (this.flags?.collapsed) {
+ element.hidden = true;
+ element.style.display = "none";
+ }
+ };
+
+ const onRemoved = this.onRemoved;
+ this.onRemoved = function () {
+ element.remove();
+ elementWidgets.delete(this);
+ onRemoved?.apply(this, arguments);
+ };
+
+ if (!this[SIZE]) {
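+ // Only hook onResize once per node, even when several DOM widgets are added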
+ this[SIZE] = true;
+ const onResize = this.onResize;
+ this.onResize = function (size) {
+ options.beforeResize?.call(widget, this);
+ computeSize.call(this, size);
+ onResize?.apply(this, arguments);
+ options.afterResize?.call(widget, this);
+ };
+ }
+
+ return widget;
+};
diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js
index c5293dfa332..83a4ebc86c4 100644
--- a/web/scripts/pnginfo.js
+++ b/web/scripts/pnginfo.js
@@ -24,7 +24,7 @@ export function getPngMetadata(file) {
const length = dataView.getUint32(offset);
// Get the chunk type
const type = String.fromCharCode(...pngData.slice(offset + 4, offset + 8));
- if (type === "tEXt") {
+ if (type === "tEXt" || type === "comf") {
// Get the keyword
let keyword_end = offset + 8;
while (pngData[keyword_end] !== 0) {
@@ -47,6 +47,105 @@ export function getPngMetadata(file) {
});
}
+function parseExifData(exifData) {
+ // Check the TIFF byte-order mark: "II" (0x49 0x49) means little-endian, "MM" (0x4D 0x4D) big-endian
+ const isLittleEndian = exifData[0] === 0x49 && exifData[1] === 0x49;
+
+ // Function to read 16-bit and 32-bit integers from binary data
+ function readInt(offset, isLittleEndian, length) {
+ let arr = exifData.slice(offset, offset + length)
+ if (length === 2) {
+ return new DataView(arr.buffer, arr.byteOffset, arr.byteLength).getUint16(0, isLittleEndian);
+ } else if (length === 4) {
+ return new DataView(arr.buffer, arr.byteOffset, arr.byteLength).getUint32(0, isLittleEndian);
+ }
+ }
+
+ // Read the offset to the first IFD (Image File Directory)
+ const ifdOffset = readInt(4, isLittleEndian, 4);
+
+ function parseIFD(offset) {
+ const numEntries = readInt(offset, isLittleEndian, 2);
+ const result = {};
+
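+ // Each IFD entry is 12 bytes: tag (2), data type (2), value count (4), then the value itself or an offset to it (4)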
+ for (let i = 0; i < numEntries; i++) {
+ const entryOffset = offset + 2 + i * 12;
+ const tag = readInt(entryOffset, isLittleEndian, 2);
+ const type = readInt(entryOffset + 2, isLittleEndian, 2);
+ const numValues = readInt(entryOffset + 4, isLittleEndian, 4);
+ const valueOffset = readInt(entryOffset + 8, isLittleEndian, 4);
+
+ // Read the value(s) based on the data type
+ let value;
+ if (type === 2) {
+ // ASCII string
+ value = String.fromCharCode(...exifData.slice(valueOffset, valueOffset + numValues - 1));
+ }
+
+ result[tag] = value;
+ }
+
+ return result;
+ }
+
+ // Parse the first IFD
+ const ifdData = parseIFD(ifdOffset);
+ return ifdData;
+}
+
+function splitValues(input) {
+ var output = {};
+ for (var key in input) {
+ var value = input[key];
+ var splitValues = value.split(':', 2);
+ output[splitValues[0]] = splitValues[1];
+ }
+ return output;
+}
+
+export function getWebpMetadata(file) {
+ return new Promise((r) => {
+ const reader = new FileReader();
+ reader.onload = (event) => {
+ const webp = new Uint8Array(event.target.result);
+ const dataView = new DataView(webp.buffer);
+
+ // Check that the WEBP signature is present
+ if (dataView.getUint32(0) !== 0x52494646 || dataView.getUint32(8) !== 0x57454250) {
+ console.error("Not a valid WEBP file");
+ r();
+ return;
+ }
+
+ // Start searching for chunks after the WEBP signature
+ let offset = 12;
+ let txt_chunks = {};
+ // Loop through the chunks in the WEBP file
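+ // Each RIFF chunk is a 4-byte FourCC type followed by a 4-byte little-endian length and the payload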
+ while (offset < webp.length) {
+ const chunk_length = dataView.getUint32(offset + 4, true);
+ const chunk_type = String.fromCharCode(...webp.slice(offset, offset + 4));
+ if (chunk_type === "EXIF") {
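+ // Some writers prefix the EXIF payload with "Exif\0\0"; skip it if present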
+ if (String.fromCharCode(...webp.slice(offset + 8, offset + 8 + 6)) === "Exif\0\0") {
+ offset += 6;
+ }
+ let data = parseExifData(webp.slice(offset + 8, offset + 8 + chunk_length));
+ for (var key in data) {
+ var value = data[key];
+ let index = value.indexOf(':');
+ txt_chunks[value.slice(0, index)] = value.slice(index + 1);
+ }
+ }
+
+ offset += 8 + chunk_length;
+ }
+
+ r(txt_chunks);
+ };
+
+ reader.readAsArrayBuffer(file);
+ });
+}
+
export function getLatentMetadata(file) {
return new Promise((r) => {
const reader = new FileReader();
diff --git a/web/scripts/ui.js b/web/scripts/ui.js
index 1e7920167a6..ebaf86fe428 100644
--- a/web/scripts/ui.js
+++ b/web/scripts/ui.js
@@ -462,8 +462,8 @@ class ComfyList {
return $el("div", {textContent: item.prompt[0] + ": "}, [
$el("button", {
textContent: "Load",
- onclick: () => {
- app.loadGraphData(item.prompt[3].extra_pnginfo.workflow);
+ onclick: async () => {
+ await app.loadGraphData(item.prompt[3].extra_pnginfo.workflow);
if (item.outputs) {
app.nodeOutputs = item.outputs;
}
@@ -599,7 +599,7 @@ export class ComfyUI {
const fileInput = $el("input", {
id: "comfy-file-input",
type: "file",
- accept: ".json,image/png,.latent,.safetensors",
+ accept: ".json,image/png,.latent,.safetensors,image/webp",
style: {display: "none"},
parent: document.body,
onchange: () => {
@@ -719,20 +719,22 @@ export class ComfyUI {
filename += ".json";
}
}
- const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string
- const blob = new Blob([json], {type: "application/json"});
- const url = URL.createObjectURL(blob);
- const a = $el("a", {
- href: url,
- download: filename,
- style: {display: "none"},
- parent: document.body,
+ app.graphToPrompt().then(p=>{
+ const json = JSON.stringify(p.workflow, null, 2); // convert the data to a JSON string
+ const blob = new Blob([json], {type: "application/json"});
+ const url = URL.createObjectURL(blob);
+ const a = $el("a", {
+ href: url,
+ download: filename,
+ style: {display: "none"},
+ parent: document.body,
+ });
+ a.click();
+ setTimeout(function () {
+ a.remove();
+ window.URL.revokeObjectURL(url);
+ }, 0);
});
- a.click();
- setTimeout(function () {
- a.remove();
- window.URL.revokeObjectURL(url);
- }, 0);
},
}),
$el("button", {
@@ -782,9 +784,9 @@ export class ComfyUI {
}
}),
$el("button", {
- id: "comfy-load-default-button", textContent: "Load Default", onclick: () => {
+ id: "comfy-load-default-button", textContent: "Load Default", onclick: async () => {
if (!confirmClear.value || confirm("Load default workflow?")) {
- app.loadGraphData()
+ await app.loadGraphData()
}
}
}),
@@ -809,7 +811,8 @@ export class ComfyUI {
if (
this.lastQueueSize != 0 &&
status.exec_info.queue_remaining == 0 &&
- document.getElementById("autoQueueCheckbox").checked
+ document.getElementById("autoQueueCheckbox").checked &&
+ ! app.lastExecutionError
) {
app.queuePrompt(0, this.batchCount);
}
diff --git a/web/scripts/ui/imagePreview.js b/web/scripts/ui/imagePreview.js
new file mode 100644
index 00000000000..2a7f66b8f3b
--- /dev/null
+++ b/web/scripts/ui/imagePreview.js
@@ -0,0 +1,97 @@
+import { $el } from "../ui.js";
+
+export function calculateImageGrid(imgs, dw, dh) {
+ let best = 0;
+ let w = imgs[0].naturalWidth;
+ let h = imgs[0].naturalHeight;
+ const numImages = imgs.length;
+
+ let cellWidth, cellHeight, cols, rows, shiftX;
+ // compact style
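+ // Try every possible column count and keep the arrangement that maximizes the total displayed image area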
+ for (let c = 1; c <= numImages; c++) {
+ const r = Math.ceil(numImages / c);
+ const cW = dw / c;
+ const cH = dh / r;
+ const scaleX = cW / w;
+ const scaleY = cH / h;
+
+ const scale = Math.min(scaleX, scaleY, 1);
+ const imageW = w * scale;
+ const imageH = h * scale;
+ const area = imageW * imageH * numImages;
+
+ if (area > best) {
+ best = area;
+ cellWidth = imageW;
+ cellHeight = imageH;
+ cols = c;
+ rows = r;
+ shiftX = c * ((cW - imageW) / 2);
+ }
+ }
+
+ return { cellWidth, cellHeight, cols, rows, shiftX };
+}
+
+export function createImageHost(node) {
+ const el = $el("div.comfy-img-preview");
+ let currentImgs;
+ let first = true;
+
+ function updateSize() {
+ let w = null;
+ let h = null;
+
+ if (currentImgs) {
+ let elH = el.clientHeight;
+ if (first) {
+ first = false;
+ // On first run, if we are small then grow a bit
+ if (elH < 190) {
+ elH = 190;
+ }
+ el.style.setProperty("--comfy-widget-min-height", elH);
+ } else {
+ el.style.setProperty("--comfy-widget-min-height", null);
+ }
+
+ const nw = node.size[0];
+ ({ cellWidth: w, cellHeight: h } = calculateImageGrid(currentImgs, nw - 20, elH));
+ w += "px";
+ h += "px";
+
+ el.style.setProperty("--comfy-img-preview-width", w);
+ el.style.setProperty("--comfy-img-preview-height", h);
+ }
+ }
+ return {
+ el,
+ updateImages(imgs) {
+ if (imgs !== currentImgs) {
+ if (currentImgs == null) {
+ requestAnimationFrame(() => {
+ updateSize();
+ });
+ }
+ el.replaceChildren(...imgs);
+ currentImgs = imgs;
+ node.onResize(node.size);
+ node.graph.setDirtyCanvas(true, true);
+ }
+ },
+ getHeight() {
+ updateSize();
+ },
+ onDraw() {
+ // elementFromPoint uses a hit test to find elements, so we need to toggle pointer events
+ el.style.pointerEvents = "all";
+ const over = document.elementFromPoint(app.canvas.mouse[0], app.canvas.mouse[1]);
+ el.style.pointerEvents = "none";
+
+ if (!over) return;
+ // Record the hovered image index so context actions like "Open Image" target the right image
+ const idx = currentImgs.indexOf(over);
+ node.overIndex = idx;
+ },
+ };
+}
diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 2b023937415..de5877e5448 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -1,4 +1,5 @@
import { api } from "./api.js"
+import "./domWidget.js";
function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) {
let defaultVal = inputData[1]["default"];
@@ -22,18 +23,103 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) {
return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } };
}
-export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) {
- const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, {
- values: ["fixed", "increment", "decrement", "randomize"],
- serialize: false, // Don't include this in prompt.
- });
- valueControl.afterQueued = () => {
+export function getWidgetType(inputData, inputName) {
+ const type = inputData[0];
+ if (Array.isArray(type)) {
+ return "COMBO";
+ } else if (`${type}:${inputName}` in ComfyWidgets) {
+ return `${type}:${inputName}`;
+ } else if (type in ComfyWidgets) {
+ return type;
+ } else {
+ return null;
+ }
+}
+
+export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values, widgetName, inputData) {
+ let name = inputData[1]?.control_after_generate;
+ if (typeof name !== "string") {
+ name = widgetName;
+ }
+ const widgets = addValueControlWidgets(node, targetWidget, defaultValue, {
+ addFilterList: false,
+ controlAfterGenerateName: name
+ }, inputData);
+ return widgets[0];
+}
+
+export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", options, inputData) {
+ if (!defaultValue) defaultValue = "randomize";
+ if (!options) options = {};
+
+ const getName = (defaultName, optionName) => {
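+ // Resolve the widget name: an explicit option wins, then a string supplied in the input definition, then control_prefix + the default name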
+ let name = defaultName;
+ if (options[optionName]) {
+ name = options[optionName];
+ } else if (typeof inputData?.[1]?.[defaultName] === "string") {
+ name = inputData?.[1]?.[defaultName];
+ } else if (inputData?.[1]?.control_prefix) {
+ name = inputData?.[1]?.control_prefix + " " + name
+ }
+ return name;
+ }
+
+ const widgets = [];
+ const valueControl = node.addWidget(
+ "combo",
+ getName("control_after_generate", "controlAfterGenerateName"),
+ defaultValue,
+ function () {},
+ {
+ values: ["fixed", "increment", "decrement", "randomize"],
+ serialize: false, // Don't include this in prompt.
+ }
+ );
+ widgets.push(valueControl);
+
+ const isCombo = targetWidget.type === "combo";
+ let comboFilter;
+ if (isCombo && options.addFilterList !== false) {
+ comboFilter = node.addWidget(
+ "string",
+ getName("control_filter_list", "controlFilterListName"),
+ "",
+ function () {},
+ {
+ serialize: false, // Don't include this in prompt.
+ }
+ );
+ widgets.push(comboFilter);
+ }
+
+ valueControl.afterQueued = () => {
var v = valueControl.value;
- if (targetWidget.type == "combo" && v !== "fixed") {
- let current_index = targetWidget.options.values.indexOf(targetWidget.value);
- let current_length = targetWidget.options.values.length;
+ if (isCombo && v !== "fixed") {
+ let values = targetWidget.options.values;
+ const filter = comboFilter?.value;
+ if (filter) {
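+ // Filters wrapped in slashes ("/.../") are treated as regular expressions; anything else is a case-insensitive substring match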
+ let check;
+ if (filter.startsWith("/") && filter.endsWith("/")) {
+ try {
+ const regex = new RegExp(filter.substring(1, filter.length - 1));
+ check = (item) => regex.test(item);
+ } catch (error) {
+ console.error("Error constructing RegExp filter for node " + node.id, filter, error);
+ }
+ }
+ if (!check) {
+ const lower = filter.toLocaleLowerCase();
+ check = (item) => item.toLocaleLowerCase().includes(lower);
+ }
+ values = values.filter(item => check(item));
+ if (!values.length && targetWidget.options.values.length) {
+ console.warn("Filter for node " + node.id + " has filtered out all items", filter);
+ }
+ }
+ let current_index = values.indexOf(targetWidget.value);
+ let current_length = values.length;
switch (v) {
case "increment":
@@ -50,11 +136,12 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random
current_index = Math.max(0, current_index);
current_index = Math.min(current_length - 1, current_index);
if (current_index >= 0) {
- let value = targetWidget.options.values[current_index];
+ let value = values[current_index];
targetWidget.value = value;
targetWidget.callback(value);
}
- } else { //number
+ } else {
+ //number
let min = targetWidget.options.min;
let max = targetWidget.options.max;
// limit to something that javascript can handle
@@ -77,185 +164,68 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random
default:
break;
}
- /*check if values are over or under their respective
- * ranges and set them to min or max.*/
- if (targetWidget.value < min)
- targetWidget.value = min;
+ /*check if values are over or under their respective
+ * ranges and set them to min or max.*/
+ if (targetWidget.value < min) targetWidget.value = min;
if (targetWidget.value > max)
targetWidget.value = max;
+ targetWidget.callback(targetWidget.value);
}
- }
- return valueControl;
+ };
+ return widgets;
};
-function seedWidget(node, inputName, inputData, app) {
- const seed = ComfyWidgets.INT(node, inputName, inputData, app);
- const seedControl = addValueControlWidget(node, seed.widget, "randomize");
+function seedWidget(node, inputName, inputData, app, widgetName) {
+ const seed = createIntWidget(node, inputName, inputData, app, true);
+ const seedControl = addValueControlWidget(node, seed.widget, "randomize", undefined, widgetName, inputData);
seed.widget.linkedWidgets = [seedControl];
return seed;
}
-const MultilineSymbol = Symbol();
-const MultilineResizeSymbol = Symbol();
-
-function addMultilineWidget(node, name, opts, app) {
- const MIN_SIZE = 50;
-
- function computeSize(size) {
- if (node.widgets[0].last_y == null) return;
-
- let y = node.widgets[0].last_y;
- let freeSpace = size[1] - y;
-
- // Compute the height of all non customtext widgets
- let widgetHeight = 0;
- const multi = [];
- for (let i = 0; i < node.widgets.length; i++) {
- const w = node.widgets[i];
- if (w.type === "customtext") {
- multi.push(w);
- } else {
- if (w.computeSize) {
- widgetHeight += w.computeSize()[1] + 4;
- } else {
- widgetHeight += LiteGraph.NODE_WIDGET_HEIGHT + 4;
- }
- }
- }
-
- // See how large each text input can be
- freeSpace -= widgetHeight;
- freeSpace /= multi.length + (!!node.imgs?.length);
-
- if (freeSpace < MIN_SIZE) {
- // There isnt enough space for all the widgets, increase the size of the node
- freeSpace = MIN_SIZE;
- node.size[1] = y + widgetHeight + freeSpace * (multi.length + (!!node.imgs?.length));
- node.graph.setDirtyCanvas(true);
- }
-
- // Position each of the widgets
- for (const w of node.widgets) {
- w.y = y;
- if (w.type === "customtext") {
- y += freeSpace;
- w.computedHeight = freeSpace - multi.length*4;
- } else if (w.computeSize) {
- y += w.computeSize()[1] + 4;
- } else {
- y += LiteGraph.NODE_WIDGET_HEIGHT + 4;
- }
- }
-
- node.inputHeight = freeSpace;
+function createIntWidget(node, inputName, inputData, app, isSeedInput) {
+ const control = inputData[1]?.control_after_generate;
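+ // Non-seed INT inputs that request control_after_generate are routed through the seed widget so they get the same control combo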
+ if (!isSeedInput && control) {
+ return seedWidget(node, inputName, inputData, app, typeof control === "string" ? control : undefined);
}
- const widget = {
- type: "customtext",
- name,
- get value() {
- return this.inputEl.value;
- },
- set value(x) {
- this.inputEl.value = x;
+ let widgetType = isSlider(inputData[1]["display"], app);
+ const { val, config } = getNumberDefaults(inputData, 1, 0, true);
+ Object.assign(config, { precision: 0 });
+ return {
+ widget: node.addWidget(
+ widgetType,
+ inputName,
+ val,
+ function (v) {
+ const s = this.options.step / 10;
+ this.value = Math.round(v / s) * s;
+ },
+ config
+ ),
+ };
+}
+
+function addMultilineWidget(node, name, opts, app) {
+ const inputEl = document.createElement("textarea");
+ inputEl.className = "comfy-multiline-input";
+ inputEl.value = opts.defaultVal;
+ inputEl.placeholder = opts.placeholder || name;
+
+ const widget = node.addDOMWidget(name, "customtext", inputEl, {
+ getValue() {
+ return inputEl.value;
},
- draw: function (ctx, _, widgetWidth, y, widgetHeight) {
- if (!this.parent.inputHeight) {
- // If we are initially offscreen when created we wont have received a resize event
- // Calculate it here instead
- computeSize(node.size);
- }
- const visible = app.canvas.ds.scale > 0.5 && this.type === "customtext";
- const margin = 10;
- const elRect = ctx.canvas.getBoundingClientRect();
- const transform = new DOMMatrix()
- .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height)
- .multiplySelf(ctx.getTransform())
- .translateSelf(margin, margin + y);
-
- const scale = new DOMMatrix().scaleSelf(transform.a, transform.d)
- Object.assign(this.inputEl.style, {
- transformOrigin: "0 0",
- transform: scale,
- left: `${transform.a + transform.e}px`,
- top: `${transform.d + transform.f}px`,
- width: `${widgetWidth - (margin * 2)}px`,
- height: `${this.parent.inputHeight - (margin * 2)}px`,
- position: "absolute",
- background: (!node.color)?'':node.color,
- color: (!node.color)?'':'white',
- zIndex: app.graph._nodes.indexOf(node),
- });
- this.inputEl.hidden = !visible;
+ setValue(v) {
+ inputEl.value = v;
},
- };
- widget.inputEl = document.createElement("textarea");
- widget.inputEl.className = "comfy-multiline-input";
- widget.inputEl.value = opts.defaultVal;
- widget.inputEl.placeholder = opts.placeholder || "";
- document.addEventListener("mousedown", function (event) {
- if (!widget.inputEl.contains(event.target)) {
- widget.inputEl.blur();
- }
});
- widget.parent = node;
- document.body.appendChild(widget.inputEl);
-
- node.addCustomWidget(widget);
-
- app.canvas.onDrawBackground = function () {
- // Draw node isnt fired once the node is off the screen
- // if it goes off screen quickly, the input may not be removed
- // this shifts it off screen so it can be moved back if the node is visible.
- for (let n in app.graph._nodes) {
- n = graph._nodes[n];
- for (let w in n.widgets) {
- let wid = n.widgets[w];
- if (Object.hasOwn(wid, "inputEl")) {
- wid.inputEl.style.left = -8000 + "px";
- wid.inputEl.style.position = "absolute";
- }
- }
- }
- };
-
- node.onRemoved = function () {
- // When removing this node we need to remove the input from the DOM
- for (let y in this.widgets) {
- if (this.widgets[y].inputEl) {
- this.widgets[y].inputEl.remove();
- }
- }
- };
-
- widget.onRemove = () => {
- widget.inputEl?.remove();
+ widget.inputEl = inputEl;
- // Restore original size handler if we are the last
- if (!--node[MultilineSymbol]) {
- node.onResize = node[MultilineResizeSymbol];
- delete node[MultilineSymbol];
- delete node[MultilineResizeSymbol];
- }
- };
-
- if (node[MultilineSymbol]) {
- node[MultilineSymbol]++;
- } else {
- node[MultilineSymbol] = 1;
- const onResize = (node[MultilineResizeSymbol] = node.onResize);
-
- node.onResize = function (size) {
- computeSize(size);
-
- // Call original resizer handler
- if (onResize) {
- onResize.apply(this, arguments);
- }
- };
- }
+ inputEl.addEventListener("input", () => {
+ widget.callback?.(widget.value);
+ });
return { minWidth: 400, minHeight: 200, widget };
}
@@ -287,31 +257,26 @@ export const ComfyWidgets = {
}, config) };
},
INT(node, inputName, inputData, app) {
- let widgetType = isSlider(inputData[1]["display"], app);
- const { val, config } = getNumberDefaults(inputData, 1, 0, true);
- Object.assign(config, { precision: 0 });
- return {
- widget: node.addWidget(
- widgetType,
- inputName,
- val,
- function (v) {
- const s = this.options.step / 10;
- this.value = Math.round(v / s) * s;
- },
- config
- ),
- };
+ return createIntWidget(node, inputName, inputData, app);
},
BOOLEAN(node, inputName, inputData) {
- let defaultVal = inputData[1]["default"];
+ let defaultVal = false;
+ let options = {};
+ if (inputData[1]) {
+ if (inputData[1].default)
+ defaultVal = inputData[1].default;
+ if (inputData[1].label_on)
+ options["on"] = inputData[1].label_on;
+ if (inputData[1].label_off)
+ options["off"] = inputData[1].label_off;
+ }
return {
widget: node.addWidget(
"toggle",
inputName,
defaultVal,
() => {},
- {"on": inputData[1].label_on, "off": inputData[1].label_off}
+ options,
)
};
},
@@ -337,10 +302,14 @@ export const ComfyWidgets = {
if (inputData[1] && inputData[1].default) {
defaultValue = inputData[1].default;
}
- return { widget: node.addWidget("combo", inputName, defaultValue, () => {}, { values: type }) };
+ const res = { widget: node.addWidget("combo", inputName, defaultValue, () => {}, { values: type }) };
+ if (inputData[1]?.control_after_generate) {
+ res.widget.linkedWidgets = addValueControlWidgets(node, res.widget, undefined, undefined, inputData);
+ }
+ return res;
},
IMAGEUPLOAD(node, inputName, inputData, app) {
- const imageWidget = node.widgets.find((w) => w.name === "image");
+ const imageWidget = node.widgets.find((w) => w.name === (inputData[1]?.widget ?? "image"));
let uploadWidget;
function showImage(name) {
@@ -454,9 +423,10 @@ export const ComfyWidgets = {
document.body.append(fileInput);
// Create the button widget for selecting the files
- uploadWidget = node.addWidget("button", "choose file to upload", "image", () => {
+ uploadWidget = node.addWidget("button", inputName, "image", () => {
fileInput.click();
});
+ uploadWidget.label = "choose file to upload";
uploadWidget.serialize = false;
// Add handler to check if an image is being dragged over our node
diff --git a/web/style.css b/web/style.css
index 692fa31d672..378fe0a48b9 100644
--- a/web/style.css
+++ b/web/style.css
@@ -409,6 +409,21 @@ dialog::backdrop {
width: calc(100% - 10px);
}
+.comfy-img-preview {
+ pointer-events: none;
+ overflow: hidden;
+ display: flex;
+ flex-wrap: wrap;
+ align-content: flex-start;
+ justify-content: center;
+}
+
+.comfy-img-preview img {
+ object-fit: contain;
+ width: var(--comfy-img-preview-width);
+ height: var(--comfy-img-preview-height);
+}
+
/* Search box */
.litegraph.litesearchbox {