Skip to content

Commit 59da8f9

Browse files
committed
Merge branch 'master' into beta
2 parents 268f79e + 94a5a67 commit 59da8f9

File tree

10 files changed

+58
-41
lines changed

10 files changed

+58
-41
lines changed

comfy/model_base.py

Lines changed: 22 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -66,7 +66,8 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None, unet_mod
6666
self.adm_channels = unet_config.get("adm_in_channels", None)
6767
if self.adm_channels is None:
6868
self.adm_channels = 0
69-
self.inpaint_model = False
69+
70+
self.concat_keys = ()
7071
logging.info("model_type {}".format(model_type.name))
7172
logging.debug("adm {}".format(self.adm_channels))
7273

@@ -107,8 +108,7 @@ def encode_adm(self, **kwargs):
107108

108109
def extra_conds(self, **kwargs):
109110
out = {}
110-
if self.inpaint_model:
111-
concat_keys = ("mask", "masked_image")
111+
if len(self.concat_keys) > 0:
112112
cond_concat = []
113113
denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None))
114114
concat_latent_image = kwargs.get("concat_latent_image", None)
@@ -125,24 +125,16 @@ def extra_conds(self, **kwargs):
125125

126126
concat_latent_image = utils.resize_to_batch_size(concat_latent_image, noise.shape[0])
127127

128-
if len(denoise_mask.shape) == len(noise.shape):
129-
denoise_mask = denoise_mask[:,:1]
130-
131-
denoise_mask = denoise_mask.reshape((-1, 1, denoise_mask.shape[-2], denoise_mask.shape[-1]))
132-
if denoise_mask.shape[-2:] != noise.shape[-2:]:
133-
denoise_mask = utils.common_upscale(denoise_mask, noise.shape[-1], noise.shape[-2], "bilinear", "center")
134-
denoise_mask = utils.resize_to_batch_size(denoise_mask.round(), noise.shape[0])
128+
if denoise_mask is not None:
129+
if len(denoise_mask.shape) == len(noise.shape):
130+
denoise_mask = denoise_mask[:,:1]
135131

136-
def blank_inpaint_image_like(latent_image):
137-
blank_image = torch.ones_like(latent_image)
138-
# these are the values for "zero" in pixel space translated to latent space
139-
blank_image[:,0] *= 0.8223
140-
blank_image[:,1] *= -0.6876
141-
blank_image[:,2] *= 0.6364
142-
blank_image[:,3] *= 0.1380
143-
return blank_image
132+
denoise_mask = denoise_mask.reshape((-1, 1, denoise_mask.shape[-2], denoise_mask.shape[-1]))
133+
if denoise_mask.shape[-2:] != noise.shape[-2:]:
134+
denoise_mask = utils.common_upscale(denoise_mask, noise.shape[-1], noise.shape[-2], "bilinear", "center")
135+
denoise_mask = utils.resize_to_batch_size(denoise_mask.round(), noise.shape[0])
144136

145-
for ck in concat_keys:
137+
for ck in self.concat_keys:
146138
if denoise_mask is not None:
147139
if ck == "mask":
148140
cond_concat.append(denoise_mask.to(device))
@@ -152,7 +144,7 @@ def blank_inpaint_image_like(latent_image):
152144
if ck == "mask":
153145
cond_concat.append(torch.ones_like(noise)[:,:1])
154146
elif ck == "masked_image":
155-
cond_concat.append(blank_inpaint_image_like(noise))
147+
cond_concat.append(self.blank_inpaint_image_like(noise))
156148
data = torch.cat(cond_concat, dim=1)
157149
out['c_concat'] = comfy.conds.CONDNoiseShape(data)
158150

@@ -221,7 +213,16 @@ def state_dict_for_saving(self, clip_state_dict=None, vae_state_dict=None, clip_
221213
return unet_state_dict
222214

223215
def set_inpaint(self):
224-
self.inpaint_model = True
216+
self.concat_keys = ("mask", "masked_image")
217+
def blank_inpaint_image_like(latent_image):
218+
blank_image = torch.ones_like(latent_image)
219+
# these are the values for "zero" in pixel space translated to latent space
220+
blank_image[:,0] *= 0.8223
221+
blank_image[:,1] *= -0.6876
222+
blank_image[:,2] *= 0.6364
223+
blank_image[:,3] *= 0.1380
224+
return blank_image
225+
self.blank_inpaint_image_like = blank_inpaint_image_like
225226

226227
def memory_required(self, input_shape):
227228
if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention():

comfy/model_detection.py

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -345,7 +345,13 @@ def unet_config_from_diffusers_unet(state_dict, dtype=None):
345345
'channel_mult': [1, 2, 4], 'transformer_depth_middle': 6, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64,
346346
'use_temporal_attention': False, 'use_temporal_resblock': False}
347347

348-
supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega, KOALA_700M, KOALA_1B]
348+
SD09_XS = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
349+
'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [1, 1, 1],
350+
'transformer_depth': [1, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -2, 'use_linear_in_transformer': True,
351+
'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1],
352+
'use_temporal_attention': False, 'use_temporal_resblock': False, 'disable_self_attentions': [True, False, False]}
353+
354+
supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega, KOALA_700M, KOALA_1B, SD09_XS]
349355

350356
for unet_config in supported_models:
351357
matches = True

comfy/model_management.py

Lines changed: 11 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -288,6 +288,7 @@ def __init__(self, model):
288288
self.model = model
289289
self.device = model.load_device
290290
self.weights_loaded = False
291+
self.real_model = None
291292

292293
def model_memory(self):
293294
return self.model.model_size()
@@ -326,6 +327,7 @@ def model_unload(self, unpatch_weights=True):
326327
self.model.unpatch_model(self.model.offload_device, unpatch_weights=unpatch_weights)
327328
self.model.model_patches_to(self.model.offload_device)
328329
self.weights_loaded = self.weights_loaded and not unpatch_weights
330+
self.real_model = None
329331

330332
def __eq__(self, other):
331333
return self.model is other.model
@@ -340,7 +342,7 @@ def unload_model_clones(model, unload_weights_only=True, force_unload=True):
340342
to_unload = [i] + to_unload
341343

342344
if len(to_unload) == 0:
343-
return None
345+
return True
344346

345347
same_weights = 0
346348
for i in to_unload:
@@ -422,8 +424,8 @@ def load_models_gpu(models, memory_required=0):
422424

423425
total_memory_required = {}
424426
for loaded_model in models_to_load:
425-
unload_model_clones(loaded_model.model, unload_weights_only=True, force_unload=False) #unload clones where the weights are different
426-
total_memory_required[loaded_model.device] = total_memory_required.get(loaded_model.device, 0) + loaded_model.model_memory_required(loaded_model.device)
427+
if unload_model_clones(loaded_model.model, unload_weights_only=True, force_unload=False) == True:#unload clones where the weights are different
428+
total_memory_required[loaded_model.device] = total_memory_required.get(loaded_model.device, 0) + loaded_model.model_memory_required(loaded_model.device)
427429

428430
for device in total_memory_required:
429431
if device != torch.device("cpu"):
@@ -462,11 +464,15 @@ def load_models_gpu(models, memory_required=0):
462464
def load_model_gpu(model):
463465
return load_models_gpu([model])
464466

465-
def cleanup_models():
467+
def cleanup_models(keep_clone_weights_loaded=False):
466468
to_delete = []
467469
for i in range(len(current_loaded_models)):
468470
if sys.getrefcount(current_loaded_models[i].model) <= 2:
469-
to_delete = [i] + to_delete
471+
if not keep_clone_weights_loaded:
472+
to_delete = [i] + to_delete
473+
#TODO: find a less fragile way to do this.
474+
elif sys.getrefcount(current_loaded_models[i].real_model) <= 3: #references from .real_model + the .model
475+
to_delete = [i] + to_delete
470476

471477
for i in to_delete:
472478
x = current_loaded_models.pop(i)

comfy/supported_models.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,8 +70,8 @@ class SD20(supported_models_base.BASE):
7070
def model_type(self, state_dict, prefix=""):
7171
if self.unet_config["in_channels"] == 4: #SD2.0 inpainting models are not v prediction
7272
k = "{}output_blocks.11.1.transformer_blocks.0.norm1.bias".format(prefix)
73-
out = state_dict[k]
74-
if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
73+
out = state_dict.get(k, None)
74+
if out is not None and torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
7575
return model_base.ModelType.V_PREDICTION
7676
return model_base.ModelType.EPS
7777

comfy_extras/nodes_post_processing.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -204,13 +204,13 @@ def INPUT_TYPES(s):
204204
"default": 1.0,
205205
"min": 0.1,
206206
"max": 10.0,
207-
"step": 0.1
207+
"step": 0.01
208208
}),
209209
"alpha": ("FLOAT", {
210210
"default": 1.0,
211211
"min": 0.0,
212212
"max": 5.0,
213-
"step": 0.1
213+
"step": 0.01
214214
}),
215215
},
216216
}

cuda_malloc.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,7 @@ def enum_display_devices():
4747
"Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
4848
"Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
4949
"GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
50-
"GeForce GTX 1650", "GeForce GTX 1630"
50+
"GeForce GTX 1650", "GeForce GTX 1630", "Tesla M4", "Tesla M6", "Tesla M10", "Tesla M40", "Tesla M60"
5151
}
5252

5353
def cuda_malloc_supported():

execution.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -392,6 +392,7 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
392392
d = self.outputs_ui.pop(x)
393393
del d
394394

395+
comfy.model_management.cleanup_models(keep_clone_weights_loaded=True)
395396
self.add_message("execution_cached",
396397
{ "nodes": list(current_outputs) , "prompt_id": prompt_id},
397398
broadcast=False)

folder_paths.py

Lines changed: 10 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,6 @@
11
import os
22
import time
3+
import logging
34

45
supported_pt_extensions = set(['.ckpt', '.pt', '.bin', '.pth', '.safetensors'])
56

@@ -44,7 +45,7 @@
4445
try:
4546
os.makedirs(input_directory)
4647
except:
47-
print("Failed to create input directory")
48+
logging.error("Failed to create input directory")
4849

4950
def set_output_directory(output_dir):
5051
global output_directory
@@ -146,21 +147,23 @@ def recursive_search(directory, excluded_dir_names=None):
146147
try:
147148
dirs[directory] = os.path.getmtime(directory)
148149
except FileNotFoundError:
149-
print(f"Warning: Unable to access {directory}. Skipping this path.")
150-
150+
logging.warning(f"Warning: Unable to access {directory}. Skipping this path.")
151+
152+
logging.debug("recursive file list on directory {}".format(directory))
151153
for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
152154
subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
153155
for file_name in filenames:
154156
relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
155157
result.append(relative_path)
156-
158+
157159
for d in subdirs:
158160
path = os.path.join(dirpath, d)
159161
try:
160162
dirs[path] = os.path.getmtime(path)
161163
except FileNotFoundError:
162-
print(f"Warning: Unable to access {path}. Skipping this path.")
164+
logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
163165
continue
166+
logging.debug("found {} files".format(len(result)))
164167
return result, dirs
165168

166169
def filter_files_extensions(files, extensions):
@@ -248,8 +251,8 @@ def compute_vars(input, image_width, image_height):
248251
err = "**** ERROR: Saving image outside the output folder is not allowed." + \
249252
"\n full_output_folder: " + os.path.abspath(full_output_folder) + \
250253
"\n output_dir: " + output_dir + \
251-
"\n commonpath: " + os.path.commonpath((output_dir, os.path.abspath(full_output_folder)))
252-
print(err)
254+
"\n commonpath: " + os.path.commonpath((output_dir, os.path.abspath(full_output_folder)))
255+
logging.error(err)
253256
raise Exception(err)
254257

255258
try:

nodes.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@ def before_node_execution():
4141
def interrupt_processing(value=True):
4242
comfy.model_management.interrupt_current_processing(value)
4343

44-
MAX_RESOLUTION=8192
44+
MAX_RESOLUTION=16384
4545

4646
class CLIPTextEncode:
4747
@classmethod

tests-ui/tests/groupNode.test.js

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -947,7 +947,7 @@ describe("group node", () => {
947947
expect(p1.widgets.value.widget.options?.step).toBe(80); // width/height step * 10
948948

949949
expect(p2.widgets.value.widget.options?.min).toBe(16); // width/height min
950-
expect(p2.widgets.value.widget.options?.max).toBe(8192); // width/height max
950+
expect(p2.widgets.value.widget.options?.max).toBe(16384); // width/height max
951951
expect(p2.widgets.value.widget.options?.step).toBe(80); // width/height step * 10
952952

953953
expect(p1.widgets.value.value).toBe(128);

0 commit comments

Comments (0)