
Commit f902999

Merge branch 'master' into beta
2 parents: 033ca81 + 7114cfe

9 files changed: 344 additions & 52 deletions


comfy/k_diffusion/sampling.py

Lines changed: 58 additions & 0 deletions

@@ -750,3 +750,61 @@ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, n
         if sigmas[i + 1] > 0:
             x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])
     return x
+
+
+
+@torch.no_grad()
+def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+    s_end = sigmas[-1]
+    for i in trange(len(sigmas) - 1, disable=disable):
+        gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
+        eps = torch.randn_like(x) * s_noise
+        sigma_hat = sigmas[i] * (gamma + 1)
+        if gamma > 0:
+            x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
+        denoised = model(x, sigma_hat * s_in, **extra_args)
+        d = to_d(x, sigma_hat, denoised)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+        dt = sigmas[i + 1] - sigma_hat
+        if sigmas[i + 1] == s_end:
+            # Euler method
+            x = x + d * dt
+        elif sigmas[i + 2] == s_end:
+
+            # Heun's method
+            x_2 = x + d * dt
+            denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
+            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
+
+            w = 2 * sigmas[0]
+            w2 = sigmas[i+1]/w
+            w1 = 1 - w2
+
+            d_prime = d * w1 + d_2 * w2
+
+
+            x = x + d_prime * dt
+
+        else:
+            # Heun++
+            x_2 = x + d * dt
+            denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
+            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
+            dt_2 = sigmas[i + 2] - sigmas[i + 1]
+
+            x_3 = x_2 + d_2 * dt_2
+            denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args)
+            d_3 = to_d(x_3, sigmas[i + 2], denoised_3)
+
+            w = 3 * sigmas[0]
+            w2 = sigmas[i + 1] / w
+            w3 = sigmas[i + 2] / w
+            w1 = 1 - w2 - w3
+
+            d_prime = w1 * d + w2 * d_2 + w3 * d_3
+            x = x + d_prime * dt
+    return x
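Note on the method: heunpp2 reduces its order near the end of the schedule, taking a plain Euler step on the final interval, a two-stage Heun step on the second-to-last, and a three-stage step everywhere else. The slope blends sum to 1 by construction (w1 = 1 - w2 and w1 = 1 - w2 - w3), leaning on the extra model evaluations more heavily while sigma is still large. A minimal sketch of calling the sampler directly; the toy denoiser and schedule parameters are illustrative, not part of this commit (inside ComfyUI the sampler is reached via the "heunpp2" name added to KSAMPLER_NAMES in comfy/samplers.py below):

import torch
from comfy.k_diffusion import sampling as k_diffusion_sampling

def toy_denoiser(x, sigma, **extra_args):
    # stand-in for a real model wrapper: returns a fake x0 prediction for (x, sigma)
    return x / (1.0 + sigma.reshape(-1, 1, 1, 1) ** 2)

sigmas = k_diffusion_sampling.get_sigmas_karras(n=20, sigma_min=0.03, sigma_max=14.6)
x = torch.randn(2, 4, 64, 64) * sigmas[0]  # start from pure noise at sigma_max
out = k_diffusion_sampling.sample_heunpp2(toy_denoiser, x, sigmas, disable=True)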

comfy/ldm/modules/diffusionmodules/openaimodel.py

Lines changed: 5 additions & 0 deletions

@@ -624,6 +624,11 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo
             transformer_options["block"] = ("input", id)
             h = forward_timestep_embed(module, h, emb, context, transformer_options)
             h = apply_control(h, control, 'input')
+            if "input_block_patch" in transformer_patches:
+                patch = transformer_patches["input_block_patch"]
+                for p in patch:
+                    h = p(h, transformer_options)
+
             hs.append(h)
 
         transformer_options["block"] = ("middle", 0)

comfy/model_patcher.py

Lines changed: 3 additions & 0 deletions

@@ -96,6 +96,9 @@ def set_model_attn1_output_patch(self, patch):
     def set_model_attn2_output_patch(self, patch):
         self.set_model_patch(patch, "attn2_output_patch")
 
+    def set_model_input_block_patch(self, patch):
+        self.set_model_patch(patch, "input_block_patch")
+
     def set_model_output_block_patch(self, patch):
         self.set_model_patch(patch, "output_block_patch")
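Together with the openaimodel.py hook above, this exposes a new patch point to custom nodes: each registered callable receives the hidden states right after an input block runs and must return them. A sketch of typical usage; the patch body and block index are illustrative, and `model` stands for the ModelPatcher instance a custom node receives:

def my_input_block_patch(h, transformer_options):
    # transformer_options["block"] is ("input", id) at this call site, per the
    # openaimodel.py change above; the patch must return h (modified or not)
    if transformer_options.get("block") == ("input", 4):  # illustrative block index
        h = h * 1.05  # e.g. mildly scale that block's activations
    return h

m = model.clone()
m.set_model_input_block_patch(my_input_block_patch)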

comfy/samplers.py

Lines changed: 52 additions & 35 deletions

@@ -518,46 +518,63 @@ class UNIPCBH2(Sampler):
     def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
         return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar)
 
-KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
+KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2", "dpm_2", "dpm_2_ancestral",
                   "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
                   "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
 
-def ksampler(sampler_name, extra_options={}, inpaint_options={}):
-    class KSAMPLER(Sampler):
-        def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
-            extra_args["denoise_mask"] = denoise_mask
-            model_k = KSamplerX0Inpaint(model_wrap)
-            model_k.latent_image = latent_image
-            if inpaint_options.get("random", False): #TODO: Should this be the default?
-                generator = torch.manual_seed(extra_args.get("seed", 41) + 1)
-                model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device)
-            else:
-                model_k.noise = noise
+class KSAMPLER(Sampler):
+    def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
+        self.sampler_function = sampler_function
+        self.extra_options = extra_options
+        self.inpaint_options = inpaint_options
 
-            if self.max_denoise(model_wrap, sigmas):
-                noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)
-            else:
-                noise = noise * sigmas[0]
+    def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
+        extra_args["denoise_mask"] = denoise_mask
+        model_k = KSamplerX0Inpaint(model_wrap)
+        model_k.latent_image = latent_image
+        if self.inpaint_options.get("random", False): #TODO: Should this be the default?
+            generator = torch.manual_seed(extra_args.get("seed", 41) + 1)
+            model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device)
+        else:
+            model_k.noise = noise
 
-            k_callback = None
-            total_steps = len(sigmas) - 1
-            if callback is not None:
-                k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps)
+        if self.max_denoise(model_wrap, sigmas):
+            noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)
+        else:
+            noise = noise * sigmas[0]
+
+        k_callback = None
+        total_steps = len(sigmas) - 1
+        if callback is not None:
+            k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps)
+
+        if latent_image is not None:
+            noise += latent_image
 
+        samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
+        return samples
+
+
+def ksampler(sampler_name, extra_options={}, inpaint_options={}):
+    if sampler_name == "dpm_fast":
+        def dpm_fast_function(model, noise, sigmas, extra_args, callback, disable):
             sigma_min = sigmas[-1]
             if sigma_min == 0:
                 sigma_min = sigmas[-2]
+            total_steps = len(sigmas) - 1
+            return k_diffusion_sampling.sample_dpm_fast(model, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=callback, disable=disable)
+        sampler_function = dpm_fast_function
+    elif sampler_name == "dpm_adaptive":
+        def dpm_adaptive_function(model, noise, sigmas, extra_args, callback, disable):
+            sigma_min = sigmas[-1]
+            if sigma_min == 0:
+                sigma_min = sigmas[-2]
+            return k_diffusion_sampling.sample_dpm_adaptive(model, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=callback, disable=disable)
+        sampler_function = dpm_adaptive_function
+    else:
+        sampler_function = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))
 
-            if latent_image is not None:
-                noise += latent_image
-            if sampler_name == "dpm_fast":
-                samples = k_diffusion_sampling.sample_dpm_fast(model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
-            elif sampler_name == "dpm_adaptive":
-                samples = k_diffusion_sampling.sample_dpm_adaptive(model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar)
-            else:
-                samples = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **extra_options)
-            return samples
-    return KSAMPLER
+    return KSAMPLER(sampler_function, extra_options, inpaint_options)
 
 def wrap_model(model):
     model_denoise = CFGNoisePredictor(model)

@@ -618,11 +635,11 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps):
         print("error invalid scheduler", self.scheduler)
     return sigmas
 
-def sampler_class(name):
+def sampler_object(name):
     if name == "uni_pc":
-        sampler = UNIPC
+        sampler = UNIPC()
     elif name == "uni_pc_bh2":
-        sampler = UNIPCBH2
+        sampler = UNIPCBH2()
     elif name == "ddim":
         sampler = ksampler("euler", inpaint_options={"random": True})
     else:

@@ -687,6 +704,6 @@ def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=N
         else:
             return torch.zeros_like(noise)
 
-        sampler = sampler_class(self.sampler)
+        sampler = sampler_object(self.sampler)
 
-        return sample(self.model, noise, positive, negative, cfg, self.device, sampler(), sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
+        return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
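The net effect of this refactor: KSAMPLER is now a concrete Sampler that wraps any k-diffusion-style sampler function, so sampling code no longer needs a name registered in KSAMPLER_NAMES. A sketch of both entry points (the pass-through function here is illustrative, not part of the commit):

import comfy.samplers
import comfy.k_diffusion.sampling as k_diffusion_sampling

def my_sampler_function(model, noise, sigmas, extra_args=None, callback=None, disable=None):
    # illustrative custom function: just delegate to plain Euler
    return k_diffusion_sampling.sample_euler(model, noise, sigmas, extra_args=extra_args, callback=callback, disable=disable)

sampler_a = comfy.samplers.KSAMPLER(my_sampler_function)  # arbitrary function
sampler_b = comfy.samplers.ksampler("euler")              # registered name, same type

Both objects expose the same .sample(...) interface, which is what sampler_object() and the custom-sampler nodes below now pass around directly.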

comfy/sd1_clip.py

Lines changed: 2 additions & 2 deletions

@@ -173,9 +173,9 @@ def forward(self, tokens):
         if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32:
             precision_scope = torch.autocast
         else:
-            precision_scope = lambda a, b: contextlib.nullcontext(a)
+            precision_scope = lambda a, dtype: contextlib.nullcontext(a)
 
-        with precision_scope(model_management.get_autocast_device(device), torch.float32):
+        with precision_scope(model_management.get_autocast_device(device), dtype=torch.float32):
             attention_mask = None
             if self.enable_attention_masks:
                 attention_mask = torch.zeros_like(tokens)
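The rename is load-bearing: the call site now passes dtype by keyword to match torch.autocast's signature, and a lambda only accepts a keyword argument whose name matches its parameter. A quick illustration of the failure the two-line fix avoids:

import contextlib
import torch

old = lambda a, b: contextlib.nullcontext(a)
new = lambda a, dtype: contextlib.nullcontext(a)

# old("cpu", dtype=torch.float32)  # TypeError: unexpected keyword argument 'dtype'
with new("cpu", dtype=torch.float32):
    pass  # no-op stand-in for the torch.autocast branch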

comfy/utils.py

Lines changed: 5 additions & 5 deletions

@@ -333,13 +333,13 @@ def slerp(b1, b2, r):
         res[dot < 1e-5 - 1] = (b1 * (1.0-r) + b2 * r)[dot < 1e-5 - 1]
         return res
 
-    def generate_bilinear_data(length_old, length_new):
-        coords_1 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32)
+    def generate_bilinear_data(length_old, length_new, device):
+        coords_1 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1))
         coords_1 = torch.nn.functional.interpolate(coords_1, size=(1, length_new), mode="bilinear")
         ratios = coords_1 - coords_1.floor()
         coords_1 = coords_1.to(torch.int64)
 
-        coords_2 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32) + 1
+        coords_2 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1)) + 1
         coords_2[:,:,:,-1] -= 1
         coords_2 = torch.nn.functional.interpolate(coords_2, size=(1, length_new), mode="bilinear")
         coords_2 = coords_2.to(torch.int64)

@@ -349,7 +349,7 @@ def generate_bilinear_data(length_old, length_new):
     h_new, w_new = (height, width)
 
     #linear w
-    ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new)
+    ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new, samples.device)
     coords_1 = coords_1.expand((n, c, h, -1))
     coords_2 = coords_2.expand((n, c, h, -1))
     ratios = ratios.expand((n, 1, h, -1))

@@ -362,7 +362,7 @@ def generate_bilinear_data(length_old, length_new):
     result = result.reshape(n, h, w_new, c).movedim(-1, 1)
 
     #linear h
-    ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new)
+    ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new, samples.device)
     coords_1 = coords_1.reshape((1,1,-1,1)).expand((n, c, -1, w_new))
     coords_2 = coords_2.reshape((1,1,-1,1)).expand((n, c, -1, w_new))
     ratios = ratios.reshape((1,1,-1,1)).expand((n, 1, -1, w_new))
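Threading the device through means the int64 coordinate tensors are created on the same device as `samples` instead of defaulting to the CPU. A sketch of the failure mode this avoids (assumes a CUDA device is available; shapes are illustrative):

import torch

samples = torch.randn(1, 4, 8, 16, device="cuda")
coords = torch.arange(16).reshape((1, 1, 1, -1)).expand((1, 4, 8, -1))  # CPU tensor
# torch.gather(samples, -1, coords)  # RuntimeError: index tensor on the wrong device
out = torch.gather(samples, -1, coords.to(samples.device))  # what the change guarantees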

comfy_extras/nodes_custom_sampler.py

Lines changed: 3 additions & 3 deletions

@@ -149,7 +149,7 @@ def INPUT_TYPES(s):
     FUNCTION = "get_sampler"
 
     def get_sampler(self, sampler_name):
-        sampler = comfy.samplers.sampler_class(sampler_name)()
+        sampler = comfy.samplers.sampler_object(sampler_name)
         return (sampler, )
 
 class SamplerDPMPP_2M_SDE:

@@ -172,7 +172,7 @@ def get_sampler(self, solver_type, eta, s_noise, noise_device):
             sampler_name = "dpmpp_2m_sde"
         else:
             sampler_name = "dpmpp_2m_sde_gpu"
-        sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type})()
+        sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type})
         return (sampler, )
 
 
@@ -196,7 +196,7 @@ def get_sampler(self, eta, s_noise, r, noise_device):
             sampler_name = "dpmpp_sde"
         else:
             sampler_name = "dpmpp_sde_gpu"
-        sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})()
+        sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})
         return (sampler, )
 
 class SamplerCustom:
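With ksampler() and sampler_object() returning ready-made instances, the trailing () disappears from every get_sampler above; a SAMPLER output only has to be an object exposing the Sampler interface. A sketch of a custom node written against the new pattern (the class name and option values are illustrative, not part of the commit):

import comfy.samplers

class SamplerMyDPMPP_SDE:  # illustrative custom node
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0})}}
    RETURN_TYPES = ("SAMPLER",)
    CATEGORY = "sampling/custom_sampling"
    FUNCTION = "get_sampler"

    def get_sampler(self, eta):
        sampler = comfy.samplers.ksampler("dpmpp_sde", {"eta": eta, "s_noise": 1.0, "r": 0.5})
        return (sampler, )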
