
Commit

Merge branch 'master' into beta
jn-jairo committed Jun 8, 2024
2 parents 2aa6af8 + 56333d4 commit 8aba730
Showing 7 changed files with 40 additions and 14 deletions.
15 changes: 15 additions & 0 deletions comfy/model_management.py
@@ -291,6 +291,7 @@ def __init__(self, model):
self.device = model.load_device
self.weights_loaded = False
self.real_model = None
self.currently_used = True

def model_memory(self):
return self.model.model_size()
@@ -380,6 +381,7 @@ def free_memory(memory_required, device, keep_loaded=[]):
if shift_model.device == device:
if shift_model not in keep_loaded:
can_unload.append((sys.getrefcount(shift_model.model), shift_model.model_memory(), i))
shift_model.currently_used = False

for x in sorted(can_unload):
i = x[-1]
@@ -425,6 +427,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False):
current_loaded_models.pop(loaded_model_index).model_unload(unpatch_weights=True)
loaded = None
else:
loaded.currently_used = True
models_already_loaded.append(loaded)

if loaded is None:
@@ -481,6 +484,16 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False):
def load_model_gpu(model):
return load_models_gpu([model])

def loaded_models(only_currently_used=False):
output = []
for m in current_loaded_models:
if only_currently_used:
if not m.currently_used:
continue

output.append(m.model)
return output

def cleanup_models(keep_clone_weights_loaded=False):
to_delete = []
for i in range(len(current_loaded_models)):
@@ -708,6 +721,8 @@ def pytorch_attention_flash_attention():
#TODO: more reliable way of checking for flash attention?
if is_nvidia(): #pytorch flash attention only works on Nvidia
return True
if is_intel_xpu():
return True
return False

def force_upcast_attention_dtype():
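The model_management.py changes track whether a resident model was actually requested by the current load pass: free_memory() clears currently_used on every candidate it scans, load_models_gpu() sets it back for models it reuses, and the new loaded_models() helper can report either everything still on the device or only the models flagged as in use. A minimal usage sketch, assuming the module is imported as below and that free_memory()/load_models_gpu() have already run; the variable names are illustrative:

import comfy.model_management as mm

# All model patchers still resident on the compute device.
resident = mm.loaded_models()

# Only the ones flagged as used by the most recent load_models_gpu() pass;
# leftovers from earlier prompts report currently_used == False once
# free_memory() has considered them for unloading.
active = mm.loaded_models(only_currently_used=True)

stale = [m for m in resident if m not in active]
print(f"{len(active)} active model(s), {len(stale)} idle model(s) still loaded")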
6 changes: 2 additions & 4 deletions comfy/model_patcher.py
@@ -76,9 +76,7 @@ def __init__(self, model, load_device, offload_device, size=0, current_device=No
def model_size(self):
if self.size > 0:
return self.size
model_sd = self.model.state_dict()
self.size = comfy.model_management.module_size(self.model)
self.model_keys = set(model_sd.keys())
return self.size

def clone(self):
@@ -90,7 +88,6 @@ def clone(self):

n.object_patches = self.object_patches.copy()
n.model_options = copy.deepcopy(self.model_options)
n.model_keys = self.model_keys
n.backup = self.backup
n.object_patches_backup = self.object_patches_backup
return n
@@ -210,8 +207,9 @@ def model_dtype(self):

def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
p = set()
model_sd = self.model.state_dict()
for k in patches:
if k in self.model_keys:
if k in model_sd:
p.add(k)
current_patches = self.patches.get(k, [])
current_patches.append((strength_patch, patches[k], strength_model))
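In model_patcher.py the cached model_keys set is dropped; add_patches() now looks keys up in the model's state_dict() at patch time, so the check always reflects the module as it currently is and clones no longer carry the set around. A standalone sketch of that filtering step (the function and variable names are illustrative, not the ModelPatcher API):

import torch

def filter_patches(model: torch.nn.Module, patches: dict) -> dict:
    # Query the state dict when the patches arrive instead of caching keys,
    # so keys added or removed after construction are still matched correctly.
    model_sd = model.state_dict()
    return {k: v for k, v in patches.items() if k in model_sd}

# usage sketch
layer = torch.nn.Linear(4, 4)
patches = {"weight": torch.zeros(4, 4), "not.a.key": torch.zeros(1)}
print(sorted(filter_patches(layer, patches)))  # ['weight']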
4 changes: 2 additions & 2 deletions comfy/sd1_clip.py
@@ -168,11 +168,11 @@ def forward(self, tokens):
attention_mask = None
if self.enable_attention_masks:
attention_mask = torch.zeros_like(tokens)
max_token = self.transformer.get_input_embeddings().weight.shape[0] - 1
end_token = self.special_tokens.get("end", -1)
for x in range(attention_mask.shape[0]):
for y in range(attention_mask.shape[1]):
attention_mask[x, y] = 1
if tokens[x, y] == max_token:
if tokens[x, y] == end_token:
break

outputs = self.transformer(tokens, attention_mask, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state)
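In sd1_clip.py the attention mask now stops at the tokenizer's declared "end" special token instead of assuming the end token is the last row of the embedding table, which does not hold for every text encoder. A small sketch of the same loop on plain tensors (build_attention_mask and the example token ids are illustrative; 49407 is the usual CLIP end-of-text id):

import torch

def build_attention_mask(tokens: torch.Tensor, special_tokens: dict) -> torch.Tensor:
    end_token = special_tokens.get("end", -1)
    attention_mask = torch.zeros_like(tokens)
    for x in range(attention_mask.shape[0]):
        for y in range(attention_mask.shape[1]):
            attention_mask[x, y] = 1  # attend up to and including the end token
            if tokens[x, y] == end_token:
                break                 # everything after it stays masked
    return attention_mask

tokens = torch.tensor([[49406, 320, 49407, 0, 0]])
print(build_attention_mask(tokens, {"end": 49407}))  # tensor([[1, 1, 1, 0, 0]])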
19 changes: 16 additions & 3 deletions comfy_extras/nodes_compositing.py
@@ -28,14 +28,22 @@ class PorterDuffMode(Enum):


def porter_duff_composite(src_image: torch.Tensor, src_alpha: torch.Tensor, dst_image: torch.Tensor, dst_alpha: torch.Tensor, mode: PorterDuffMode):
# convert mask to alpha
src_alpha = 1 - src_alpha
dst_alpha = 1 - dst_alpha
# premultiply alpha
src_image = src_image * src_alpha
dst_image = dst_image * dst_alpha

# composite ops below assume alpha-premultiplied images
if mode == PorterDuffMode.ADD:
out_alpha = torch.clamp(src_alpha + dst_alpha, 0, 1)
out_image = torch.clamp(src_image + dst_image, 0, 1)
elif mode == PorterDuffMode.CLEAR:
out_alpha = torch.zeros_like(dst_alpha)
out_image = torch.zeros_like(dst_image)
elif mode == PorterDuffMode.DARKEN:
out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.min(src_image, dst_image)
elif mode == PorterDuffMode.DST:
out_alpha = dst_alpha
@@ -84,8 +92,13 @@ def porter_duff_composite(src_image: torch.Tensor, src_alpha: torch.Tensor, dst_
out_alpha = (1 - dst_alpha) * src_alpha + (1 - src_alpha) * dst_alpha
out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image
else:
out_alpha = None
out_image = None
return None, None

# back to non-premultiplied alpha
out_image = torch.where(out_alpha > 1e-5, out_image / out_alpha, torch.zeros_like(out_image))
out_image = torch.clamp(out_image, 0, 1)
# convert alpha to mask
out_alpha = 1 - out_alpha
return out_image, out_alpha


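The compositing change makes porter_duff_composite() treat its inputs as masks, convert them to alpha, premultiply the colors before applying the Porter-Duff equations, and un-premultiply afterwards, returning a mask again; an unknown mode now returns None, None rather than passing None tensors further down. A self-contained sketch of that pipeline for the classic "source over" operator (the shapes, mask polarity, and function name are assumptions for illustration, not the node's exact code):

import torch

def composite_src_over(src_image, src_mask, dst_image, dst_mask):
    # masks -> alpha, following the conversion in the diff above
    src_alpha = 1 - src_mask
    dst_alpha = 1 - dst_mask

    # the Porter-Duff equations operate on alpha-premultiplied color
    src = src_image * src_alpha
    dst = dst_image * dst_alpha

    out_alpha = src_alpha + (1 - src_alpha) * dst_alpha
    out_image = src + (1 - src_alpha) * dst

    # back to straight (non-premultiplied) color, then alpha -> mask
    out_image = torch.where(out_alpha > 1e-5, out_image / out_alpha, torch.zeros_like(out_image))
    return torch.clamp(out_image, 0, 1), 1 - out_alpha

# usage sketch: batch of one 2x2 RGB image, single-channel masks broadcast over the color dim
src = torch.rand(1, 2, 2, 3); dst = torch.rand(1, 2, 2, 3)
image, mask = composite_src_over(src, torch.zeros(1, 2, 2, 1), dst, torch.full((1, 2, 2, 1), 0.5))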
4 changes: 2 additions & 2 deletions nodes.py
@@ -496,7 +496,7 @@ def INPUT_TYPES(s):

CATEGORY = "advanced/loaders"

def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
def load_checkpoint(self, config_name, ckpt_name):
config_path = folder_paths.get_full_path("configs", config_name)
ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
@@ -511,7 +511,7 @@ def INPUT_TYPES(s):

CATEGORY = "loaders"

def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
def load_checkpoint(self, ckpt_name):
ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return out[:3]
4 changes: 2 additions & 2 deletions web/scripts/app.js
@@ -1805,7 +1805,7 @@ export class ComfyApp {
* @param {*} graphData A serialized graph object
* @param { boolean } clean If the graph state, e.g. images, should be cleared
*/
async loadGraphData(graphData, clean = true) {
async loadGraphData(graphData, clean = true, restore_view = true) {
if (clean !== false) {
this.clean();
}
@@ -1841,7 +1841,7 @@

try {
this.graph.configure(graphData);
if (this.enableWorkflowViewRestore.value && graphData.extra?.ds) {
if (restore_view && this.enableWorkflowViewRestore.value && graphData.extra?.ds) {
this.canvas.ds.offset = graphData.extra.ds.offset;
this.canvas.ds.scale = graphData.extra.ds.scale;
}
2 changes: 1 addition & 1 deletion web/scripts/ui.js
@@ -228,7 +228,7 @@ class ComfyList {
$el("button", {
textContent: "Load",
onclick: async () => {
await app.loadGraphData(item.prompt[3].extra_pnginfo.workflow);
await app.loadGraphData(item.prompt[3].extra_pnginfo.workflow, true, false);
if (item.outputs) {
app.nodeOutputs = item.outputs;
}
