
Commit 2799875

Merge branch 'master' into beta
2 parents b900232 + 56d9496

File tree

5 files changed: +62 additions, -40 deletions


app/user_manager.py

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ async def get_users(request):
             else:
                 user_dir = self.get_request_user_filepath(request, None, create_dir=False)
                 return web.json_response({
-                    "storage": "server" if args.server_storage else "browser",
+                    "storage": "server",
                     "migrated": os.path.exists(user_dir)
                 })
 
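For reference, a minimal sketch (not part of the commit) of what a caller of this endpoint now sees: with the server-storage flag gone, the "storage" field is always "server". The host and port below are assumptions about a default local ComfyUI instance.

# Illustrative only: query the /users endpoint and inspect the simplified response.
import json
import urllib.request

with urllib.request.urlopen("http://127.0.0.1:8188/users") as resp:  # assumed default host/port
    info = json.loads(resp.read())

print(info["storage"])   # always "server" after this change
print(info["migrated"])  # whether the user directory already exists on disk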

comfy/cli_args.py

Lines changed: 1 addition & 5 deletions
@@ -122,8 +122,7 @@ class LatentPreviewMethod(enum.Enum):
 
 parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")
 
-parser.add_argument("--server-storage", action="store_true", help="Saves settings and other user configuration on the server instead of in browser storage.")
-parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage. If enabled, server-storage will be unconditionally enabled.")
+parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.")
 
 if comfy.options.args_parsing:
     args = parser.parse_args()

@@ -135,6 +134,3 @@ class LatentPreviewMethod(enum.Enum):
 
 if args.disable_auto_launch:
     args.auto_launch = False
-
-if args.multi_user:
-    args.server_storage = True
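A short, hedged sketch of how code that depended on the removed flag might adapt. The helpers below are hypothetical, not from the commit; they only mirror the new behaviour that server-side storage is unconditional and --multi-user is the sole remaining storage switch.

# Hypothetical helpers, illustrative only: callers that used to branch on
# args.server_storage can now treat server storage as always enabled.
from comfy.cli_args import args

def storage_mode() -> str:
    # "browser" is no longer a possible value; the --server-storage flag was removed.
    return "server"

def per_user_storage_enabled() -> bool:
    return args.multi_user  # --multi-user no longer toggles a separate server_storage flag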

comfy_extras/nodes_hypertile.py

Lines changed: 8 additions & 8 deletions
@@ -37,24 +37,24 @@ def INPUT_TYPES(s):
     def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
         model_channels = model.model.model_config.unet_config["model_channels"]
 
-        apply_to = set()
-        temp = model_channels
-        for x in range(max_depth + 1):
-            apply_to.add(temp)
-            temp *= 2
-
         latent_tile_size = max(32, tile_size) // 8
         self.temp = None
 
         def hypertile_in(q, k, v, extra_options):
-            if q.shape[-1] in apply_to:
+            model_chans = q.shape[-2]
+            orig_shape = extra_options['original_shape']
+            apply_to = []
+            for i in range(max_depth + 1):
+                apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))
+
+            if model_chans in apply_to:
                 shape = extra_options["original_shape"]
                 aspect_ratio = shape[-1] / shape[-2]
 
                 hw = q.size(1)
                 h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
 
-                factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
+                factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
                 nh = random_divisor(h, latent_tile_size * factor, swap_size)
                 nw = random_divisor(w, latent_tile_size * factor, swap_size)
 
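To make the new gating concrete, here is a standalone sketch (not from the commit) of the apply_to computation for a hypothetical latent original_shape of (1, 4, 64, 64), i.e. a 512x512 image: HyperTile now matches the attention token count q.shape[-2] against per-depth latent areas instead of per-depth channel counts.

# Illustrative only: reproduce the new apply_to list for an assumed original_shape.
max_depth = 3
orig_shape = (1, 4, 64, 64)  # assumed (batch, channels, height, width) in latent space

apply_to = []
for i in range(max_depth + 1):
    # feature-map area at UNet depth i (spatial dims halve at each depth)
    apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))

print(apply_to)  # [4096.0, 1024.0, 256.0, 64.0]
# hypertile_in() tiles only when q.shape[-2] equals one of these areas, and with
# scale_depth the matching index becomes the exponent for the tile-size factor.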

execution.py

Lines changed: 46 additions & 25 deletions
@@ -9,6 +9,7 @@
 import gc
 import re
 import inspect
+from typing import List, Literal, NamedTuple, Optional
 
 import torch
 import nodes

@@ -298,8 +299,15 @@ def reset(self):
         self.outputs = {}
         self.object_storage = {}
         self.outputs_ui = {}
+        self.status_messages = []
+        self.success = True
         self.old_prompt = {}
 
+    def add_message(self, event, data, broadcast: bool):
+        self.status_messages.append((event, data))
+        if self.server.client_id is not None or broadcast:
+            self.server.send_sync(event, data, self.server.client_id)
+
     def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex):
         node_id = error["node_id"]
         class_type = prompt[node_id]["class_type"]

@@ -313,23 +321,22 @@ def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, e
                 "node_type": class_type,
                 "executed": list(executed),
             }
-            self.server.send_sync("execution_interrupted", mes, self.server.client_id)
+            self.add_message("execution_interrupted", mes, broadcast=True)
         else:
-            if self.server.client_id is not None:
-                mes = {
-                    "prompt_id": prompt_id,
-                    "node_id": node_id,
-                    "node_type": class_type,
-                    "executed": list(executed),
-
-                    "exception_message": error["exception_message"],
-                    "exception_type": error["exception_type"],
-                    "traceback": error["traceback"],
-                    "current_inputs": error["current_inputs"],
-                    "current_outputs": error["current_outputs"],
-                }
-                self.server.send_sync("execution_error", mes, self.server.client_id)
+            mes = {
+                "prompt_id": prompt_id,
+                "node_id": node_id,
+                "node_type": class_type,
+                "executed": list(executed),
 
+                "exception_message": error["exception_message"],
+                "exception_type": error["exception_type"],
+                "traceback": error["traceback"],
+                "current_inputs": error["current_inputs"],
+                "current_outputs": error["current_outputs"],
+            }
+            self.add_message("execution_error", mes, broadcast=False)
+
         # Next, remove the subsequent outputs since they will not be executed
         to_delete = []
         for o in self.outputs:

@@ -350,8 +357,8 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
         else:
             self.server.client_id = None
 
-        if self.server.client_id is not None:
-            self.server.send_sync("execution_start", { "prompt_id": prompt_id}, self.server.client_id)
+        self.status_messages = []
+        self.add_message("execution_start", { "prompt_id": prompt_id}, broadcast=False)
 
         with torch.inference_mode():
             #delete cached outputs if nodes don't exist for them

@@ -384,8 +391,9 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
                 del d
 
             comfy.model_management.cleanup_models()
-            if self.server.client_id is not None:
-                self.server.send_sync("execution_cached", { "nodes": list(current_outputs) , "prompt_id": prompt_id}, self.server.client_id)
+            self.add_message("execution_cached",
+                             { "nodes": list(current_outputs) , "prompt_id": prompt_id},
+                             broadcast=False)
             executed = set()
             output_node_id = None
             to_execute = []

@@ -401,8 +409,8 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
                 # This call shouldn't raise anything if there's an error deep in
                 # the actual SD code, instead it will report the node where the
                 # error was raised
-                success, error, ex = recursive_execute(self.server, prompt, self.outputs, output_node_id, extra_data, executed, prompt_id, self.outputs_ui, self.object_storage)
-                if success is not True:
+                self.success, error, ex = recursive_execute(self.server, prompt, self.outputs, output_node_id, extra_data, executed, prompt_id, self.outputs_ui, self.object_storage)
+                if self.success is not True:
                     self.handle_execution_error(prompt_id, prompt, current_outputs, executed, error, ex)
                     break
 

@@ -754,14 +762,27 @@ def get(self, timeout=None):
             self.server.queue_updated()
             return (item, i)
 
-    def task_done(self, item_id, outputs):
+    class ExecutionStatus(NamedTuple):
+        status_str: Literal['success', 'error']
+        completed: bool
+        messages: List[str]
+
+    def task_done(self, item_id, outputs,
+                  status: Optional['PromptQueue.ExecutionStatus']):
         with self.mutex:
             prompt = self.currently_running.pop(item_id)
             if len(self.history) > MAXIMUM_HISTORY_SIZE:
                 self.history.pop(next(iter(self.history)))
-            self.history[prompt[1]] = { "prompt": prompt, "outputs": {} }
-            for o in outputs:
-                self.history[prompt[1]]["outputs"][o] = outputs[o]
+
+            status_dict: dict|None = None
+            if status is not None:
+                status_dict = copy.deepcopy(status._asdict())
+
+            self.history[prompt[1]] = {
+                "prompt": prompt,
+                "outputs": copy.deepcopy(outputs),
+                'status': status_dict,
+            }
             self.server.queue_updated()
 
     def get_current_queue(self):
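A small, self-contained sketch (not part of the diff) of the new status record: task_done() deep-copies its _asdict() form into history[prompt_id]["status"], and add_message() accumulates the events that end up in messages.

# Illustrative only: mirrors the ExecutionStatus NamedTuple introduced above.
from typing import List, Literal, NamedTuple

class ExecutionStatus(NamedTuple):
    status_str: Literal['success', 'error']
    completed: bool
    messages: List[str]

status = ExecutionStatus(status_str='error', completed=False,
                         messages=["execution_error"])  # event names collected by add_message()
print(status._asdict())
# {'status_str': 'error', 'completed': False, 'messages': ['execution_error']}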

main.py

Lines changed: 6 additions & 1 deletion
@@ -110,7 +110,12 @@ def prompt_worker(q, server):
 
             e.execute(item[2], prompt_id, item[3], item[4])
             need_gc = True
-            q.task_done(item_id, e.outputs_ui)
+            q.task_done(item_id,
+                        e.outputs_ui,
+                        status=execution.PromptQueue.ExecutionStatus(
+                            status_str='success' if e.success else 'error',
+                            completed=e.success,
+                            messages=e.status_messages))
             if server.client_id is not None:
                 server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)

0 commit comments
