app/app_settings.py (1 addition, 1 deletion)

@@ -22,7 +22,7 @@ def get_settings(self, request):
                with open(file) as f:
                    return json.load(f)
            except:
-                logging.error(f"The user settings file is corrupted: {file}")
+                logging.error("The user settings file is corrupted: %s", file)
                return {}
        else:
            return {}

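The pattern in this hunk repeats throughout the PR: f-strings (and str.format() calls) inside logging calls become %-style templates, with the values passed as arguments. A minimal sketch of the difference, assuming a stand-alone script and a hypothetical path:

import logging

logging.basicConfig(level=logging.INFO)
path = "/tmp/settings.json"  # hypothetical path for illustration

# Eager: the f-string is built before logging.debug() runs, paying the
# formatting cost even though DEBUG records are discarded here.
logging.debug(f"The user settings file is corrupted: {path}")

# Lazy: the template and argument are stored on the LogRecord and merged
# only if a handler actually emits the record.
logging.debug("The user settings file is corrupted: %s", path)

A constant template also lets log aggregation tools group occurrences of the same message regardless of the interpolated values.
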
app/custom_node_manager.py (1 addition, 1 deletion)

@@ -27,7 +27,7 @@ def safe_load_json_file(file_path: str) -> dict:
        with open(file_path, "r", encoding="utf-8") as f:
            return json.load(f)
    except json.JSONDecodeError:
-        logging.error(f"Error loading {file_path}")
+        logging.error("Error loading %s", file_path)
        return {}

app/database/db.py (2 additions, 2 deletions)

@@ -67,7 +67,7 @@ def get_db_path():

def init_db():
    db_url = args.database_url
-    logging.debug(f"Database URL: {db_url}")
+    logging.debug("Database URL: %s", db_url)
    db_path = get_db_path()
    db_exists = os.path.exists(db_path)

@@ -95,7 +95,7 @@ def init_db():

    try:
        command.upgrade(config, target_rev)
-        logging.info(f"Database upgraded from {current_rev} to {target_rev}")
+        logging.info("Database upgraded from %s to %s", current_rev, target_rev)
    except Exception as e:
        if backup_path:
            # Restore the database from backup if upgrade fails

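The db.py hunks show the multi-argument form: values bind to placeholders positionally, and %s accepts any object because the deferred merge calls str() on it. A small sketch with hypothetical revision ids:

import logging

logging.basicConfig(level=logging.INFO)
current_rev, target_rev = "abc123", "def456"  # hypothetical revision ids

# Arguments fill placeholders left to right; count and order must match.
logging.info("Database upgraded from %s to %s", current_rev, target_rev)
# prints: INFO:root:Database upgraded from abc123 to def456
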
app/frontend_management.py (10 additions, 10 deletions)

@@ -53,7 +53,7 @@ def get_required_frontend_version():
            if line.startswith("comfyui-frontend-package=="):
                version_str = line.split("==")[-1]
                if not is_valid_version(version_str):
-                    logging.error(f"Invalid version format in requirements.txt: {version_str}")
+                    logging.error("Invalid version format in requirements.txt: %s", version_str)
                    return None
                return version_str
        logging.error("comfyui-frontend-package not found in requirements.txt")
@@ -62,7 +62,7 @@ def get_required_frontend_version():
        logging.error("requirements.txt not found. Cannot determine required frontend version.")
        return None
    except Exception as e:
-        logging.error(f"Error reading requirements.txt: {e}")
+        logging.error("Error reading requirements.txt: %s", e)
        return None


@@ -87,9 +87,9 @@ def check_frontend_version():
            """.strip()
        )
        else:
-            logging.info("ComfyUI frontend version: {}".format(frontend_version_str))
+            logging.info("ComfyUI frontend version: %s", frontend_version_str)
    except Exception as e:
-        logging.error(f"Failed to check frontend version: {e}")
+        logging.error("Failed to check frontend version: %s", e)


REQUEST_TIMEOUT = 10  # seconds
@@ -225,7 +225,7 @@ def get_required_templates_version(cls) -> str:
                if line.startswith("comfyui-workflow-templates=="):
                    version_str = line.split("==")[-1]
                    if not is_valid_version(version_str):
-                        logging.error(f"Invalid templates version format in requirements.txt: {version_str}")
+                        logging.error("Invalid templates version format in requirements.txt: %s", version_str)
                        return None
                    return version_str
            logging.error("comfyui-workflow-templates not found in requirements.txt")
@@ -234,7 +234,7 @@ def get_required_templates_version(cls) -> str:
            logging.error("requirements.txt not found. Cannot determine required templates version.")
            return None
        except Exception as e:
-            logging.error(f"Error reading requirements.txt: {e}")
+            logging.error("Error reading requirements.txt: %s", e)
            return None

    @classmethod
@@ -282,7 +282,7 @@ def template_asset_map(cls) -> Optional[Dict[str, str]]:
        try:
            template_entries = list(iter_templates())
        except Exception as exc:
-            logging.error(f"Failed to enumerate workflow templates: {exc}")
+            logging.error("Failed to enumerate workflow templates: %s", exc)
            return None

        asset_map: Dict[str, str] = {}
@@ -293,7 +293,7 @@ def template_asset_map(cls) -> Optional[Dict[str, str]]:
                        entry.template_id, asset.filename
                    )
        except Exception as exc:
-            logging.error(f"Failed to resolve template asset paths: {exc}")
+            logging.error("Failed to resolve template asset paths: %s", exc)
            return None

        if not asset_map:
@@ -390,12 +390,12 @@ def init_frontend_unsafe(
        )
        if os.path.exists(expected_path):
            logging.info(
-                f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}"
+                "Using existing copy of specific frontend version tag: %s/%s@%s", repo_owner, repo_name, version
            )
            return expected_path

        logging.info(
-            f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub..."
+            "Initializing frontend: %s/%s@%s, requesting version details from GitHub...", repo_owner, repo_name, version
        )

        provider = provider or FrontEndProvider(repo_owner, repo_name)

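The except-handler conversions above preserve the original behavior: only str(e) ends up in the log. If the traceback were wanted too, logging.exception (or exc_info=True) would capture it; that would be a follow-up change, not something this PR attempts. A sketch:

import logging

logging.basicConfig(level=logging.ERROR)

try:
    with open("requirements.txt") as f:
        f.read()
except OSError as e:
    # The PR's style: message plus str(e), no traceback.
    logging.error("Error reading requirements.txt: %s", e)
    # Alternative that also records the traceback:
    # logging.exception("Error reading requirements.txt")
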
app/model_manager.py (2 additions, 2 deletions)

@@ -144,15 +144,15 @@ def recursive_search_models_(self, directory: str, pathIndex: int) -> tuple[list
                result.append(file_info)

            except Exception as e:
-                logging.warning(f"Warning: Unable to access {file_name}. Error: {e}. Skipping this file.")
+                logging.warning("Warning: Unable to access %s. Error: %s. Skipping this file.", file_name, e)
                continue

        for d in subdirs:
            path: str = os.path.join(dirpath, d)
            try:
                dirs[path] = os.path.getmtime(path)
            except FileNotFoundError:
-                logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
+                logging.warning("Warning: Unable to access %s. Skipping this path.", path)
                continue

        return result, dirs, time.perf_counter()

app/user_manager.py (6 additions, 6 deletions)

@@ -241,7 +241,7 @@ async def list_userdata_v2(request):
            try:
                requested_rel_path = parse.unquote(requested_rel_path)
            except Exception as e:
-                logging.warning(f"Failed to decode path parameter: {requested_rel_path}, Error: {e}")
+                logging.warning("Failed to decode path parameter: %s, Error: %s", requested_rel_path, e)
                return web.Response(status=400, text="Invalid characters in path parameter")

@@ -256,7 +256,7 @@ async def list_userdata_v2(request):

            except KeyError as e:
                # Invalid user detected by get_request_user_id inside get_request_user_filepath
-                logging.warning(f"Access denied for user: {e}")
+                logging.warning("Access denied for user: %s", e)
                return web.Response(status=403, text="Invalid user specified in request")

@@ -304,11 +304,11 @@ async def list_userdata_v2(request):
                        entry_info["size"] = stats.st_size
                        entry_info["modified"] = stats.st_mtime
                    except OSError as stat_error:
-                        logging.warning(f"Could not stat file {file_path}: {stat_error}")
+                        logging.warning("Could not stat file %s: %s", file_path, stat_error)
                        pass  # Include file with available info
                    results.append(entry_info)
            except OSError as e:
-                logging.error(f"Error listing directory {target_abs_path}: {e}")
+                logging.error("Error listing directory %s: %s", target_abs_path, e)
                return web.Response(status=500, text="Error reading directory contents")

            # Sort results alphabetically, directories first then files
@@ -380,7 +380,7 @@ async def post_userdata(request):
                with open(path, "wb") as f:
                    f.write(body)
            except OSError as e:
-                logging.warning(f"Error saving file '{path}': {e}")
+                logging.warning("Error saving file '%s': %s", path, e)
                return web.Response(
                    status=400,
                    reason="Invalid filename. Please avoid special characters like :\\/*?\"<>|"
@@ -444,7 +444,7 @@ async def move_userdata(request):
            if not overwrite and os.path.exists(dest):
                return web.Response(status=409, text="File already exists")

-            logging.info(f"moving '{source}' -> '{dest}'")
+            logging.info("moving '%s' -> '%s'", source, dest)
            shutil.move(source, dest)

            user_path = self.get_request_user_filepath(request, None)

comfy/audio_encoders/audio_encoders.py (2 additions, 2 deletions)

@@ -84,8 +84,8 @@ def load_audio_encoder_from_sd(sd, prefix=""):
    audio_encoder = AudioEncoderModel(config)
    m, u = audio_encoder.load_sd(sd)
    if len(m) > 0:
-        logging.warning("missing audio encoder: {}".format(m))
+        logging.warning("missing audio encoder: %s", m)
    if len(u) > 0:
-        logging.warning("unexpected audio encoder: {}".format(u))
+        logging.warning("unexpected audio encoder: %s", u)

    return audio_encoder

comfy/clip_vision.py (1 addition, 1 deletion)

@@ -130,7 +130,7 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
    clip = ClipVisionModel(json_config)
    m, u = clip.load_sd(sd)
    if len(m) > 0:
-        logging.warning("missing clip vision: {}".format(m))
+        logging.warning("missing clip vision: %s", m)
    u = set(u)
    keys = list(sd.keys())
    for k in keys:

comfy/context_windows.py (3 additions, 3 deletions)

@@ -124,9 +124,9 @@ def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMe
    def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool:
        # for now, assume first dim is batch - should have stored on BaseModel in actual implementation
        if x_in.size(self.dim) > self.context_length:
-            logging.info(f"Using context windows {self.context_length} with overlap {self.context_overlap} for {x_in.size(self.dim)} frames.")
+            logging.info("Using context windows %d with overlap %d for %d frames.", self.context_length, self.context_overlap, x_in.size(self.dim))
            if self.cond_retain_index_list:
-                logging.info(f"Retaining original cond for indexes: {self.cond_retain_index_list}")
+                logging.info("Retaining original cond for indexes: %s", self.cond_retain_index_list)
            return True
        return False

@@ -143,7 +143,7 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
        # if multiple conds, split based on primary region
        if self.split_conds_to_windows and len(cond_in) > 1:
            region = window.get_region_index(len(cond_in))
-            logging.info(f"Splitting conds to windows; using region {region} for window {window.index_list[0]}-{window.index_list[-1]} with center ratio {window.center_ratio:.3f}")
+            logging.info("Splitting conds to windows; using region %d for window %d-%d with center ratio %.3f", region, window.index_list[0], window.index_list[-1], window.center_ratio)
            cond_in = [cond_in[region]]
        # cond object is a list containing a dict - outer list is irrelevant, so just loop through it
        for actual_cond in cond_in:

comfy/controlnet.py (8 additions, 8 deletions)

@@ -442,10 +442,10 @@ def controlnet_load_state_dict(control_model, sd):
    missing, unexpected = control_model.load_state_dict(sd, strict=False)

    if len(missing) > 0:
-        logging.warning("missing controlnet keys: {}".format(missing))
+        logging.warning("missing controlnet keys: %s", missing)

    if len(unexpected) > 0:
-        logging.debug("unexpected controlnet keys: {}".format(unexpected))
+        logging.debug("unexpected controlnet keys: %s", unexpected)
    return control_model


@@ -668,7 +668,7 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}):

        leftover_keys = controlnet_data.keys()
        if len(leftover_keys) > 0:
-            logging.warning("leftover keys: {}".format(leftover_keys))
+            logging.warning("leftover keys: %s", leftover_keys)
        controlnet_data = new_sd
    elif "controlnet_blocks.0.weight" in controlnet_data:
        if "double_blocks.0.img_attn.norm.key_norm.scale" in controlnet_data:
@@ -753,10 +753,10 @@ class WeightsLoader(torch.nn.Module):
    missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)

    if len(missing) > 0:
-        logging.warning("missing controlnet keys: {}".format(missing))
+        logging.warning("missing controlnet keys: %s", missing)

    if len(unexpected) > 0:
-        logging.debug("unexpected controlnet keys: {}".format(unexpected))
+        logging.debug("unexpected controlnet keys: %s", unexpected)

    global_average_pooling = model_options.get("global_average_pooling", False)
    control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype)
@@ -771,7 +771,7 @@ def load_controlnet(ckpt_path, model=None, model_options={}):

    cnet = load_controlnet_state_dict(comfy.utils.load_torch_file(ckpt_path, safe_load=True), model=model, model_options=model_options)
    if cnet is None:
-        logging.error("error checkpoint does not contain controlnet or t2i adapter data {}".format(ckpt_path))
+        logging.error("error checkpoint does not contain controlnet or t2i adapter data %s", ckpt_path)
    return cnet

class T2IAdapter(ControlBase):
@@ -876,9 +876,9 @@ def load_t2i_adapter(t2i_data, model_options={}): #TODO: model_options

    missing, unexpected = model_ad.load_state_dict(t2i_data)
    if len(missing) > 0:
-        logging.warning("t2i missing {}".format(missing))
+        logging.warning("t2i missing %s", missing)

    if len(unexpected) > 0:
-        logging.debug("t2i unexpected {}".format(unexpected))
+        logging.debug("t2i unexpected %s", unexpected)

    return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)

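One hazard of this kind of conversion is dropping a placeholder while keeping the argument, since nothing fails at the call site: logging catches the TypeError during emit and prints a "--- Logging error ---" block to stderr instead of the message. A sketch with a hypothetical key list:

import logging

logging.basicConfig(level=logging.WARNING)
missing = ["adapter.body.0.block1.weight"]  # hypothetical key list

# Correct: one placeholder per argument.
logging.warning("t2i missing %s", missing)

# Broken: no placeholder for the argument; stderr gets "--- Logging error ---"
# (TypeError: not all arguments converted during string formatting).
logging.warning("t2i missing", missing)
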
comfy/diffusers_convert.py (1 addition, 1 deletion)

@@ -86,7 +86,7 @@ def convert_vae_state_dict(vae_state_dict):
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
-                logging.debug(f"Reshaping {k} for SD format")
+                logging.debug("Reshaping %s for SD format", k)
                new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d)
    return new_state_dict

comfy/extra_samplers/uni_pc.py (1 addition, 1 deletion)

@@ -475,7 +475,7 @@ def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **k
            return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)

    def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
-        logging.info(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
+        logging.info("using unified predictor-corrector with order %s (solver type: vary coeff)", order)
        ns = self.noise_schedule
        assert order <= len(model_prev_list)

comfy/hooks.py (1 addition, 1 deletion)

@@ -666,7 +666,7 @@ def load_hook_lora_for_models(model: ModelPatcher, clip: CLIP, lora: dict[str, t
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
-            logging.warning(f"NOT LOADED {x}")
+            logging.warning("NOT LOADED %s", x)
    return (new_modelpatcher, new_clip, hook_group)

def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]):

comfy/ldm/cosmos/blocks.py (1 addition, 1 deletion)

@@ -295,7 +295,7 @@ class TimestepEmbedding(nn.Module):
    def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, weight_args={}, operations=None):
        super().__init__()
        logging.debug(
-            f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility."
+            "Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora
        )
        self.linear_1 = operations.Linear(in_features, out_features, bias=not use_adaln_lora, **weight_args)
        self.activation = nn.SiLU()

comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py (2 additions, 6 deletions)

@@ -632,9 +632,7 @@ def __init__(
        curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        logging.debug(
-            "Working with z of shape {} = {} dimensions.".format(
-                self.z_shape, np.prod(self.z_shape)
-            )
+            "Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)
        )

        # z to block_in
@@ -929,9 +927,7 @@ def __init__(
        curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        logging.debug(
-            "Working with z of shape {} = {} dimensions.".format(
-                self.z_shape, np.prod(self.z_shape)
-            )
+            "Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)
        )

        # z to block_in

comfy/ldm/cosmos/model.py (1 addition, 1 deletion)

@@ -216,7 +216,7 @@ def build_pos_embed(self, device=None, dtype=None):
        else:
            raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")

-        logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}")
+        logging.debug("Building positional embedding with %s class, impl %s", self.pos_emb_cls, cls_type)
        kwargs = dict(
            model_channels=self.model_channels,
            len_h=self.max_img_h // self.patch_spatial,

comfy/ldm/cosmos/predict2.py (13 additions, 6 deletions)

@@ -118,13 +118,20 @@ def __init__(
        operations=None,
    ) -> None:
        super().__init__()
+
+        self.is_selfattn = context_dim is None  # self attention
+        context_dim = query_dim if context_dim is None else context_dim
+
        logging.debug(
-            f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
-            f"{n_heads} heads with a dimension of {head_dim}."
+            "Setting up %s. Query dim is %d, context_dim is %d and using "
+            "%d heads with a dimension of %d.",
+            self.__class__.__name__,
+            query_dim,
+            context_dim,
+            n_heads,
+            head_dim,
        )
-        self.is_selfattn = context_dim is None  # self attention

-        context_dim = query_dim if context_dim is None else context_dim
        inner_dim = head_dim * n_heads

        self.n_heads = n_heads
@@ -226,7 +233,7 @@ class TimestepEmbedding(nn.Module):
    def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, device=None, dtype=None, operations=None):
        super().__init__()
        logging.debug(
-            f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility."
+            "Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora
        )
        self.in_dim = in_features
        self.out_dim = out_features
@@ -718,7 +725,7 @@ def build_pos_embed(self, device=None, dtype=None) -> None:
        else:
            raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")

-        logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}")
+        logging.debug("Building positional embedding with %s class, impl %s", self.pos_emb_cls, cls_type)
        kwargs = dict(
            model_channels=self.model_channels,
            len_h=self.max_img_h // self.patch_spatial,

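A final subtlety from the predict2.py hunk: %d requires a number at merge time, so context_dim can only be logged with %d after it has been defaulted away from None; %s has no such constraint. A sketch with hypothetical dimensions:

import logging

logging.basicConfig(level=logging.DEBUG)
query_dim, context_dim = 1024, None  # hypothetical values

# %d with None would fail at emit time ("--- Logging error ---" on stderr),
# which is why the default is applied before the debug call in the hunk.
context_dim = query_dim if context_dim is None else context_dim
logging.debug("Query dim is %d, context_dim is %d.", query_dim, context_dim)

# %s simply calls str() and would accept None as-is.
logging.debug("Query dim is %s, context_dim is %s.", query_dim, context_dim)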