diff --git a/comfy/model_management.py b/comfy/model_management.py
index 731fb584..15800a8e 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1121,9 +1121,8 @@ def soft_empty_cache(force=False):
     elif is_ascend_npu():
         torch.npu.empty_cache()
     elif torch.cuda.is_available():
-        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
-            torch.cuda.empty_cache()
-            torch.cuda.ipc_collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
 
 def unload_all_models():
     free_memory(1e30, get_torch_device())