Skip to content

Commit f9e55d8

Browse files
Only auto-enable bf16 VAE on NVIDIA GPUs that actually support it.
1 parent 2395ae7 commit f9e55d8

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

comfy/model_management.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -175,7 +175,7 @@ def is_nvidia():
175175
if int(torch_version[0]) >= 2:
176176
if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
177177
ENABLE_PYTORCH_ATTENTION = True
178-
if torch.cuda.is_bf16_supported():
178+
if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
179179
VAE_DTYPE = torch.bfloat16
180180
if is_intel_xpu():
181181
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:

0 commit comments

Comments (0)