1 parent 2395ae7 commit f9e55d8
comfy/model_management.py
@@ -175,7 +175,7 @@ def is_nvidia():
     if int(torch_version[0]) >= 2:
         if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
             ENABLE_PYTORCH_ATTENTION = True
-        if torch.cuda.is_bf16_supported():
+        if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
             VAE_DTYPE = torch.bfloat16
 if is_intel_xpu():
     if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
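The gist of the change: torch.cuda.is_bf16_supported() can report True on GPUs where bfloat16 is only emulated rather than natively supported, so the commit additionally requires CUDA compute capability 8.x (Ampere) or newer before defaulting the VAE to bf16. A minimal standalone sketch of the same guard is below; the pick_vae_dtype helper name is illustrative, not part of the repo:

import torch

def pick_vae_dtype():
    # Hypothetical helper mirroring this commit's guard: prefer bf16 for the
    # VAE only when a CUDA device is present, reports bf16 support, and has
    # compute capability 8.0 or newer (Ampere and later).
    if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
        props = torch.cuda.get_device_properties(torch.cuda.current_device())
        if props.major >= 8:  # e.g. A100, RTX 30xx/40xx; excludes Turing and older
            return torch.bfloat16
    return torch.float32  # safe fallback on CPU or pre-Ampere GPUs

print(pick_vae_dtype())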