Commit 547778b1 authored by AUTOMATIC1111's avatar AUTOMATIC1111

possibly make NaN check cheaper

parent 194c2620
......@@ -243,22 +243,22 @@ def test_for_nans(x, where):
if shared.cmd_opts.disable_nan_check:
return
if not torch.all(torch.isnan(x)).item():
if not torch.isnan(x[(0, ) * len(x.shape)]):
return
if where == "unet":
message = "A tensor with all NaNs was produced in Unet."
message = "A tensor with NaNs was produced in Unet."
if not shared.cmd_opts.no_half:
message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
elif where == "vae":
message = "A tensor with all NaNs was produced in VAE."
message = "A tensor with NaNs was produced in VAE."
if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
else:
message = "A tensor with all NaNs was produced."
message = "A tensor with NaNs was produced."
message += " Use --disable-nan-check commandline argument to disable this check."
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment