Commit 5fe0dd79 authored by AUTOMATIC

rename CPU RNG to RNG source in settings, add infotext and parameters copypaste support to RNG source
parent cb9571e3
@@ -95,7 +95,7 @@ def randn(seed, shape):
     from modules.shared import opts
 
     torch.manual_seed(seed)
-    if opts.use_cpu_randn or device.type == 'mps':
+    if opts.randn_source == "CPU" or device.type == 'mps':
         return torch.randn(shape, device=cpu).to(device)
     return torch.randn(shape, device=device)
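The pattern above (draw on the CPU generator, then move the tensor to the target device) is what makes a fixed seed reproducible across GPU vendors. A standalone sketch of the same idea, not code from the repository:

    import torch

    cpu = torch.device("cpu")
    device = torch.device("cuda") if torch.cuda.is_available() else cpu

    torch.manual_seed(12345)
    # The values come from PyTorch's CPU generator, so they are identical on
    # any machine; only the final .to(device) depends on the local hardware.
    noise = torch.randn((1, 4, 64, 64), device=cpu).to(device)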
@@ -103,7 +103,7 @@ def randn(seed, shape):
 def randn_without_seed(shape):
     from modules.shared import opts
 
-    if opts.use_cpu_randn or device.type == 'mps':
+    if opts.randn_source == "CPU" or device.type == 'mps':
         return torch.randn(shape, device=cpu).to(device)
     return torch.randn(shape, device=device)
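randn_without_seed relies on whatever state the last torch.manual_seed call left behind, so on the CPU path the unseeded draws are reproducible too. A small illustration with plain tensors (no webui code involved):

    import torch

    torch.manual_seed(12345)
    first = torch.randn((2, 2))   # seeded draw
    second = torch.randn((2, 2))  # continues the same generator stream

    torch.manual_seed(12345)
    assert torch.equal(first, torch.randn((2, 2)))
    assert torch.equal(second, torch.randn((2, 2)))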
@@ -284,6 +284,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     restore_old_hires_fix_params(res)
 
+    # Missing RNG means the default was set, which is GPU RNG
+    if "RNG" not in res:
+        res["RNG"] = "GPU"
+
     return res
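The defaulting above only matters when pasting parameters from images generated before this commit; a toy version of the behaviour with a hypothetical parsed dictionary:

    # Hypothetical parsed infotext; older images never wrote an "RNG" field.
    res = {"Steps": "20", "Sampler": "Euler a", "Seed": "965400086"}

    if "RNG" not in res:
        res["RNG"] = "GPU"   # absent field means the old default, GPU noise

    assert res["RNG"] == "GPU"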
@@ -304,6 +308,7 @@ infotext_to_setting_name_mapping = [
     ('UniPC skip type', 'uni_pc_skip_type'),
     ('UniPC order', 'uni_pc_order'),
     ('UniPC lower order final', 'uni_pc_lower_order_final'),
+    ('RNG', 'randn_source'),
 ]
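With that mapping entry, a pasted infotext that carries the field lets the copypaste UI offer to override the randn_source setting to match the original image. An illustrative parameters line (values made up):

    Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, RNG: CPU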
@@ -477,7 +477,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
         "Clip skip": None if clip_skip <= 1 else clip_skip,
         "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
-        "Init image hash": getattr(p, 'init_img_hash', None)
+        "Init image hash": getattr(p, 'init_img_hash', None),
+        "RNG": (opts.randn_source if opts.randn_source != "GPU" else None)
     }
 
     generation_params.update(p.extra_generation_params)
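Values of None in generation_params are what keep default settings out of the infotext, so "RNG" only appears when a non-GPU source was used. A simplified stand-in for how the final line is assembled (names and the join logic here are illustrative, not the webui's exact code):

    randn_source = "CPU"  # hypothetical current value of the setting

    generation_params = {
        "Steps": 20,
        "Seed": 965400086,
        "RNG": (randn_source if randn_source != "GPU" else None),
    }

    # Drop None entries, then join into "key: value" pairs.
    infotext = ", ".join(f"{k}: {v}" for k, v in generation_params.items() if v is not None)
    # -> "Steps: 20, Seed: 965400086, RNG: CPU"; with randn_source == "GPU"
    #    the RNG field is simply omitted.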
@@ -61,7 +61,8 @@ def store_latent(decoded):
 class InterruptedException(BaseException):
     pass
 
-if opts.use_cpu_randn:
+if opts.randn_source == "CPU":
     import torchsde._brownian.brownian_interval
 
     def torchsde_randn(size, dtype, device, seed):
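The body of torchsde_randn is collapsed in this view; the point of the block is that SDE samplers pull their Brownian noise through torchsde, so that path also has to be redirected to the CPU generator when the CPU source is selected. A sketch consistent with the visible def line, assuming a freshly seeded CPU generator, not necessarily the committed body:

    import torch

    cpu = torch.device("cpu")

    def torchsde_randn(size, dtype, device, seed):
        # Draw on a seeded CPU generator, then move to the requested device.
        generator = torch.Generator(cpu).manual_seed(int(seed))
        return torch.randn(size, dtype=dtype, device=cpu, generator=generator).to(device)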
@@ -190,7 +190,7 @@ class TorchHijack:
         if noise.shape == x.shape:
             return noise
 
-        if opts.use_cpu_randn or x.device.type == 'mps':
+        if opts.randn_source == "CPU" or x.device.type == 'mps':
             return torch.randn_like(x, device=devices.cpu).to(x.device)
         else:
             return torch.randn_like(x)
@@ -334,7 +334,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
-    "use_cpu_randn": OptionInfo(False, "Use CPU for random number generation to make manual seeds generate the same image across platforms. This may change existing seeds."),
+    "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different videocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}),
 }))
 
 options_templates.update(options_section(('compatibility', "Compatibility"), {
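Because the option key changes from a boolean use_cpu_randn to the string-valued randn_source, and this diff adds no migration, anyone who had the old checkbox enabled needs to pick CPU again in settings. In the webui's config.json the stored value changes shape roughly like this (illustrative):

    "use_cpu_randn": true      (old key, no longer read)
    "randn_source": "CPU"      (new key; defaults to "GPU")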