Commit 6ac24731 authored by AUTOMATIC

Merge branch 'release_candidate' into dev

parents 59419bd6 dbc88c96
+import gradio as gr
 from modules import shared

 shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
-    "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas"),
+    "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift", "Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
+    "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift", "Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
+    "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
     "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width"),
     "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
-    "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap ( Technical button, needed for testing )"),
+    "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
     "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
-    "canvas_swap_controls": shared.OptionInfo(False, "Swap hotkey combinations for Zoom and Adjust brush resize"),
+    "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom", "Adjust brush size", "Moving canvas", "Fullscreen", "Reset Zoom", "Overlap"]}),
 }))
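Note on usage (not part of the diff): options registered through shared.options_section become entries on shared.opts, so the canvas scripts can read them back by attribute name. A minimal sketch, assuming that attribute access; the defaults and the print are only illustrative:

from modules import shared

# Read the hotkey settings registered above; getattr guards against an
# option missing from older configs (purely defensive, for this sketch).
zoom_modifier = getattr(shared.opts, "canvas_hotkey_zoom", "Alt")
disabled = getattr(shared.opts, "canvas_disabled_functions", ["Overlap"])

if "Zoom" not in disabled:
    print(f"Canvas zoom modifier: {zoom_modifier}")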
-.tooltip-info {
+.canvas-tooltip-info {
     position: absolute;
     top: 10px;
     left: 10px;
@@ -15,7 +15,7 @@
     z-index: 100;
 }
-.tooltip-info::after {
+.canvas-tooltip-info::after {
     content: '';
     display: block;
     width: 2px;
@@ -24,7 +24,7 @@
     margin-top: 2px;
 }
-.tooltip-info::before {
+.canvas-tooltip-info::before {
     content: '';
     display: block;
     width: 2px;
@@ -32,7 +32,7 @@
     background-color: white;
 }
-.tooltip-content {
+.canvas-tooltip-content {
     display: none;
     background-color: #f9f9f9;
     color: #333;
@@ -50,7 +50,7 @@
     z-index: 100;
 }
-.tooltip:hover .tooltip-content {
+.canvas-tooltip:hover .canvas-tooltip-content {
     display: block;
     animation: fadeIn 0.5s;
     opacity: 1;
......
@@ -15,7 +15,7 @@ var titles = {
     "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
     "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
     "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
-    "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
+    "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomized",
     "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
     "\u{1f4c2}": "Open images output directory",
     "\u{1f4be}": "Save style",
@@ -112,7 +112,7 @@ var titles = {
     "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.",
     "Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.",
     "Discard weights with matching name": "Regular expression; if a weight's name matches it, the weight is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.",
-    "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited.",
+    "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed.",
     "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
 };
......
@@ -20,9 +20,7 @@ codeformer = None

 def setup_model(dirname):
-    global model_path
-    if not os.path.exists(model_path):
-        os.makedirs(model_path)
+    os.makedirs(model_path, exist_ok=True)

     path = modules.paths.paths.get("CodeFormer", None)
     if path is None:
......
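This hunk, and several like it below, replace the check-then-create idiom with a single os.makedirs(..., exist_ok=True) call. A minimal sketch of the difference (the path is made up for the example): the old form can race with another process creating the directory between the existence check and the creation, while exist_ok=True makes the call idempotent and still raises for real errors such as permissions.

import os
import tempfile

target = os.path.join(tempfile.gettempdir(), "webui-demo", "models")

# Old pattern: check, then create; a second process creating the directory
# in between makes os.makedirs() raise FileExistsError.
if not os.path.exists(target):
    os.makedirs(target)

# New pattern: one idempotent call; it does not raise if the directory
# already exists, but still fails for other reasons (e.g. permissions).
os.makedirs(target, exist_ok=True)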
@@ -7,8 +7,7 @@ from modules.paths_internal import extensions_dir, extensions_builtin_dir, scrip

 extensions = []

-if not os.path.exists(extensions_dir):
-    os.makedirs(extensions_dir)
+os.makedirs(extensions_dir, exist_ok=True)

 def active():
......
@@ -357,6 +357,7 @@ infotext_to_setting_name_mapping = [
     ('Token merging ratio hr', 'token_merging_ratio_hr'),
     ('RNG', 'randn_source'),
     ('NGMS', 's_min_uncond'),
+    ('Pad conds', 'pad_cond_uncond'),
 ]
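As I read it, this mapping pairs the label written into the generation-parameters text ("Pad conds") with the setting it corresponds to (pad_cond_uncond), so pasting infotext can restore the option. A toy sketch of applying such a mapping; settings_from_infotext and the example dict are made up for illustration:

infotext_to_setting_name_mapping = [
    ('Token merging ratio hr', 'token_merging_ratio_hr'),
    ('RNG', 'randn_source'),
    ('NGMS', 's_min_uncond'),
    ('Pad conds', 'pad_cond_uncond'),
]

def settings_from_infotext(params: dict) -> dict:
    # Translate parsed infotext fields into setting-name/value overrides.
    return {setting: params[label] for label, setting in infotext_to_setting_name_mapping if label in params}

print(settings_from_infotext({"Pad conds": True, "RNG": "CPU"}))
# -> {'pad_cond_uncond': True, 'randn_source': 'CPU'}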
......
@@ -70,11 +70,8 @@ gfpgan_constructor = None

 def setup_model(dirname):
-    global model_path
-    if not os.path.exists(model_path):
-        os.makedirs(model_path)
     try:
+        os.makedirs(model_path, exist_ok=True)
         from gfpgan import GFPGANer
         from facexlib import detection, parsing  # noqa: F401

         global user_path
......
@@ -95,8 +95,7 @@ def cleanup_models():

 def move_files(src_path: str, dest_path: str, ext_filter: str = None):
     try:
-        if not os.path.exists(dest_path):
-            os.makedirs(dest_path)
+        os.makedirs(dest_path, exist_ok=True)
         if os.path.exists(src_path):
             for file in os.listdir(src_path):
                 fullpath = os.path.join(src_path, file)
......
@@ -95,8 +95,7 @@ except Exception:

 def setup_model():
-    if not os.path.exists(model_path):
-        os.makedirs(model_path)
+    os.makedirs(model_path, exist_ok=True)

     enable_midas_autodownload()
......
@@ -69,6 +69,7 @@ class CFGDenoiser(torch.nn.Module):
         self.init_latent = None
         self.step = 0
         self.image_cfg_scale = None
+        self.padded_cond_uncond = False

     def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
         denoised_uncond = x_out[-uncond.shape[0]:]
@@ -133,15 +134,17 @@ class CFGDenoiser(torch.nn.Module):
             x_in = x_in[:-batch_size]
             sigma_in = sigma_in[:-batch_size]

-        # TODO add infotext entry
+        self.padded_cond_uncond = False
         if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
             empty = shared.sd_model.cond_stage_model_empty_prompt
             num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]

             if num_repeats < 0:
                 tensor = torch.cat([tensor, empty.repeat((tensor.shape[0], -num_repeats, 1))], axis=1)
+                self.padded_cond_uncond = True
             elif num_repeats > 0:
                 uncond = torch.cat([uncond, empty.repeat((uncond.shape[0], num_repeats, 1))], axis=1)
+                self.padded_cond_uncond = True

         if tensor.shape[1] == uncond.shape[1] or skip_uncond:
             if is_edit_model:
@@ -405,6 +408,9 @@ class KDiffusionSampler:
         samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))

+        if self.model_wrap_cfg.padded_cond_uncond:
+            p.extra_generation_params["Pad conds"] = True
+
         return samples

     def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
@@ -438,5 +444,8 @@ class KDiffusionSampler:
             's_min_uncond': self.s_min_uncond
         }, disable=False, callback=self.callback_state, **extra_params_kwargs))

+        if self.model_wrap_cfg.padded_cond_uncond:
+            p.extra_generation_params["Pad conds"] = True
+
         return samples
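The change above records whether the conditioning tensors were padded so the samplers can report it as "Pad conds" in the generation parameters. A standalone sketch of the padding the flag tracks, with made-up shapes; in the webui the embeddings come from the text encoder in fixed-length token chunks and empty is the empty-prompt embedding:

import torch

def pad_cond_uncond(cond, uncond, empty):
    # Pad the shorter of cond/uncond with repeats of the empty-prompt
    # embedding so both have the same token length; report if we padded.
    padded = False
    num_repeats = (cond.shape[1] - uncond.shape[1]) // empty.shape[1]
    if num_repeats < 0:
        cond = torch.cat([cond, empty.repeat(cond.shape[0], -num_repeats, 1)], dim=1)
        padded = True
    elif num_repeats > 0:
        uncond = torch.cat([uncond, empty.repeat(uncond.shape[0], num_repeats, 1)], dim=1)
        padded = True
    return cond, uncond, padded

empty = torch.zeros(1, 77, 768)     # stand-in for the empty-prompt embedding
cond = torch.randn(1, 154, 768)     # prompt that spans two chunks
uncond = torch.randn(1, 77, 768)    # negative prompt spanning one chunk
cond, uncond, padded = pad_cond_uncond(cond, uncond, empty)
assert cond.shape == uncond.shape and padded  # would map to "Pad conds: True"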
@@ -409,7 +409,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
     "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
     "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
-    "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"),
+    "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
 }))
......
@@ -298,8 +298,7 @@ def download_and_cache_models(dirname):
     download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
     model_file_name = 'face_detection_yunet.onnx'

-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
+    os.makedirs(dirname, exist_ok=True)

     cache_file = os.path.join(dirname, model_file_name)
     if not os.path.exists(cache_file):
......
@@ -325,6 +325,11 @@ def normalize_git_url(url):
 def install_extension_from_url(dirname, url, branch_name=None):
     check_access()

+    if isinstance(dirname, str):
+        dirname = dirname.strip()
+    if isinstance(url, str):
+        url = url.strip()
+
     assert url, 'No URL specified'

     if dirname is None or dirname == "":
......