Commit 8dbe793a authored by AUTOMATIC1111, committed by GitHub

Merge branch 'master' into extra-network-info

parents 8e3ced73 70615448
@@ -178,6 +178,7 @@ def load_loras(names, multipliers=None):
def lora_forward(module, input, res):
+    input = devices.cond_cast_unet(input)
    if len(loaded_loras) == 0:
        return res
...
@@ -89,22 +89,15 @@ function checkBrackets(evt, textArea, counterElt) {
function setupBracketChecking(id_prompt, id_counter){
    var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
    var counter = gradioApp().getElementById(id_counter)
    textarea.addEventListener("input", function(evt){
        checkBrackets(evt, textarea, counter)
    });
}

-var shadowRootLoaded = setInterval(function() {
-    var shadowRoot = document.querySelector('gradio-app').shadowRoot;
-    if(! shadowRoot) return false;
-    var shadowTextArea = shadowRoot.querySelectorAll('#txt2img_prompt > label > textarea');
-    if(shadowTextArea.length < 1) return false;
-    clearInterval(shadowRootLoaded);
+onUiLoaded(function(){
    setupBracketChecking('txt2img_prompt', 'txt2img_token_counter')
    setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter')
-    setupBracketChecking('img2img_prompt', 'imgimg_token_counter')
+    setupBracketChecking('img2img_prompt', 'img2img_token_counter')
    setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter')
-}, 1000);
+})
\ No newline at end of file
@@ -635,4 +635,30 @@ SOFTWARE.
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+</pre>
+<h2><a href="https://github.com/explosion/curated-transformers/blob/main/LICENSE">Curated transformers</a></h2>
+<small>The MPS workaround for nn.Linear on macOS 13.2.X is based on the MPS workaround for nn.Linear created by danieldk for Curated transformers</small>
+<pre>
+The MIT License (MIT)
+
+Copyright (C) 2021 ExplosionAI GmbH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
</pre>
\ No newline at end of file
@@ -43,7 +43,7 @@ contextMenuInit = function(){
    })

-    gradioApp().getRootNode().appendChild(contextMenu)
+    gradioApp().appendChild(contextMenu)

    let menuWidth = contextMenu.offsetWidth + 4;
    let menuHeight = contextMenu.offsetHeight + 4;
...
function keyupEditAttention(event){
    let target = event.originalTarget || event.composedPath()[0];
-    if (!target.matches("[id*='_toprow'] textarea.gr-text-input[placeholder]")) return;
+    if (! target.matches("[id*='_toprow'] [id*='_prompt'] textarea")) return;
    if (! (event.metaKey || event.ctrlKey)) return;

    let isPlus = event.key == "ArrowUp"
...
@@ -141,3 +141,39 @@ function extraNetworksShowMetadata(event, text){
    event.stopPropagation()
}

+function requestGet(url, data, handler, errorHandler){
+    var xhr = new XMLHttpRequest();
+    var args = Object.keys(data).map(function(k){ return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]) }).join('&')
+    xhr.open("GET", url + "?" + args, true);
+
+    xhr.onreadystatechange = function () {
+        if (xhr.readyState === 4) {
+            if (xhr.status === 200) {
+                try {
+                    var js = JSON.parse(xhr.responseText);
+                    handler(js)
+                } catch (error) {
+                    console.error(error);
+                    errorHandler()
+                }
+            } else{
+                errorHandler()
+            }
+        }
+    };
+    var js = JSON.stringify(data);
+    xhr.send(js);
+}
+
+function extraNetworksRequestMetadata(extraPage, cardName){
+    showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); }
+
+    requestGet("./sd_extra_networks/metadata", {"page": extraPage, "item": cardName}, function(data){
+        if(data && data.metadata){
+            extraNetworksShowMetadata(data.metadata)
+        } else{
+            showError()
+        }
+    }, showError)
+}
@@ -18,7 +18,7 @@ titles = {
    "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
    "\u{1f4c2}": "Open images output directory",
    "\u{1f4be}": "Save style",
-    "\u{1f5d1}": "Clear prompt",
+    "\u{1f5d1}\ufe0f": "Clear prompt",
    "\u{1f4cb}": "Apply selected styles to current prompt",
    "\u{1f4d2}": "Paste available values into the field",
    "\u{1f3b4}": "Show extra networks",
@@ -40,8 +40,7 @@ titles = {
    "Inpaint at full resolution": "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image",
    "Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.",
-    "Denoising strength change factor": "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.",
    "Skip": "Stop processing current image and continue processing.",
    "Interrupt": "Stop processing images and return any results accumulated so far.",
    "Save": "Write image to a directory (default - log/images) and generation parameters into csv file.",
@@ -71,8 +70,10 @@ titles = {
    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg],[prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
    "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
-    "Loopback": "Process an image, use it as an input, repeat.",
-    "Loops": "How many times to repeat processing an image and using it as input for the next iteration",
+    "Loopback": "Performs img2img processing multiple times. Output images are used as input for the next loop.",
+    "Loops": "How many times to process an image. Each output is used as the input of the next loop. If set to 1, behavior will be as if this script were not used.",
+    "Final denoising strength": "The denoising strength for the final loop of each image in the batch.",
+    "Denoising strength curve": "The denoising curve controls the rate of denoising strength change each loop. Aggressive: Most of the change will happen towards the start of the loops. Linear: Change will be constant through all loops. Lazy: Most of the change will happen towards the end of the loops.",
    "Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both",
    "Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both",
...
@@ -50,7 +50,7 @@ function updateOnBackgroundChange() {
}

function modalImageSwitch(offset) {
-    var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
+    var allgalleryButtons = gradioApp().querySelectorAll(".gradio-gallery .thumbnail-item")
    var galleryButtons = []
    allgalleryButtons.forEach(function(elem) {
        if (elem.parentElement.offsetParent) {
@@ -59,7 +59,7 @@ function modalImageSwitch(offset) {
    })

    if (galleryButtons.length > 1) {
-        var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+        var allcurrentButtons = gradioApp().querySelectorAll(".gradio-gallery .thumbnail-item.selected")
        var currentButton = null
        allcurrentButtons.forEach(function(elem) {
            if (elem.parentElement.offsetParent) {
@@ -136,37 +136,29 @@ function modalKeyHandler(event) {
    }
}

-function showGalleryImage() {
-    setTimeout(function() {
-        fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
-        if (fullImg_preview != null) {
-            fullImg_preview.forEach(function function_name(e) {
-                if (e.dataset.modded)
-                    return;
-                e.dataset.modded = true;
-                if(e && e.parentElement.tagName == 'DIV'){
-                    e.style.cursor='pointer'
-                    e.style.userSelect='none'
-                    var isFirefox = isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1
-                    // For Firefox, listening on click first switched to next image then shows the lightbox.
-                    // If you know how to fix this without switching to mousedown event, please.
-                    // For other browsers the event is click to make it possiblr to drag picture.
-                    var event = isFirefox ? 'mousedown' : 'click'
-                    e.addEventListener(event, function (evt) {
-                        if(!opts.js_modal_lightbox || evt.button != 0) return;
-                        modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
-                        evt.preventDefault()
-                        showModal(evt)
-                    }, true);
-                }
-            });
-        }
-    }, 100);
+function setupImageForLightbox(e) {
+    if (e.dataset.modded)
+        return;
+
+    e.dataset.modded = true;
+    e.style.cursor='pointer'
+    e.style.userSelect='none'
+
+    var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1
+
+    // For Firefox, listening on click first switched to next image then shows the lightbox.
+    // If you know how to fix this without switching to mousedown event, please.
+    // For other browsers the event is click to make it possiblr to drag picture.
+    var event = isFirefox ? 'mousedown' : 'click'
+
+    e.addEventListener(event, function (evt) {
+        if(!opts.js_modal_lightbox || evt.button != 0) return;
+
+        modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
+        evt.preventDefault()
+        showModal(evt)
+    }, true);
}

function modalZoomSet(modalImage, enable) {
@@ -199,21 +191,21 @@ function modalTileImageToggle(event) {
}

function galleryImageHandler(e) {
-    if (e && e.parentElement.tagName == 'BUTTON') {
+    //if (e && e.parentElement.tagName == 'BUTTON') {
        e.onclick = showGalleryImage;
-    }
+    //}
}

onUiUpdate(function() {
-    fullImg_preview = gradioApp().querySelectorAll('img.w-full')
+    fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img')
    if (fullImg_preview != null) {
-        fullImg_preview.forEach(galleryImageHandler);
+        fullImg_preview.forEach(setupImageForLightbox);
    }
    updateOnBackgroundChange();
})

document.addEventListener("DOMContentLoaded", function() {
-    const modalFragment = document.createDocumentFragment();
+    //const modalFragment = document.createDocumentFragment();
    const modal = document.createElement('div')
    modal.onclick = closeModal;
    modal.id = "lightboxModal";
@@ -277,9 +269,9 @@ document.addEventListener("DOMContentLoaded", function() {
    modal.appendChild(modalNext)

-    gradioApp().getRootNode().appendChild(modal)
+    gradioApp().appendChild(modal)

-    document.body.appendChild(modalFragment);
+    document.body.appendChild(modal);
});
// code related to showing and updating progressbar shown as the image is being made
-galleries = {}
-storedGallerySelections = {}
-galleryObservers = {}
-
function rememberGallerySelection(id_gallery){
-    storedGallerySelections[id_gallery] = getGallerySelectedIndex(id_gallery)
-}
-
-function getGallerySelectedIndex(id_gallery){
-    let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
-    let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
-    let currentlySelectedIndex = -1
-    galleryButtons.forEach(function(v, i){ if(v==galleryBtnSelected) { currentlySelectedIndex = i } })
-    return currentlySelectedIndex
}

-// this is a workaround for https://github.com/gradio-app/gradio/issues/2984
-function check_gallery(id_gallery){
-    let gallery = gradioApp().getElementById(id_gallery)
-    // if gallery has no change, no need to setting up observer again.
-    if (gallery && galleries[id_gallery] !== gallery){
-        galleries[id_gallery] = gallery;
-        if(galleryObservers[id_gallery]){
-            galleryObservers[id_gallery].disconnect();
-        }
-        storedGallerySelections[id_gallery] = -1
-        galleryObservers[id_gallery] = new MutationObserver(function (){
-            let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
-            let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
-            let currentlySelectedIndex = getGallerySelectedIndex(id_gallery)
-            prevSelectedIndex = storedGallerySelections[id_gallery]
-            storedGallerySelections[id_gallery] = -1
-            if (prevSelectedIndex !== -1 && galleryButtons.length>prevSelectedIndex && !galleryBtnSelected) {
-                // automatically re-open previously selected index (if exists)
-                activeElement = gradioApp().activeElement;
-                let scrollX = window.scrollX;
-                let scrollY = window.scrollY;
-                galleryButtons[prevSelectedIndex].click();
-                showGalleryImage();
-                // When the gallery button is clicked, it gains focus and scrolls itself into view
-                // We need to scroll back to the previous position
-                setTimeout(function (){
-                    window.scrollTo(scrollX, scrollY);
-                }, 50);
-                if(activeElement){
-                    // i fought this for about an hour; i don't know why the focus is lost or why this helps recover it
-                    // if someone has a better solution please by all means
-                    setTimeout(function (){
-                        activeElement.focus({
-                            preventScroll: true // Refocus the element that was focused before the gallery was opened without scrolling to it
-                        })
-                    }, 1);
-                }
-            }
-        })
-        galleryObservers[id_gallery].observe( gallery, { childList:true, subtree:false })
-    }
+function getGallerySelectedIndex(id_gallery){
}

-onUiUpdate(function(){
-    check_gallery('txt2img_gallery')
-    check_gallery('img2img_gallery')
-})

function request(url, data, handler, errorHandler){
    var xhr = new XMLHttpRequest();
    var url = url;
...
@@ -86,7 +86,7 @@ function get_tab_index(tabId){
    var res = 0
    gradioApp().getElementById(tabId).querySelector('div').querySelectorAll('button').forEach(function(button, i){
-        if(button.className.indexOf('bg-white') != -1)
+        if(button.className.indexOf('selected') != -1)
            res = i
    })
@@ -255,7 +255,6 @@ onUiUpdate(function(){
        }

        prompt.parentElement.insertBefore(counter, prompt)
-        counter.classList.add("token-counter")
        prompt.parentElement.style.position = "relative"
        promptTokecountUpdateFuncs[id] = function(){ update_token_counter(id_button); }
...
@@ -14,7 +14,7 @@ parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.real
args, _ = parser.parse_known_args(sys.argv)

script_path = os.path.dirname(__file__)
-data_path = os.getcwd()
+data_path = args.data_dir

dir_repos = "repositories"
dir_extensions = "extensions"
@@ -231,7 +231,7 @@ def run_extensions_installers(settings_file):
        return

    for dirname_extension in list_extensions(settings_file):
-        run_extension_installer(os.path.join(dir_extensions, dirname_extension))
+        run_extension_installer(os.path.join(data_path, dir_extensions, dirname_extension))


def prepare_environment():
...
@@ -18,7 +18,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list
+from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
@@ -150,6 +150,8 @@ class Api:
        self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
        self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
        self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
+        self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
+        self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
        self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList)

    def add_api_route(self, path: str, endpoint, **kwargs):
@@ -412,6 +414,16 @@ class Api:
        return {}

+    def unloadapi(self):
+        unload_model_weights()
+
+        return {}
+
+    def reloadapi(self):
+        reload_model_weights()
+
+        return {}
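The two new routes simply wrap unload_model_weights and reload_model_weights, so an API client can free VRAM and later restore the checkpoint without restarting the server. A minimal usage sketch, assuming a local instance started with --api on the default port (the base URL and the requests dependency are illustrative, not part of this diff):

import requests

BASE = "http://127.0.0.1:7860"  # assumed local webui running with --api

# Move the current checkpoint off the GPU and drop it from memory.
requests.post(f"{BASE}/sdapi/v1/unload-checkpoint").raise_for_status()

# ... run some other GPU workload here ...

# Load the checkpoint back before issuing new generation requests.
requests.post(f"{BASE}/sdapi/v1/reload-checkpoint").raise_for_status()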
    def skip(self):
        shared.state.skip()
...
@@ -401,9 +401,14 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
    button.click(
        fn=paste_func,
-        _js=f"recalculate_prompts_{tabname}",
        inputs=[input_comp],
        outputs=[x[0] for x in paste_fields],
    )
+    button.click(
+        fn=None,
+        _js=f"recalculate_prompts_{tabname}",
+        inputs=[],
+        outputs=[],
+    )
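Splitting this into two click() calls separates the Python paste function from the JavaScript hook: the first handler pastes the values, the second is a JS-only callback that recalculates the prompt token counters. A stripped-down sketch of the same gradio pattern, with hypothetical component names and JS body:

import gradio as gr

with gr.Blocks() as demo:
    source = gr.Textbox(label="source")
    target = gr.Textbox(label="target")
    button = gr.Button("Paste")

    # Python-side work: copy the value across.
    button.click(fn=lambda x: x, inputs=[source], outputs=[target])

    # JS-only follow-up: fn=None, only the client-side callback runs.
    button.click(fn=None, _js="() => console.log('recalculate counters')", inputs=[], outputs=[])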
@@ -645,6 +645,8 @@ Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}
def image_data(data):
+    import gradio as gr
+
    try:
        image = Image.open(io.BytesIO(data))
        textinfo, _ = read_info_from_image(image)
@@ -660,7 +662,7 @@ def image_data(data):
    except Exception:
        pass

-    return '', None
+    return gr.update(), None


def flatten(img, bgcolor):
...
import torch
+import platform
from modules import paths
from modules.sd_hijack_utils import CondFunc
from packaging import version
@@ -32,6 +33,10 @@ if has_mps:
    # MPS fix for randn in torchsde
    CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')

+    if platform.mac_ver()[0].startswith("13.2."):
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
+        CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
+
    if version.parse(torch.__version__) < version.parse("1.13"):
        # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
@@ -49,4 +54,6 @@ if has_mps:
        CondFunc('torch.cumsum', cumsum_fix_func, None)
        CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
        CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
+    if version.parse(torch.__version__) == version.parse("2.0"):
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
+        CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda *args, **kwargs: len(args) == 6)
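Both additions use CondFunc to patch a torch function only when a condition holds. As a rough standalone approximation of the nn.Linear workaround (this is not the CondFunc mechanism itself, just an illustrative monkeypatch under the same condition), torch.nn.functional.linear gets rerouted through an explicit matmul for large inputs, sidestepping the macOS 13.2 MPS bug:

import torch
import torch.nn.functional as F

_orig_linear = F.linear

def _patched_linear(input, weight, bias=None):
    # Only large tensors trigger the MPS bug; small calls keep the original op.
    if input.numel() > 10485760:
        out = torch.matmul(input, weight.t())
        return out + bias if bias is not None else out
    return _orig_linear(input, weight, bias)

F.linear = _patched_linear  # rough equivalent of the CondFunc call above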
@@ -4,7 +4,6 @@ import shutil
import importlib
from urllib.parse import urlparse

-from basicsr.utils.download_util import load_file_from_url
from modules import shared
from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
from modules.paths import script_path, models_path
@@ -59,6 +58,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
    if model_url is not None and len(output) == 0:
        if download_name is not None:
+            from basicsr.utils.download_util import load_file_from_url
            dl = load_file_from_url(model_url, model_path, True, download_name)
            output.append(dl)
        else:
...
@@ -689,6 +689,22 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                    image.info["parameters"] = text
                output_images.append(image)

+                if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
+                    image_mask = p.mask_for_overlay.convert('RGB')
+                    image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA')
+
+                    if opts.save_mask:
+                        images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
+
+                    if opts.save_mask_composite:
+                        images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
+
+                    if opts.return_mask:
+                        output_images.append(image_mask)
+
+                    if opts.return_mask_composite:
+                        output_images.append(image_mask_composite)
+
            del x_samples_ddim
            devices.torch_gc()
...
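The composite above pastes the generated image onto an empty canvas through the inpainting mask, so only the inpainted region stays visible. A small standalone sketch of the same PIL call, with made-up file names:

from PIL import Image

image = Image.open("result.png")                 # hypothetical generated image
mask = Image.open("mask.png").convert("L")       # hypothetical mask_for_overlay

image_mask_composite = Image.composite(
    image.convert("RGBA").convert("RGBa"),       # premultiplied source
    Image.new("RGBa", image.size),               # empty destination canvas
    mask,                                        # greyscale mask selects the source
).convert("RGBA")

image_mask_composite.save("result-mask-composite.png")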
@@ -239,7 +239,15 @@ def load_scripts():
        elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
            postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))

-    for scriptfile in sorted(scripts_list):
+    def orderby(basedir):
+        # 1st webui, 2nd extensions-builtin, 3rd extensions
+        priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0}
+        for key in priority:
+            if basedir.startswith(key):
+                return priority[key]
+        return 9999
+
+    for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
        try:
            if scriptfile.basedir != paths.script_path:
                sys.path = [scriptfile.basedir] + sys.path
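The new sort key loads the webui's own scripts first, then extensions-builtin, then everything else. A toy illustration of orderby with hypothetical paths, assuming an extensions directory that lives outside the install root (for example a custom --data-dir):

import os

script_path = "/srv/webui"  # hypothetical install root

def orderby(basedir):
    # 1st webui, 2nd extensions-builtin, 3rd extensions
    priority = {os.path.join(script_path, "extensions-builtin"): 1, script_path: 0}
    for key in priority:
        if basedir.startswith(key):
            return priority[key]
    return 9999

dirs = [
    "/data/webui-data/extensions/some-extension",
    "/srv/webui/extensions-builtin/Lora",
    "/srv/webui/scripts",
]
print(sorted(dirs, key=orderby))
# ['/srv/webui/scripts', '/srv/webui/extensions-builtin/Lora', '/data/webui-data/extensions/some-extension']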
@@ -513,6 +521,18 @@ def reload_scripts():
scripts_postproc = scripts_postprocessing.ScriptPostprocessingRunner()

+def add_classes_to_gradio_component(comp):
+    """
+    this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
+    """
+
+    comp.elem_classes = ["gradio-" + comp.get_block_name(), *(comp.elem_classes or [])]
+
+    if getattr(comp, 'multiselect', False):
+        comp.elem_classes.append('multiselect')
+
+
def IOComponent_init(self, *args, **kwargs):
    if scripts_current is not None:
        scripts_current.before_component(self, **kwargs)
@@ -521,6 +541,8 @@ def IOComponent_init(self, *args, **kwargs):
    res = original_IOComponent_init(self, *args, **kwargs)

+    add_classes_to_gradio_component(self)
+
    script_callbacks.after_component_callback(self, **kwargs)

    if scripts_current is not None:
...
@@ -109,7 +109,7 @@ class ScriptPostprocessingRunner:
        inputs = []

        for script in self.scripts_in_preferred_order():
-            with gr.Box() as group:
+            with gr.Row() as group:
                self.create_script_ui(script, inputs)

            script.group = group
...
@@ -337,7 +337,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
    dtype = q.dtype
    if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
@@ -372,7 +372,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
    dtype = q.dtype
    if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

    # the output of sdp = (batch, num_heads, seq_len, head_dim)
    hidden_states = torch.nn.functional.scaled_dot_product_attention(
...
@@ -67,7 +67,7 @@ def hijack_ddpm_edit():
unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
-if version.parse(torch.__version__) <= version.parse("1.13.1"):
+if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
    CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
    CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
    CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)
...
@@ -178,7 +178,7 @@ def select_checkpoint():
    return checkpoint_info

-chckpoint_dict_replacements = {
+checkpoint_dict_replacements = {
    'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
    'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
    'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
@@ -186,7 +186,7 @@ chckpoint_dict_replacements = {
def transform_checkpoint_dict_key(k):
-    for text, replacement in chckpoint_dict_replacements.items():
+    for text, replacement in checkpoint_dict_replacements.items():
        if k.startswith(text):
            k = replacement + k[len(text):]
@@ -494,7 +494,7 @@ def reload_model_weights(sd_model=None, info=None):
    if sd_model is None or checkpoint_config != sd_model.used_config:
        del sd_model
        checkpoints_loaded.clear()
-        load_model(checkpoint_info, already_loaded_state_dict=state_dict, time_taken_to_load_state_dict=timer.records["load weights from disk"])
+        load_model(checkpoint_info, already_loaded_state_dict=state_dict)
        return shared.sd_model

    try:
@@ -517,3 +517,23 @@ def reload_model_weights(sd_model=None, info=None):
    print(f"Weights loaded in {timer.summary()}.")

    return sd_model
+
+
+def unload_model_weights(sd_model=None, info=None):
+    from modules import lowvram, devices, sd_hijack
+    timer = Timer()
+
+    if shared.sd_model:
+        # shared.sd_model.cond_stage_model.to(devices.cpu)
+        # shared.sd_model.first_stage_model.to(devices.cpu)
+        shared.sd_model.to(devices.cpu)
+        sd_hijack.model_hijack.undo_hijack(shared.sd_model)
+        shared.sd_model = None
+        sd_model = None
+        gc.collect()
+        devices.torch_gc()
+        torch.cuda.empty_cache()
+
+    print(f"Unloaded weights {timer.summary()}.")
+
+    return sd_model
\ No newline at end of file
@@ -107,7 +107,8 @@ parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS o
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
-parser.add_argument("--gradio-queue", action='store_true', help="Uses gradio queue; experimental option; breaks restart UI button")
+parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True)
+parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions")
parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
@@ -332,6 +333,8 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
    "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
    "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
    "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
+    "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
+    "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
    "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
    "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
    "export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"),
@@ -454,6 +457,8 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
options_templates.update(options_section(('ui', "User interface"), {
    "return_grid": OptionInfo(True, "Show grid in results for web"),
+    "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
+    "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
    "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
    "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
    "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
...
@@ -152,7 +152,11 @@ class EmbeddingDatabase:
                name = data.get('name', name)
            else:
                data = extract_image_data_embed(embed_image)
-                name = data.get('name', name)
+                if data:
+                    name = data.get('name', name)
+                else:
+                    # if data is None, means this is not an embeding, just a preview image
+                    return
        elif ext in ['.BIN', '.PT']:
            data = torch.load(path, map_location="cpu")
        elif ext in ['.SAFETENSORS']:
...
@@ -129,8 +129,8 @@ Requested path was: {f}
    generation_info = None
    with gr.Column():
-        with gr.Row(elem_id=f"image_buttons_{tabname}"):
-            open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else f'open_folder_{tabname}')
+        with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"):
+            open_folder_button = gr.Button(folder_symbol, visible=not shared.cmd_opts.hide_ui_dir_config)

            if tabname != "extras":
                save = gr.Button('Save', elem_id=f'save_{tabname}')
@@ -160,6 +160,7 @@ Requested path was: {f}
            _js="function(x, y, z){ return [x, y, selected_gallery_index()] }",
            inputs=[generation_info, html_info, html_info],
            outputs=[html_info, html_info],
+            show_progress=False,
        )

        save.click(
...
import gradio as gr


-class ToolButton(gr.Button, gr.components.FormComponent):
-    """Small button with single emoji as text, fits inside gradio forms"""
-
-    def __init__(self, **kwargs):
-        super().__init__(variant="tool", **kwargs)
-
-    def get_block_name(self):
-        return "button"
-
-
-class ToolButtonTop(gr.Button, gr.components.FormComponent):
-    """Small button with single emoji as text, with extra margin at top, fits inside gradio forms"""
-
-    def __init__(self, **kwargs):
-        super().__init__(variant="tool-top", **kwargs)
+class FormComponent:
+    def get_expected_parent(self):
+        return gr.components.Form
+
+
+gr.Dropdown.get_expected_parent = FormComponent.get_expected_parent
+
+
+class ToolButton(FormComponent, gr.Button):
+    """Small button with single emoji as text, fits inside gradio forms"""
+
+    def __init__(self, *args, **kwargs):
+        classes = kwargs.pop("elem_classes", [])
+        super().__init__(*args, elem_classes=["tool", *classes], **kwargs)

    def get_block_name(self):
        return "button"


-class FormRow(gr.Row, gr.components.FormComponent):
+class FormRow(FormComponent, gr.Row):
    """Same as gr.Row but fits inside gradio forms"""

    def get_block_name(self):
        return "row"


-class FormGroup(gr.Group, gr.components.FormComponent):
+class FormColumn(FormComponent, gr.Column):
+    """Same as gr.Column but fits inside gradio forms"""
+
+    def get_block_name(self):
+        return "column"
+
+
+class FormGroup(FormComponent, gr.Group):
    """Same as gr.Row but fits inside gradio forms"""

    def get_block_name(self):
        return "group"


-class FormHTML(gr.HTML, gr.components.FormComponent):
+class FormHTML(FormComponent, gr.HTML):
    """Same as gr.HTML but fits inside gradio forms"""

    def get_block_name(self):
        return "html"


-class FormColorPicker(gr.ColorPicker, gr.components.FormComponent):
+class FormColorPicker(FormComponent, gr.ColorPicker):
    """Same as gr.ColorPicker but fits inside gradio forms"""

    def get_block_name(self):
        return "colorpicker"


-class DropdownMulti(gr.Dropdown):
+class DropdownMulti(FormComponent, gr.Dropdown):
    """Same as gr.Dropdown but always multiselect"""
    def __init__(self, **kwargs):
        super().__init__(multiselect=True, **kwargs)
...
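With gradio 3.23 the old variant="tool" styling is gone, so ToolButton now injects a "tool" CSS class via elem_classes instead, and FormComponent lets these wrappers report gr.components.Form as their expected parent. A minimal usage sketch (the emoji and labels are made up; it assumes the modules.ui_components import path from this repository):

import gradio as gr
from modules.ui_components import FormRow, ToolButton

with gr.Blocks() as demo:
    with FormRow():
        prompt = gr.Textbox(label="Prompt")
        clear = ToolButton("\U0001f5d1\ufe0f")  # small emoji-only button rendered with the "tool" class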
import json
import os.path
-import shutil
import sys
import time
import traceback
@@ -141,22 +140,20 @@ def install_extension_from_url(dirname, url):
    try:
        shutil.rmtree(tmpdir, True)
-        repo = git.Repo.clone_from(url, tmpdir)
-        repo.remote().fetch()
+        with git.Repo.clone_from(url, tmpdir) as repo:
+            repo.remote().fetch()
+            for submodule in repo.submodules:
+                submodule.update()
        try:
            os.rename(tmpdir, target_dir)
        except OSError as err:
-            # TODO what does this do on windows? I think it'll be a different error code but I don't have a system to check it
-            # Shouldn't cause any new issues at least but we probably want to handle it there too.
            if err.errno == errno.EXDEV:
                # Cross device link, typical in docker or when tmp/ and extensions/ are on different file systems
                # Since we can't use a rename, do the slower but more versitile shutil.move()
                shutil.move(tmpdir, target_dir)
            else:
                # Something else, not enough free space, permissions, etc. rethrow it so that it gets handled.
-                raise(err)
+                raise err

    import launch
    launch.run_extension_installer(target_dir)
@@ -244,7 +241,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column):
            hidden += 1
            continue

-        install_code = f"""<input onclick="install_extension_from_index(this, '{html.escape(url)}')" type="button" value="{"Install" if not existing else "Installed"}" {"disabled=disabled" if existing else ""} class="gr-button gr-button-lg gr-button-secondary">"""
+        install_code = f"""<button onclick="install_extension_from_index(this, '{html.escape(url)}')" {"disabled=disabled" if existing else ""} class="lg secondary gradio-button custom-button">{"Install" if not existing else "Installed"}</button>"""

        tags_text = ", ".join([f"<span class='extension-tag' title='{tags.get(x, '')}'>{x}</span>" for x in extension_tags])
...
@@ -22,21 +22,37 @@ def register_page(page):
    allowed_dirs.update(set(sum([x.allowed_directories_for_previews() for x in extra_pages], [])))

-def add_pages_to_demo(app):
-    def fetch_file(filename: str = ""):
-        from starlette.responses import FileResponse
-
-        if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]):
-            raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
-
-        ext = os.path.splitext(filename)[1].lower()
-        if ext not in (".png", ".jpg", ".webp"):
-            raise ValueError(f"File cannot be fetched: {filename}. Only png and jpg and webp.")
-
-        # would profit from returning 304
-        return FileResponse(filename, headers={"Accept-Ranges": "bytes"})
+def fetch_file(filename: str = ""):
+    from starlette.responses import FileResponse
+
+    if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]):
+        raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
+
+    ext = os.path.splitext(filename)[1].lower()
+    if ext not in (".png", ".jpg", ".webp"):
+        raise ValueError(f"File cannot be fetched: {filename}. Only png and jpg and webp.")
+
+    # would profit from returning 304
+    return FileResponse(filename, headers={"Accept-Ranges": "bytes"})
+
+
+def get_metadata(page: str = "", item: str = ""):
+    from starlette.responses import JSONResponse
+
+    page = next(iter([x for x in extra_pages if x.name == page]), None)
+    if page is None:
+        return JSONResponse({})
+
+    metadata = page.metadata.get(item)
+    if metadata is None:
+        return JSONResponse({})
+
+    return JSONResponse({"metadata": metadata})
+
+
+def add_pages_to_demo(app):
    app.add_api_route("/sd_extra_networks/thumb", fetch_file, methods=["GET"])
+    app.add_api_route("/sd_extra_networks/metadata", get_metadata, methods=["GET"])
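The route returns {"metadata": ...} for a registered extra-networks page and an empty object otherwise, which is what the extraNetworksRequestMetadata helper in the JavaScript above consumes. A sketch of querying it directly, with a local URL and hypothetical page/item names:

import requests

resp = requests.get(
    "http://127.0.0.1:7860/sd_extra_networks/metadata",  # assumed local webui instance
    params={"page": "lora", "item": "my-lora"},          # hypothetical card
)
print(resp.json())  # {"metadata": "..."} if the card has metadata, {} otherwise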
class ExtraNetworksPage:
@@ -45,6 +61,7 @@ class ExtraNetworksPage:
        self.name = title.lower()
        self.card_page = shared.html("extra-networks-card.html")
        self.allow_negative_prompt = False
+        self.metadata = {}

    def refresh(self):
        pass
@@ -66,6 +83,8 @@ class ExtraNetworksPage:
        view = shared.opts.extra_networks_default_view
        items_html = ''

+        self.metadata = {}
+
        subdirs = {}
        for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
            for x in glob.glob(os.path.join(parentdir, '**/*'), recursive=True):
@@ -86,12 +105,16 @@ class ExtraNetworksPage:
            subdirs = {"": 1, **subdirs}

        subdirs_html = "".join([f"""
-<button class='gr-button gr-button-lg gr-button-secondary{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
+<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
{html.escape(subdir if subdir!="" else "all")}
</button>
""" for subdir in subdirs])

        for item in self.list_items():
+            metadata = item.get("metadata")
+            if metadata:
+                self.metadata[item["name"]] = metadata
+
            items_html += self.create_html_for_item(item, tabname)

        if items_html == '':
@@ -127,8 +150,7 @@ class ExtraNetworksPage:
        metadata_button = ""
        metadata = item.get("metadata")
        if metadata:
-            metadata_onclick = '"' + html.escape(f"""return extraNetworksShowMetadata(event, {json.dumps(metadata)})""") + '"'
-            metadata_button = f"<div class='metadata-button' title='Show metadata' onclick={metadata_onclick}></div>"
+            metadata_button = f"<div class='metadata-button' title='Show metadata' onclick='extraNetworksRequestMetadata(event, {json.dumps(self.name)}, {json.dumps(item['name'])})'></div>"

        args = {
            "preview_html": "style='background-image: url(\"" + html.escape(preview) + "\")'" if preview else '',
@@ -215,6 +237,7 @@ def create_ui(container, button, tabname):
    with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs:
        for page in ui.stored_extra_pages:
            with gr.Tab(page.title):
                page_elem = gr.HTML(page.create_html(ui.tabname))
                ui.pages.append(page_elem)
...
@@ -3,7 +3,7 @@ transformers==4.25.1
accelerate==0.12.0
basicsr==1.4.2
gfpgan==1.3.8
-gradio==3.16.2
+gradio==3.23
numpy==1.23.3
Pillow==9.4.0
realesrgan==0.3.0
...
function gradioApp() {
    const elems = document.getElementsByTagName('gradio-app')
-    const gradioShadowRoot = elems.length == 0 ? null : elems[0].shadowRoot
-    return !!gradioShadowRoot ? gradioShadowRoot : document;
+    const elem = elems.length == 0 ? document : elems[0]
+
+    elem.getElementById = function(id){ return document.getElementById(id) }
+    return elem.shadowRoot ? elem.shadowRoot : elem
}

function get_uiCurrentTab() {
...
-import numpy as np
-from tqdm import trange
+import math

-import modules.scripts as scripts
import gradio as gr
+import modules.scripts as scripts

-from modules import processing, shared, sd_samplers, images
+from modules import deepbooru, images, processing, shared
from modules.processing import Processed
-from modules.sd_samplers import samplers
-from modules.shared import opts, cmd_opts, state
-from modules import deepbooru
+from modules.shared import opts, state


class Script(scripts.Script):
@@ -20,39 +16,68 @@ class Script(scripts.Script):
    def ui(self, is_img2img):
        loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops"))
-        denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=self.elem_id("denoising_strength_change_factor"))
+        final_denoising_strength = gr.Slider(minimum=0, maximum=1, step=0.01, label='Final denoising strength', value=0.5, elem_id=self.elem_id("final_denoising_strength"))
+        denoising_curve = gr.Dropdown(label="Denoising strength curve", choices=["Aggressive", "Linear", "Lazy"], value="Linear")
        append_interrogation = gr.Dropdown(label="Append interrogated prompt at each iteration", choices=["None", "CLIP", "DeepBooru"], value="None")

-        return [loops, denoising_strength_change_factor, append_interrogation]
+        return [loops, final_denoising_strength, denoising_curve, append_interrogation]

-    def run(self, p, loops, denoising_strength_change_factor, append_interrogation):
+    def run(self, p, loops, final_denoising_strength, denoising_curve, append_interrogation):
        processing.fix_seed(p)
        batch_count = p.n_iter

        p.extra_generation_params = {
-            "Denoising strength change factor": denoising_strength_change_factor,
+            "Final denoising strength": final_denoising_strength,
+            "Denoising curve": denoising_curve
        }

        p.batch_size = 1
        p.n_iter = 1

-        output_images, info = None, None
+        info = None
        initial_seed = None
        initial_info = None
+        initial_denoising_strength = p.denoising_strength

        grids = []
        all_images = []
        original_init_image = p.init_images
        original_prompt = p.prompt
+        original_inpainting_fill = p.inpainting_fill
        state.job_count = loops * batch_count

        initial_color_corrections = [processing.setup_color_correction(p.init_images[0])]

+        def calculate_denoising_strength(loop):
+            strength = initial_denoising_strength
+
+            if loops == 1:
+                return strength
+
+            progress = loop / (loops - 1)
+            match denoising_curve:
+                case "Aggressive":
+                    strength = math.sin((progress) * math.pi * 0.5)
+                case "Lazy":
+                    strength = 1 - math.cos((progress) * math.pi * 0.5)
+                case _:
+                    strength = progress
+
+            change = (final_denoising_strength - initial_denoising_strength) * strength
+            return initial_denoising_strength + change
+
+        history = []
+
        for n in range(batch_count):
-            history = []
            # Reset to original init image at the start of each batch
            p.init_images = original_init_image
+
+            # Reset to original denoising strength
+            p.denoising_strength = initial_denoising_strength
last_image = None
for i in range(loops): for i in range(loops):
p.n_iter = 1 p.n_iter = 1
p.batch_size = 1 p.batch_size = 1
...@@ -72,26 +97,46 @@ class Script(scripts.Script): ...@@ -72,26 +97,46 @@ class Script(scripts.Script):
processed = processing.process_images(p) processed = processing.process_images(p)
# Generation cancelled.
if state.interrupted:
break
if initial_seed is None: if initial_seed is None:
initial_seed = processed.seed initial_seed = processed.seed
initial_info = processed.info initial_info = processed.info
init_img = processed.images[0]
p.init_images = [init_img]
p.seed = processed.seed + 1 p.seed = processed.seed + 1
p.denoising_strength = min(max(p.denoising_strength * denoising_strength_change_factor, 0.1), 1) p.denoising_strength = calculate_denoising_strength(i + 1)
history.append(processed.images[0])
if state.skipped:
break
last_image = processed.images[0]
p.init_images = [last_image]
p.inpainting_fill = 1 # Set "masked content" to "original" for next loop.
if batch_count == 1:
history.append(last_image)
all_images.append(last_image)
if batch_count > 1 and not state.skipped and not state.interrupted:
history.append(last_image)
all_images.append(last_image)
p.inpainting_fill = original_inpainting_fill
if state.interrupted:
break
if len(history) > 1:
grid = images.image_grid(history, rows=1) grid = images.image_grid(history, rows=1)
if opts.grid_save: if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p) images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p)
grids.append(grid) if opts.return_grid:
all_images += history grids.append(grid)
if opts.return_grid: all_images = grids + all_images
all_images = grids + all_images
processed = Processed(p, all_images, initial_seed, initial_info) processed = Processed(p, all_images, initial_seed, initial_info)
......
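The loopback rewrite above replaces the multiplicative denoising-strength change factor with an interpolation from the initial strength toward a chosen final strength, eased by a selectable curve. A small standalone sketch of that mapping, with hypothetical loop counts and strengths; the easing math mirrors the diff, the surrounding scaffolding is illustrative:

```python
import math

def denoising_strength(loop, loops, initial, final, curve="Linear"):
    """Map a 1-based loop index to a denoising strength along the chosen curve."""
    if loops == 1:
        return initial
    progress = loop / (loops - 1)
    if curve == "Aggressive":
        # sin easing: strength moves quickly at first, levels off toward the end
        eased = math.sin(progress * math.pi * 0.5)
    elif curve == "Lazy":
        # cos easing: strength moves slowly at first, then accelerates
        eased = 1 - math.cos(progress * math.pi * 0.5)
    else:  # "Linear"
        eased = progress
    return initial + (final - initial) * eased

# Hypothetical run: 4 loops, starting at strength 0.75 and heading toward 0.5.
# In the script each new strength is computed after an iteration finishes,
# so these values are the strengths applied to the following passes.
for curve in ("Aggressive", "Linear", "Lazy"):
    values = [round(denoising_strength(i + 1, 4, 0.75, 0.5, curve), 3) for i in range(4)]
    print(curve, values)
```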
...@@ -17,22 +17,24 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): ...@@ -17,22 +17,24 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
def ui(self): def ui(self):
selected_tab = gr.State(value=0) selected_tab = gr.State(value=0)
with gr.Tabs(elem_id="extras_resize_mode"): with gr.Column():
with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: with FormRow():
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
with FormRow():
upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") with FormRow():
upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h")
with FormRow(): upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
with FormRow():
with FormRow(): extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") with FormRow():
extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
......
...@@ -247,7 +247,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend ...@@ -247,7 +247,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
state.job = f"{index(ix, iy, iz) + 1} out of {list_size}" state.job = f"{index(ix, iy, iz) + 1} out of {list_size}"
processed: Processed = cell(x, y, z) processed: Processed = cell(x, y, z, ix, iy, iz)
if processed_result is None: if processed_result is None:
# Use our first processed result object as a template container to hold our full results # Use our first processed result object as a template container to hold our full results
...@@ -558,8 +558,6 @@ class Script(scripts.Script): ...@@ -558,8 +558,6 @@ class Script(scripts.Script):
print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})") print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})")
shared.total_tqdm.updateTotal(total_steps) shared.total_tqdm.updateTotal(total_steps)
grid_infotext = [None]
state.xyz_plot_x = AxisInfo(x_opt, xs) state.xyz_plot_x = AxisInfo(x_opt, xs)
state.xyz_plot_y = AxisInfo(y_opt, ys) state.xyz_plot_y = AxisInfo(y_opt, ys)
state.xyz_plot_z = AxisInfo(z_opt, zs) state.xyz_plot_z = AxisInfo(z_opt, zs)
...@@ -588,7 +586,9 @@ class Script(scripts.Script): ...@@ -588,7 +586,9 @@ class Script(scripts.Script):
else: else:
second_axes_processed = 'y' second_axes_processed = 'y'
def cell(x, y, z): grid_infotext = [None] * (1 + len(zs))
def cell(x, y, z, ix, iy, iz):
if shared.state.interrupted: if shared.state.interrupted:
return Processed(p, [], p.seed, "") return Processed(p, [], p.seed, "")
...@@ -600,7 +600,9 @@ class Script(scripts.Script): ...@@ -600,7 +600,9 @@ class Script(scripts.Script):
res = process_images(pc) res = process_images(pc)
if grid_infotext[0] is None: # Sets subgrid infotexts
subgrid_index = 1 + iz
if grid_infotext[subgrid_index] is None and ix == 0 and iy == 0:
pc.extra_generation_params = copy(pc.extra_generation_params) pc.extra_generation_params = copy(pc.extra_generation_params)
pc.extra_generation_params['Script'] = self.title() pc.extra_generation_params['Script'] = self.title()
...@@ -616,6 +618,12 @@ class Script(scripts.Script): ...@@ -616,6 +618,12 @@ class Script(scripts.Script):
if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys]) pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys])
grid_infotext[subgrid_index] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
# Sets main grid infotext
if grid_infotext[0] is None and ix == 0 and iy == 0 and iz == 0:
pc.extra_generation_params = copy(pc.extra_generation_params)
if z_opt.label != 'Nothing': if z_opt.label != 'Nothing':
pc.extra_generation_params["Z Type"] = z_opt.label pc.extra_generation_params["Z Type"] = z_opt.label
pc.extra_generation_params["Z Values"] = z_values pc.extra_generation_params["Z Values"] = z_values
...@@ -650,6 +658,9 @@ class Script(scripts.Script): ...@@ -650,6 +658,9 @@ class Script(scripts.Script):
z_count = len(zs) z_count = len(zs)
# Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids)
processed.infotexts[:1+z_count] = grid_infotext[:1+z_count]
if not include_lone_images: if not include_lone_images:
# Don't need sub-images anymore, drop from list: # Don't need sub-images anymore, drop from list:
processed.images = processed.images[:z_count+1] processed.images = processed.images[:z_count+1]
......
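The X/Y/Z plot change above sizes grid_infotext as one slot for the combined grid plus one per Z value, fills each slot at the first cell of its sub-grid (and the main slot at the very first cell), and then overwrites the leading infotexts of the result with them. A minimal sketch of that indexing convention, using placeholder strings instead of real infotexts:

```python
zs = ["z0", "z1", "z2"]          # hypothetical Z axis values
z_count = len(zs)

# Index 0 is the combined grid; index 1 + iz is the sub-grid for Z value iz.
grid_infotext = [None] * (1 + z_count)

grid_infotext[0] = "main grid infotext"                      # set once, at ix == iy == iz == 0
for iz in range(z_count):
    grid_infotext[1 + iz] = f"sub-grid infotext for {zs[iz]}"  # set at ix == 0 and iy == 0

# The result lists grid images first, then individual cells;
# the leading infotexts are replaced with the grid ones.
infotexts = ["cell infotext"] * (1 + z_count + 6)            # hypothetical result list
infotexts[:1 + z_count] = grid_infotext[:1 + z_count]
print(infotexts[:1 + z_count])
```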
...@@ -240,7 +240,7 @@ def webui(): ...@@ -240,7 +240,7 @@ def webui():
shared.demo = modules.ui.create_ui() shared.demo = modules.ui.create_ui()
startup_timer.record("create ui") startup_timer.record("create ui")
if cmd_opts.gradio_queue: if not cmd_opts.no_gradio_queue:
shared.demo.queue(64) shared.demo.queue(64)
gradio_auth_creds = [] gradio_auth_creds = []
...@@ -262,6 +262,9 @@ def webui(): ...@@ -262,6 +262,9 @@ def webui():
inbrowser=cmd_opts.autolaunch, inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True prevent_thread_lock=True
) )
for dep in shared.demo.dependencies:
dep['show_progress'] = False # disable gradio css animation on component update
# after initial launch, disable --autolaunch for subsequent restarts # after initial launch, disable --autolaunch for subsequent restarts
cmd_opts.autolaunch = False cmd_opts.autolaunch = False
......