Commit b08e12d7 authored by nanahira

Merge branch 'master' of github.com:AUTOMATIC1111/stable-diffusion-webui

parents d6bfff76 82a973c0
@@ -78,6 +78,8 @@ module.exports = {
         //extraNetworks.js
         requestGet: "readonly",
         popup: "readonly",
+        // profilerVisualization.js
+        createVisualizationTable: "readonly",
         // from python
         localization: "readonly",
         // progrssbar.js
@@ -86,8 +88,6 @@ module.exports = {
         // imageviewer.js
         modalPrevImage: "readonly",
         modalNextImage: "readonly",
-        // token-counters.js
-        setupTokenCounters: "readonly",
         // localStorage.js
         localSet: "readonly",
         localGet: "readonly",
...
@@ -91,7 +91,7 @@ body:
     id: logs
     attributes:
       label: Console logs
-      description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occured. If it's very long, provide a link to pastebin or similar service.
+      description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
       render: Shell
     validations:
       required: true
...
@@ -11,8 +11,8 @@ jobs:
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: 3.11
           # NB: there's no cache: pip here since we're not installing anything
@@ -20,7 +20,7 @@ jobs:
           # not to have GHA download an (at the time of writing) 4 GB cache
           # of PyTorch and other dependencies.
       - name: Install Ruff
-        run: pip install ruff==0.1.6
+        run: pip install ruff==0.3.3
       - name: Run Ruff
         run: ruff .
   lint-js:
@@ -29,9 +29,9 @@ jobs:
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Install Node.js
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 18
       - run: npm i --ci
...
@@ -11,15 +11,21 @@ jobs:
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set up Python 3.10
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: 3.10.6
           cache: pip
           cache-dependency-path: |
             **/requirements*txt
             launch.py
+      - name: Cache models
+        id: cache-models
+        uses: actions/cache@v4
+        with:
+          path: models
+          key: "2023-12-30"
       - name: Install test dependencies
         run: pip install wait-for-it -r requirements-test.txt
         env:
@@ -33,6 +39,8 @@ jobs:
           TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
           WEBUI_LAUNCH_LIVE_OUTPUT: "1"
           PYTHONUNBUFFERED: "1"
+      - name: Print installed packages
+        run: pip freeze
       - name: Start test server
         run: >
           python -m coverage run
@@ -49,7 +57,7 @@ jobs:
           2>&1 | tee output.txt &
       - name: Run tests
         run: |
-          wait-for-it --service 127.0.0.1:7860 -t 600
+          wait-for-it --service 127.0.0.1:7860 -t 20
           python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test
       - name: Kill test server
         if: always()
@@ -60,13 +68,13 @@ jobs:
           python -m coverage report -i
           python -m coverage html -i
       - name: Upload main app output
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         if: always()
         with:
           name: output
           path: output.txt
       - name: Upload coverage HTML
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         if: always()
         with:
           name: htmlcov
...
@@ -2,6 +2,7 @@ __pycache__
 *.ckpt
 *.safetensors
 *.pth
+.DS_Store
 /ESRGAN/*
 /SwinIR/*
 /repositories
@@ -37,3 +38,7 @@ notification.mp3
 /node_modules
 /package-lock.json
 /.coverage*
+/test/test_outputs
+/cache
+trace.json
+/sysinfo-????-??-??-??-??.json
 # Stable Diffusion web UI
-A browser interface based on Gradio library for Stable Diffusion.
+A web interface for Stable Diffusion, implemented using Gradio library.

 ![](screenshot.png)
@@ -78,7 +78,7 @@ A browser interface based on Gradio library for Stable Diffusion.
 - Clip skip
 - Hypernetworks
 - Loras (same as Hypernetworks but more pretty)
 - A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
 - Can select to load a different VAE from settings screen
 - Estimated completion time in progress bar
 - API
@@ -98,6 +98,7 @@ Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-di
 - [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended)
 - [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
 - [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page)
+- [Ascend NPUs](https://github.com/wangshuai09/stable-diffusion-webui/wiki/Install-and-run-on-Ascend-NPUs) (external wiki page)

 Alternatively, use online services (like Google Colab):
@@ -121,16 +122,38 @@ Alternatively, use online services (like Google Colab):
 # Debian-based:
 sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
 # Red Hat-based:
 sudo dnf install wget git python3 gperftools-libs libglvnd-glx
 # openSUSE-based:
 sudo zypper install wget git python3 libtcmalloc4 libglvnd
 # Arch-based:
 sudo pacman -S wget git python3
 ```
+If your system is very new, you need to install python3.11 or python3.10:
+```bash
+# Ubuntu 24.04
+sudo add-apt-repository ppa:deadsnakes/ppa
+sudo apt update
+sudo apt install python3.11
+# Manjaro/Arch
+sudo pacman -S yay
+yay -S python311 # do not confuse with python3.11 package
+# Only for 3.11
+# Then set up env variable in launch script
+export python_cmd="python3.11"
+# or in webui-user.sh
+python_cmd="python3.11"
+```
 2. Navigate to the directory you would like the webui to be installed and execute the following command:
 ```bash
 wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
 ```
+Or just clone the repo wherever you want:
+```bash
+git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
+```
 3. Run `webui.sh`.
 4. Check `webui-user.sh` for options.
 ### Installation on Apple Silicon
@@ -149,13 +172,14 @@ For the purposes of getting Google and other search engines to crawl the wiki, h
 ## Credits
 Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.

-- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers
+- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers, https://github.com/mcmonkey4eva/sd3-ref
 - k-diffusion - https://github.com/crowsonkb/k-diffusion.git
-- GFPGAN - https://github.com/TencentARC/GFPGAN.git
-- CodeFormer - https://github.com/sczhou/CodeFormer
-- ESRGAN - https://github.com/xinntao/ESRGAN
-- SwinIR - https://github.com/JingyunLiang/SwinIR
-- Swin2SR - https://github.com/mv-lab/swin2sr
+- Spandrel - https://github.com/chaiNNer-org/spandrel implementing
+  - GFPGAN - https://github.com/TencentARC/GFPGAN.git
+  - CodeFormer - https://github.com/sczhou/CodeFormer
+  - ESRGAN - https://github.com/xinntao/ESRGAN
+  - SwinIR - https://github.com/JingyunLiang/SwinIR
+  - Swin2SR - https://github.com/mv-lab/swin2sr
 - LDSR - https://github.com/Hafiidz/latent-diffusion
 - MiDaS - https://github.com/isl-org/MiDaS
 - Ideas for optimizations - https://github.com/basujindal/stable-diffusion
...
[default.extend-words]
# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
Ba = "Ba"
# HSA is something AMD uses for their GPUs
HSA = "HSA"
@@ -40,7 +40,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False
     first_stage_config:
...
@@ -41,7 +41,7 @@ model:
         use_linear_in_transformer: True
         transformer_depth: 1
         context_dim: 1024
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False
     first_stage_config:
...
@@ -45,7 +45,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False
     first_stage_config:
...
model:
  target: modules.models.sd3.sd3_model.SD3Inferencer
  params:
    shift: 3
    state_dict: null
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2816
        num_classes: sequential
        use_checkpoint: False
        in_channels: 9
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
        context_dim: 2048
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              layer: hidden
              layer_idx: 11
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              freeze: True
              layer: penultimate
              always_return_pooled: True
              legacy: False
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: target_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
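The two files above are new configs: a minimal SD3 inferencer config and the SDXL inpainting config in sgm's instantiate-by-target style, where `target` names a class and `params` carries its constructor arguments. A hedged sketch of how such a config is typically consumed, assuming OmegaConf and an `instantiate_from_config` helper in the style of the ldm/sgm codebases (the path and helper are illustrative, not webui's exact loader):

```python
import importlib

from omegaconf import OmegaConf


def instantiate_from_config(config: dict):
    """Import config['target'] and construct it with config['params']."""
    module_name, class_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(**config.get("params", {}))


conf = OmegaConf.load("configs/sd_xl_inpaint.yaml")  # path is illustrative
model_conf = OmegaConf.to_container(conf.model, resolve=True)
model = instantiate_from_config(model_conf)  # e.g. DiffusionEngine(**params)
```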
@@ -40,7 +40,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False
     first_stage_config:
...
@@ -40,7 +40,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False
     first_stage_config:
...
@@ -301,7 +301,7 @@ class DDPMV1(pl.LightningModule):
         elif self.parameterization == "x0":
             target = x_start
         else:
-            raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
+            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

         loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
@@ -572,7 +572,7 @@ class LatentDiffusionV1(DDPMV1):
         :param h: height
         :param w: width
         :return: normalized distance to image border,
-         wtith min distance = 0 at border and max dist = 0.5 at image center
+         with min distance = 0 at border and max dist = 0.5 at image center
         """
         lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
         arr = self.meshgrid(h, w) / lower_right_corner
@@ -880,7 +880,7 @@ class LatentDiffusionV1(DDPMV1):
     def apply_model(self, x_noisy, t, cond, return_ids=False):
         if isinstance(cond, dict):
-            # hybrid case, cond is exptected to be a dict
+            # hybrid case, cond is expected to be a dict
             pass
         else:
             if not isinstance(cond, list):
@@ -916,7 +916,7 @@ class LatentDiffusionV1(DDPMV1):
             cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
         elif self.cond_stage_key == 'coordinates_bbox':
-            assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
+            assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

             # assuming padding of unfold is always 0 and its dilation is always 1
             n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
@@ -926,7 +926,7 @@ class LatentDiffusionV1(DDPMV1):
             num_downs = self.first_stage_model.encoder.num_resolutions - 1
             rescale_latent = 2 ** (num_downs)
-            # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
+            # get top left positions of patches as conforming for the bbbox tokenizer, therefore we
             # need to rescale the tl patch coordinates to be in between (0,1)
             tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                      rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
...
@@ -9,6 +9,8 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
         self.errors = {}
         """mapping of network names to the number of errors the network had during operation"""

+    remove_symbols = str.maketrans('', '', ":,")
+
     def activate(self, p, params_list):
         additional = shared.opts.sd_lora
@@ -43,22 +45,15 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
         networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)

         if shared.opts.lora_add_hashes_to_infotext:
-            network_hashes = []
-            for item in networks.loaded_networks:
-                shorthash = item.network_on_disk.shorthash
-                if not shorthash:
-                    continue
-
-                alias = item.mentioned_name
-                if not alias:
-                    continue
-
-                alias = alias.replace(":", "").replace(",", "")
-
-                network_hashes.append(f"{alias}: {shorthash}")
-
-            if network_hashes:
-                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
+            if not getattr(p, "is_hr_pass", False) or not hasattr(p, "lora_hashes"):
+                p.lora_hashes = {}
+
+            for item in networks.loaded_networks:
+                if item.network_on_disk.shorthash and item.mentioned_name:
+                    p.lora_hashes[item.mentioned_name.translate(self.remove_symbols)] = item.network_on_disk.shorthash
+
+            if p.lora_hashes:
+                p.extra_generation_params["Lora hashes"] = ', '.join(f'{k}: {v}' for k, v in p.lora_hashes.items())

     def deactivate(self, p):
         if self.errors:
...
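The rewrite above replaces per-character `replace()` calls with a `str.maketrans`/`translate` deletion table and accumulates hashes in `p.lora_hashes`, so the hires pass can extend rather than overwrite them. An illustrative standalone sketch of the sanitization and infotext join (the data here is hypothetical):

```python
# str.maketrans('', '', ":,") builds a deletion table; translate() strips those
# characters so "name: hash" pairs joined with ", " stay unambiguous to parse.
remove_symbols = str.maketrans('', '', ":,")

lora_hashes = {}
for name, shorthash in [("style:v2,final", "a1b2c3d4"), ("detail", "deadbeef")]:
    lora_hashes[name.translate(remove_symbols)] = shorthash

infotext = ', '.join(f'{k}: {v}' for k, v in lora_hashes.items())
print(infotext)  # stylev2final: a1b2c3d4, detail: deadbeef
```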
@@ -30,7 +30,7 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:
     In LoRA with Kroneckor Product, first value is a value for weight scale.
     secon value is a value for weight.

-    Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.
+    Because of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.

     examples)
     factor
...
@@ -3,7 +3,11 @@ import os
 from collections import namedtuple
 import enum

+import torch.nn as nn
+import torch.nn.functional as F
+
 from modules import sd_models, cache, errors, hashes, shared
+import modules.models.sd3.mmdit

 NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
@@ -26,7 +30,6 @@ class NetworkOnDisk:
         def read_metadata():
             metadata = sd_models.read_metadata_from_safetensors(filename)
-            metadata.pop('ssmd_cover_images', None)  # those are cover images, and they are too big to display in UI as text

             return metadata
@@ -112,14 +115,49 @@ class NetworkModule:
         self.sd_key = weights.sd_key
         self.sd_module = weights.sd_module

-        if hasattr(self.sd_module, 'weight'):
+        if isinstance(self.sd_module, modules.models.sd3.mmdit.QkvLinear):
+            s = self.sd_module.weight.shape
+            self.shape = (s[0] // 3, s[1])
+        elif hasattr(self.sd_module, 'weight'):
             self.shape = self.sd_module.weight.shape
+        elif isinstance(self.sd_module, nn.MultiheadAttention):
+            # For now, only self-attn use Pytorch's MHA
+            # So assume all qkvo proj have same shape
+            self.shape = self.sd_module.out_proj.weight.shape
+        else:
+            self.shape = None
+
+        self.ops = None
+        self.extra_kwargs = {}
+        if isinstance(self.sd_module, nn.Conv2d):
+            self.ops = F.conv2d
+            self.extra_kwargs = {
+                'stride': self.sd_module.stride,
+                'padding': self.sd_module.padding
+            }
+        elif isinstance(self.sd_module, nn.Linear):
+            self.ops = F.linear
+        elif isinstance(self.sd_module, nn.LayerNorm):
+            self.ops = F.layer_norm
+            self.extra_kwargs = {
+                'normalized_shape': self.sd_module.normalized_shape,
+                'eps': self.sd_module.eps
+            }
+        elif isinstance(self.sd_module, nn.GroupNorm):
+            self.ops = F.group_norm
+            self.extra_kwargs = {
+                'num_groups': self.sd_module.num_groups,
+                'eps': self.sd_module.eps
+            }

         self.dim = None
         self.bias = weights.w.get("bias")
         self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
         self.scale = weights.w["scale"].item() if "scale" in weights.w else None
+
+        self.dora_scale = weights.w.get("dora_scale", None)
+        self.dora_norm_dims = len(self.shape) - 1

     def multiplier(self):
         if 'transformer' in self.sd_key[:20]:
             return self.network.te_multiplier
@@ -134,10 +172,31 @@ class NetworkModule:

         return 1.0

+    def apply_weight_decompose(self, updown, orig_weight):
+        # Match the device/dtype
+        orig_weight = orig_weight.to(updown.dtype)
+        dora_scale = self.dora_scale.to(device=orig_weight.device, dtype=updown.dtype)
+        updown = updown.to(orig_weight.device)
+
+        merged_scale1 = updown + orig_weight
+        merged_scale1_norm = (
+            merged_scale1.transpose(0, 1)
+            .reshape(merged_scale1.shape[1], -1)
+            .norm(dim=1, keepdim=True)
+            .reshape(merged_scale1.shape[1], *[1] * self.dora_norm_dims)
+            .transpose(0, 1)
+        )
+
+        dora_merged = (
+            merged_scale1 * (dora_scale / merged_scale1_norm)
+        )
+        final_updown = dora_merged - orig_weight
+        return final_updown
+
     def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
         if self.bias is not None:
             updown = updown.reshape(self.bias.shape)
-            updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown += self.bias.to(orig_weight.device, dtype=updown.dtype)
             updown = updown.reshape(output_shape)

         if len(output_shape) == 4:
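The new `apply_weight_decompose` implements DoRA-style weight decomposition: the merged weight `orig + updown` is normalized per weight column and rescaled by the learned magnitude vector `dora_scale`. A standalone sketch of the same arithmetic on plain tensors (shapes mirror the code above; `dora_scale` is assumed broadcastable to one magnitude per input column):

```python
import torch

def dora_updown(updown: torch.Tensor, orig_weight: torch.Tensor,
                dora_scale: torch.Tensor) -> torch.Tensor:
    """Return the delta such that orig_weight + delta is the magnitude-rescaled merge."""
    merged = updown + orig_weight
    # One L2 norm per input column of the merged weight (works for 2D and 4D weights)
    norm = (
        merged.transpose(0, 1)
        .reshape(merged.shape[1], -1)
        .norm(dim=1, keepdim=True)
        .reshape(merged.shape[1], *[1] * (merged.dim() - 1))
        .transpose(0, 1)
    )
    return merged * (dora_scale / norm) - orig_weight

# Quick shape check on a hypothetical 8x16 linear weight:
w0 = torch.randn(8, 16)
delta = torch.randn(8, 16) * 0.01
scale = torch.ones(1, 16)  # hypothetical learned magnitudes
print(dora_updown(delta, w0, scale).shape)  # torch.Size([8, 16])
```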
@@ -149,11 +208,21 @@ class NetworkModule:
         if ex_bias is not None:
             ex_bias = ex_bias * self.multiplier()

-        return updown * self.calc_scale() * self.multiplier(), ex_bias
+        updown = updown * self.calc_scale()
+
+        if self.dora_scale is not None:
+            updown = self.apply_weight_decompose(updown, orig_weight)
+
+        return updown * self.multiplier(), ex_bias

     def calc_updown(self, target):
         raise NotImplementedError()

     def forward(self, x, y):
-        raise NotImplementedError()
+        """A general forward implementation for all modules"""
+        if self.ops is None:
+            raise NotImplementedError()
+        else:
+            updown, ex_bias = self.calc_updown(self.sd_module.weight)
+            return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)
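The generalized `forward` above applies a network's delta functionally: `calc_updown` produces the delta weight, and the stored functional op (`F.linear`, `F.conv2d`, or a norm) runs the input through that delta, which is then added to the wrapped module's original output `y`. A minimal plain-PyTorch illustration of the same dispatch idea (not the webui class itself):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

linear = nn.Linear(16, 8)
down = torch.randn(4, 16) * 0.01   # LoRA down-projection (rank 4)
up = torch.randn(8, 4) * 0.01      # LoRA up-projection

x = torch.randn(2, 16)
y = linear(x)                          # original module output
updown = up @ down                     # (8, 16) delta weight, as calc_updown would return
y_with_lora = y + F.linear(x, updown)  # functional apply of the delta, added onto y
```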
@@ -18,9 +18,9 @@ class NetworkModuleFull(network.NetworkModule):
     def calc_updown(self, orig_weight):
         output_shape = self.weight.shape
-        updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+        updown = self.weight.to(orig_weight.device)
         if self.ex_bias is not None:
-            ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype)
+            ex_bias = self.ex_bias.to(orig_weight.device)
         else:
             ex_bias = None
...
@@ -22,12 +22,12 @@ class NetworkModuleGLora(network.NetworkModule):
         self.w2b = weights.w["b2.weight"]

     def calc_updown(self, orig_weight):
-        w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
-        w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
-        w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
-        w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+        w1a = self.w1a.to(orig_weight.device)
+        w1b = self.w1b.to(orig_weight.device)
+        w2a = self.w2a.to(orig_weight.device)
+        w2b = self.w2b.to(orig_weight.device)

         output_shape = [w1a.size(0), w1b.size(1)]
-        updown = ((w2b @ w1b) + ((orig_weight @ w2a) @ w1a))
+        updown = ((w2b @ w1b) + ((orig_weight.to(dtype = w1a.dtype) @ w2a) @ w1a))

         return self.finalize_updown(updown, orig_weight, output_shape)
@@ -27,16 +27,16 @@ class NetworkModuleHada(network.NetworkModule):
         self.t2 = weights.w.get("hada_t2")

     def calc_updown(self, orig_weight):
-        w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
-        w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
-        w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
-        w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+        w1a = self.w1a.to(orig_weight.device)
+        w1b = self.w1b.to(orig_weight.device)
+        w2a = self.w2a.to(orig_weight.device)
+        w2b = self.w2b.to(orig_weight.device)

         output_shape = [w1a.size(0), w1b.size(1)]

         if self.t1 is not None:
             output_shape = [w1a.size(1), w1b.size(1)]
-            t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype)
+            t1 = self.t1.to(orig_weight.device)
             updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b)
             output_shape += t1.shape[2:]
         else:
@@ -45,7 +45,7 @@ class NetworkModuleHada(network.NetworkModule):
             updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape)

         if self.t2 is not None:
-            t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
+            t2 = self.t2.to(orig_weight.device)
             updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
         else:
             updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape)
...
@@ -17,7 +17,7 @@ class NetworkModuleIa3(network.NetworkModule):
         self.on_input = weights.w["on_input"].item()

     def calc_updown(self, orig_weight):
-        w = self.w.to(orig_weight.device, dtype=orig_weight.dtype)
+        w = self.w.to(orig_weight.device)

         output_shape = [w.size(0), orig_weight.size(1)]
         if self.on_input:
...
@@ -37,22 +37,22 @@ class NetworkModuleLokr(network.NetworkModule):
     def calc_updown(self, orig_weight):
         if self.w1 is not None:
-            w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype)
+            w1 = self.w1.to(orig_weight.device)
         else:
-            w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
-            w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
+            w1a = self.w1a.to(orig_weight.device)
+            w1b = self.w1b.to(orig_weight.device)
             w1 = w1a @ w1b

         if self.w2 is not None:
-            w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2 = self.w2.to(orig_weight.device)
         elif self.t2 is None:
-            w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
-            w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2a = self.w2a.to(orig_weight.device)
+            w2b = self.w2b.to(orig_weight.device)
             w2 = w2a @ w2b
         else:
-            t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
-            w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
-            w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+            t2 = self.t2.to(orig_weight.device)
+            w2a = self.w2a.to(orig_weight.device)
+            w2b = self.w2b.to(orig_weight.device)
             w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)

         output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)]
...
 import torch

 import lyco_helpers
+import modules.models.sd3.mmdit
 import network
 from modules import devices

@@ -10,6 +11,13 @@ class ModuleTypeLora(network.ModuleType):
         if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]):
             return NetworkModuleLora(net, weights)

+        if all(x in weights.w for x in ["lora_A.weight", "lora_B.weight"]):
+            w = weights.w.copy()
+            weights.w.clear()
+            weights.w.update({"lora_up.weight": w["lora_B.weight"], "lora_down.weight": w["lora_A.weight"]})
+            return NetworkModuleLora(net, weights)
+
         return None
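The new branch accepts PEFT/diffusers-style checkpoints, which name the down-projection `lora_A` and the up-projection `lora_B`, by rewriting them to the kohya-style keys the existing loader understands. A tiny sketch of that mapping (the `weights` dict here is hypothetical stand-in data, not a real state dict):

```python
# Values are placeholders describing the tensor each key would hold.
weights = {"lora_A.weight": "down_proj (r x in)", "lora_B.weight": "up_proj (out x r)"}

w = weights.copy()
weights.clear()
weights.update({
    "lora_up.weight": w["lora_B.weight"],    # B projects rank back up  -> lora_up
    "lora_down.weight": w["lora_A.weight"],  # A projects input down    -> lora_down
})
print(weights)
```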
@@ -29,7 +37,7 @@ class NetworkModuleLora(network.NetworkModule):
         if weight is None and none_ok:
             return None

-        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention]
+        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention, modules.models.sd3.mmdit.QkvLinear]
         is_conv = type(self.sd_module) in [torch.nn.Conv2d]

         if is_linear:
@@ -61,13 +69,13 @@ class NetworkModuleLora(network.NetworkModule):
         return module

     def calc_updown(self, orig_weight):
-        up = self.up_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
-        down = self.down_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+        up = self.up_model.weight.to(orig_weight.device)
+        down = self.down_model.weight.to(orig_weight.device)

         output_shape = [up.size(0), down.size(1)]
         if self.mid_model is not None:
             # cp-decomposition
-            mid = self.mid_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+            mid = self.mid_model.weight.to(orig_weight.device)
             updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid)
             output_shape += mid.shape[2:]
         else:
...
@@ -18,10 +18,10 @@ class NetworkModuleNorm(network.NetworkModule):
     def calc_updown(self, orig_weight):
         output_shape = self.w_norm.shape
-        updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype)
+        updown = self.w_norm.to(orig_weight.device)

         if self.b_norm is not None:
-            ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype)
+            ex_bias = self.b_norm.to(orig_weight.device)
         else:
             ex_bias = None
...
 import torch
 import network
-from lyco_helpers import factorization
 from einops import rearrange
@@ -22,16 +21,17 @@ class NetworkModuleOFT(network.NetworkModule):
         self.org_module: list[torch.Module] = [self.sd_module]

         self.scale = 1.0
+        self.is_R = False
+        self.is_boft = False

-        # kohya-ss
+        # kohya-ss/New LyCORIS OFT/BOFT
         if "oft_blocks" in weights.w.keys():
-            self.is_kohya = True
             self.oft_blocks = weights.w["oft_blocks"]  # (num_blocks, block_size, block_size)
-            self.alpha = weights.w["alpha"]  # alpha is constraint
+            self.alpha = weights.w.get("alpha", None)  # alpha is constraint
             self.dim = self.oft_blocks.shape[0]  # lora dim
-        # LyCORIS
+        # Old LyCORIS OFT
         elif "oft_diag" in weights.w.keys():
-            self.is_kohya = False
+            self.is_R = True
             self.oft_blocks = weights.w["oft_diag"]
             # self.alpha is unused
             self.dim = self.oft_blocks.shape[1]  # (num_blocks, block_size, block_size)
@@ -47,36 +47,72 @@ class NetworkModuleOFT(network.NetworkModule):
         elif is_other_linear:
             self.out_dim = self.sd_module.embed_dim

-        if self.is_kohya:
-            self.constraint = self.alpha * self.out_dim
-            self.num_blocks = self.dim
-            self.block_size = self.out_dim // self.dim
-        else:
+        # LyCORIS BOFT
+        if self.oft_blocks.dim() == 4:
+            self.is_boft = True
+        self.rescale = weights.w.get('rescale', None)
+        if self.rescale is not None and not is_other_linear:
+            self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))
+
+        self.num_blocks = self.dim
+        self.block_size = self.out_dim // self.dim
+        self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim
+        if self.is_R:
             self.constraint = None
-            self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
+            self.block_size = self.dim
+            self.num_blocks = self.out_dim // self.dim
+        elif self.is_boft:
+            self.boft_m = self.oft_blocks.shape[0]
+            self.num_blocks = self.oft_blocks.shape[1]
+            self.block_size = self.oft_blocks.shape[2]
+            self.boft_b = self.block_size

     def calc_updown(self, orig_weight):
-        oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)
-        eye = torch.eye(self.block_size, device=self.oft_blocks.device)
+        oft_blocks = self.oft_blocks.to(orig_weight.device)
+        eye = torch.eye(self.block_size, device=oft_blocks.device)

-        if self.is_kohya:
-            block_Q = oft_blocks - oft_blocks.transpose(1, 2)  # ensure skew-symmetric orthogonal matrix
-            norm_Q = torch.norm(block_Q.flatten())
-            new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
-            block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
+        if not self.is_R:
+            block_Q = oft_blocks - oft_blocks.transpose(-1, -2)  # ensure skew-symmetric orthogonal matrix
+            if self.constraint != 0:
+                norm_Q = torch.norm(block_Q.flatten())
+                new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device))
+                block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
             oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())

-        R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)
+        R = oft_blocks.to(orig_weight.device)

-        # This errors out for MultiheadAttention, might need to be handled up-stream
-        merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
-        merged_weight = torch.einsum(
-            'k n m, k n ... -> k m ...',
-            R,
-            merged_weight
-        )
-        merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
+        if not self.is_boft:
+            # This errors out for MultiheadAttention, might need to be handled up-stream
+            merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
+            merged_weight = torch.einsum(
+                'k n m, k n ... -> k m ...',
+                R,
+                merged_weight
+            )
+            merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
+        else:
+            # TODO: determine correct value for scale
+            scale = 1.0
+            m = self.boft_m
+            b = self.boft_b
+            r_b = b // 2
+            inp = orig_weight
+            for i in range(m):
+                bi = R[i]  # b_num, b_size, b_size
+                if i == 0:
+                    # Apply multiplier/scale and rescale into first weight
+                    bi = bi * scale + (1 - scale) * eye
+                inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b)
+                inp = rearrange(inp, "(d b) ... -> d b ...", b=b)
+                inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp)
+                inp = rearrange(inp, "d b ... -> (d b) ...")
+                inp = rearrange(inp, "(c k g) ... -> (c g k) ...", g=2, k=2**i * r_b)
+            merged_weight = inp
+
+        # Rescale mechanism
+        if self.rescale is not None:
+            merged_weight = self.rescale.to(merged_weight) * merged_weight

-        updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight
+        updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype)
         output_shape = orig_weight.shape
         return self.finalize_updown(updown, orig_weight, output_shape)
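In the rewritten module, the non-`is_R` path builds each orthogonal block with the Cayley transform: for a skew-symmetric `Q`, `R = (I + Q)(I - Q)^-1` is orthogonal, and the norm clamp bounds how far `R` can rotate. A quick standalone check of that property:

```python
import torch

b = torch.randn(4, 4) * 0.1
Q = b - b.T                                   # skew-symmetric by construction
I = torch.eye(4)
R = (I + Q) @ torch.linalg.inv(I - Q)         # Cayley transform
print(torch.allclose(R @ R.T, I, atol=1e-5))  # True: R is orthogonal
```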
 import os
 from modules import paths
+from modules.paths_internal import normalized_filepath


 def preload(parser):
-    parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
-    parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
+    parser.add_argument("--lora-dir", type=normalized_filepath, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
+    parser.add_argument("--lyco-dir-backcompat", type=normalized_filepath, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
@@ -36,9 +36,12 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
     "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
     "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
     "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
+    "lora_bundled_ti_to_infotext": shared.OptionInfo(True, "Add Lora name as TI hashes for bundled Textual Inversion").info('"Add Textual Inversion hashes to infotext" needs to be enabled'),
     "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
     "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
     "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
+    "lora_not_found_warning_console": shared.OptionInfo(False, "Lora not found warning in console"),
+    "lora_not_found_gradio_warning": shared.OptionInfo(False, "Lora not found warning popup in webui"),
 }))
...
@@ -21,10 +21,12 @@ re_comma = re.compile(r" *, *")
 def build_tags(metadata):
     tags = {}

-    for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
-        for tag, tag_count in tags_dict.items():
-            tag = tag.strip()
-            tags[tag] = tags.get(tag, 0) + int(tag_count)
+    ss_tag_frequency = metadata.get("ss_tag_frequency", {})
+    if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'):
+        for _, tags_dict in ss_tag_frequency.items():
+            for tag, tag_count in tags_dict.items():
+                tag = tag.strip()
+                tags[tag] = tags.get(tag, 0) + int(tag_count)

     if tags and is_non_comma_tagset(tags):
         new_tags = {}
@@ -54,12 +56,13 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
         self.slider_preferred_weight = None
         self.edit_notes = None

-    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
+    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, negative_text, notes):
         user_metadata = self.get_user_metadata(name)
         user_metadata["description"] = desc
         user_metadata["sd version"] = sd_version
         user_metadata["activation text"] = activation_text
         user_metadata["preferred weight"] = preferred_weight
+        user_metadata["negative text"] = negative_text
         user_metadata["notes"] = notes

         self.write_user_metadata(name, user_metadata)
@@ -127,6 +130,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
             gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
             user_metadata.get('activation text', ''),
             float(user_metadata.get('preferred weight', 0.0)),
+            user_metadata.get('negative text', ''),
             gr.update(visible=True if tags else False),
             gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
         ]
@@ -147,6 +151,8 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
             v = random.random() * max_count
             if count > v:
+                for x in "({[]})":
+                    tag = tag.replace(x, '\\' + x)
                 res.append(tag)

         return ", ".join(sorted(res))
@@ -162,7 +168,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
         self.taginfo = gr.HighlightedText(label="Training dataset tags")
         self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora")
         self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01)
+        self.edit_negative_text = gr.Text(label='Negative prompt', info="Will be added to negative prompts")

         with gr.Row() as row_random_prompt:
             with gr.Column(scale=8):
                 random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False)
@@ -198,6 +204,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
             self.taginfo,
             self.edit_activation_text,
             self.slider_preferred_weight,
+            self.edit_negative_text,
             row_random_prompt,
             random_prompt,
         ]
@@ -211,7 +218,9 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
             self.select_sd_version,
             self.edit_activation_text,
             self.slider_preferred_weight,
+            self.edit_negative_text,
             self.edit_notes,
         ]

         self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)
@@ -24,13 +24,16 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
         alias = lora_on_disk.get_alias()

+        search_terms = [self.search_terms_from_path(lora_on_disk.filename)]
+        if lora_on_disk.hash:
+            search_terms.append(lora_on_disk.hash)
         item = {
             "name": name,
             "filename": lora_on_disk.filename,
             "shorthash": lora_on_disk.shorthash,
-            "preview": self.find_preview(path),
+            "preview": self.find_preview(path) or self.find_embedded_preview(path, name, lora_on_disk.metadata),
             "description": self.find_description(path),
-            "search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""),
+            "search_terms": search_terms,
             "local_preview": f"{path}.{shared.opts.samples_format}",
             "metadata": lora_on_disk.metadata,
             "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
@@ -45,6 +48,11 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
         if activation_text:
             item["prompt"] += " + " + quote_js(" " + activation_text)

+        negative_prompt = item["user_metadata"].get("negative text")
+        item["negative_prompt"] = quote_js("")
+        if negative_prompt:
+            item["negative_prompt"] = quote_js('(' + negative_prompt + ':1)')
+
         sd_version = item["user_metadata"].get("sd version")
         if sd_version in network.SdVersion.__members__:
             item["sd_version"] = sd_version
@@ -52,7 +60,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
         else:
             sd_version = lora_on_disk.sd_version

-        if shared.opts.lora_show_all or not enable_filter:
+        if shared.opts.lora_show_all or not enable_filter or not shared.sd_model:
             pass
         elif sd_version == network.SdVersion.Unknown:
             model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
...
import sys import sys
import PIL.Image import PIL.Image
import numpy as np
import torch
from tqdm import tqdm
import modules.upscaler import modules.upscaler
from modules import devices, modelloader, script_callbacks, errors from modules import devices, errors, modelloader, script_callbacks, shared, upscaler_utils
from scunet_model_arch import SCUNet
from modules.modelloader import load_file_from_url
from modules.shared import opts
class UpscalerScuNET(modules.upscaler.Upscaler): class UpscalerScuNET(modules.upscaler.Upscaler):
...@@ -42,100 +35,37 @@ class UpscalerScuNET(modules.upscaler.Upscaler): ...@@ -42,100 +35,37 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
scalers.append(scaler_data2) scalers.append(scaler_data2)
self.scalers = scalers self.scalers = scalers
@staticmethod
@torch.no_grad()
def tiled_inference(img, model):
# test the image tile by tile
h, w = img.shape[2:]
tile = opts.SCUNET_tile
tile_overlap = opts.SCUNET_tile_overlap
if tile == 0:
return model(img)
device = devices.get_device_for('scunet')
assert tile % 8 == 0, "tile size should be a multiple of window_size"
sf = 1
stride = tile - tile_overlap
h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device)
W = torch.zeros_like(E, dtype=devices.dtype, device=device)
with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar:
for h_idx in h_idx_list:
for w_idx in w_idx_list:
in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
E[
..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
].add_(out_patch)
W[
..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
].add_(out_patch_mask)
pbar.update(1)
output = E.div_(W)
return output
def do_upscale(self, img: PIL.Image.Image, selected_file): def do_upscale(self, img: PIL.Image.Image, selected_file):
devices.torch_gc() devices.torch_gc()
try: try:
model = self.load_model(selected_file) model = self.load_model(selected_file)
except Exception as e: except Exception as e:
print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr) print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr)
return img return img
device = devices.get_device_for('scunet') img = upscaler_utils.upscale_2(
tile = opts.SCUNET_tile img,
h, w = img.height, img.width model,
np_img = np.array(img) tile_size=shared.opts.SCUNET_tile,
np_img = np_img[:, :, ::-1] # RGB to BGR tile_overlap=shared.opts.SCUNET_tile_overlap,
np_img = np_img.transpose((2, 0, 1)) / 255 # HWC to CHW scale=1, # ScuNET is a denoising model, not an upscaler
torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device) # type: ignore desc='ScuNET',
)
if tile > h or tile > w:
_img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device)
_img[:, :, :h, :w] = torch_img # pad image
torch_img = _img
torch_output = self.tiled_inference(torch_img, model).squeeze(0)
torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any
np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
del torch_img, torch_output
devices.torch_gc() devices.torch_gc()
return img
output = np_output.transpose((1, 2, 0)) # CHW to HWC
output = output[:, :, ::-1] # BGR to RGB
return PIL.Image.fromarray((output * 255).astype(np.uint8))
def load_model(self, path: str): def load_model(self, path: str):
device = devices.get_device_for('scunet') device = devices.get_device_for('scunet')
if path.startswith("http"): if path.startswith("http"):
# TODO: this doesn't use `path` at all? # TODO: this doesn't use `path` at all?
filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth") filename = modelloader.load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
else: else:
filename = path filename = path
model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) return modelloader.load_spandrel_model(filename, device=device, expected_architecture='SCUNet')
model.load_state_dict(torch.load(filename), strict=True)
model.eval()
for _, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
return model
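The new load_model delegates architecture detection to spandrel instead of instantiating SCUNet and loading a state dict manually. A minimal sketch of the library call that modelloader.load_spandrel_model wraps (the checkpoint path is hypothetical; API as exposed by spandrel's ModelLoader):

from spandrel import ModelLoader

descriptor = ModelLoader().load_from_file("models/ScuNET/ScuNET.pth")
print(descriptor.architecture.name, descriptor.scale)  # expected: SCUNet, 1
model = descriptor.model.eval()                        # a torch.nn.Module, ready for inference

The descriptor also carries metadata such as supported dtypes, which is what lets the wrapper enforce expected_architecture='SCUNet'.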
def on_ui_settings(): def on_ui_settings():
import gradio as gr import gradio as gr
from modules import shared
shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling")) shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam")) shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))
......
import logging
import sys import sys
import platform
import numpy as np
import torch import torch
from PIL import Image from PIL import Image
from tqdm import tqdm
from modules import modelloader, devices, script_callbacks, shared from modules import devices, modelloader, script_callbacks, shared, upscaler_utils
from modules.shared import opts, state
from swinir_model_arch import SwinIR
from swinir_model_arch_v2 import Swin2SR
from modules.upscaler import Upscaler, UpscalerData from modules.upscaler import Upscaler, UpscalerData
SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth" SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"
device_swinir = devices.get_device_for('swinir') logger = logging.getLogger(__name__)
class UpscalerSwinIR(Upscaler): class UpscalerSwinIR(Upscaler):
...@@ -37,26 +32,28 @@ class UpscalerSwinIR(Upscaler): ...@@ -37,26 +32,28 @@ class UpscalerSwinIR(Upscaler):
scalers.append(model_data) scalers.append(model_data)
self.scalers = scalers self.scalers = scalers
def do_upscale(self, img, model_file): def do_upscale(self, img: Image.Image, model_file: str) -> Image.Image:
use_compile = hasattr(opts, 'SWIN_torch_compile') and opts.SWIN_torch_compile \ current_config = (model_file, shared.opts.SWIN_tile)
and int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows"
current_config = (model_file, opts.SWIN_tile)
if use_compile and self._cached_model_config == current_config: if self._cached_model_config == current_config:
model = self._cached_model model = self._cached_model
else: else:
self._cached_model = None
try: try:
model = self.load_model(model_file) model = self.load_model(model_file)
except Exception as e: except Exception as e:
print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr) print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr)
return img return img
model = model.to(device_swinir, dtype=devices.dtype) self._cached_model = model
if use_compile: self._cached_model_config = current_config
model = torch.compile(model)
self._cached_model = model img = upscaler_utils.upscale_2(
self._cached_model_config = current_config img,
img = upscale(img, model) model,
tile_size=shared.opts.SWIN_tile,
tile_overlap=shared.opts.SWIN_tile_overlap,
scale=model.scale,
desc="SwinIR",
)
devices.torch_gc() devices.torch_gc()
return img return img
...@@ -69,115 +66,22 @@ class UpscalerSwinIR(Upscaler): ...@@ -69,115 +66,22 @@ class UpscalerSwinIR(Upscaler):
) )
else: else:
filename = path filename = path
if filename.endswith(".v2.pth"):
model = Swin2SR(
upscale=scale,
in_chans=3,
img_size=64,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler="nearest+conv",
resi_connection="1conv",
)
params = None
else:
model = SwinIR(
upscale=scale,
in_chans=3,
img_size=64,
window_size=8,
img_range=1.0,
depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
embed_dim=240,
num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
mlp_ratio=2,
upsampler="nearest+conv",
resi_connection="3conv",
)
params = "params_ema"
pretrained_model = torch.load(filename) model_descriptor = modelloader.load_spandrel_model(
if params is not None: filename,
model.load_state_dict(pretrained_model[params], strict=True) device=self._get_device(),
else: prefer_half=(devices.dtype == torch.float16),
model.load_state_dict(pretrained_model, strict=True) expected_architecture="SwinIR",
return model )
if getattr(shared.opts, 'SWIN_torch_compile', False):
try:
def upscale( model_descriptor.model.compile()
img, except Exception:
model, logger.warning("Failed to compile SwinIR model, fallback to JIT", exc_info=True)
tile=None, return model_descriptor
tile_overlap=None,
window_size=8, def _get_device(self):
scale=4, return devices.get_device_for('swinir')
):
tile = tile or opts.SWIN_tile
tile_overlap = tile_overlap or opts.SWIN_tile_overlap
img = np.array(img)
img = img[:, :, ::-1]
img = np.moveaxis(img, 2, 0) / 255
img = torch.from_numpy(img).float()
img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype)
with torch.no_grad(), devices.autocast():
_, _, h_old, w_old = img.size()
h_pad = (h_old // window_size + 1) * window_size - h_old
w_pad = (w_old // window_size + 1) * window_size - w_old
img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
output = inference(img, model, tile, tile_overlap, window_size, scale)
output = output[..., : h_old * scale, : w_old * scale]
output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
if output.ndim == 3:
output = np.transpose(
output[[2, 1, 0], :, :], (1, 2, 0)
) # CHW-RGB to HWC-BGR
output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
return Image.fromarray(output, "RGB")
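The removed upscale path padded the input to a multiple of the window size by concatenating flipped copies of the image edges, so SwinIR never sees a hard zero border. The same idea in isolation (a sketch, window size 8 as in the removed code):

import torch

def pad_to_window_multiple(img: torch.Tensor, window_size: int = 8) -> torch.Tensor:
    # img: (B, C, H, W). Mirror-extend bottom and right, then crop so both
    # dimensions become multiples of window_size.
    _, _, h, w = img.shape
    h_pad = (h // window_size + 1) * window_size - h
    w_pad = (w // window_size + 1) * window_size - w
    img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, :h + h_pad, :]
    img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, :w + w_pad]
    return img

Like the original, this pads a full extra window even when the size is already a multiple; the output is cropped back to h * scale by w * scale afterwards.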
def inference(img, model, tile, tile_overlap, window_size, scale):
# run the model over the image tile by tile
b, c, h, w = img.size()
tile = min(tile, h, w)
assert tile % window_size == 0, "tile size should be a multiple of window_size"
sf = scale
stride = tile - tile_overlap
h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img)
W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir)
with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
for h_idx in h_idx_list:
if state.interrupted or state.skipped:
break
for w_idx in w_idx_list:
if state.interrupted or state.skipped:
break
in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
E[
..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
].add_(out_patch)
W[
..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
].add_(out_patch_mask)
pbar.update(1)
output = E.div_(W)
return output
def on_ui_settings(): def on_ui_settings():
...@@ -185,8 +89,7 @@ def on_ui_settings(): ...@@ -185,8 +89,7 @@ def on_ui_settings():
shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling"))) shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")))
shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling"))) shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling")))
if int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows": # torch.compile() require pytorch 2.0 or above, and not on Windows shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))
shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))
script_callbacks.on_ui_settings(on_ui_settings) script_callbacks.on_ui_settings(on_ui_settings)
...@@ -29,6 +29,7 @@ onUiLoaded(async() => { ...@@ -29,6 +29,7 @@ onUiLoaded(async() => {
}); });
function getActiveTab(elements, all = false) { function getActiveTab(elements, all = false) {
if (!elements.img2imgTabs) return null;
const tabs = elements.img2imgTabs.querySelectorAll("button"); const tabs = elements.img2imgTabs.querySelectorAll("button");
if (all) return tabs; if (all) return tabs;
...@@ -43,6 +44,7 @@ onUiLoaded(async() => { ...@@ -43,6 +44,7 @@ onUiLoaded(async() => {
// Get tab ID // Get tab ID
function getTabId(elements) { function getTabId(elements) {
const activeTab = getActiveTab(elements); const activeTab = getActiveTab(elements);
if (!activeTab) return null;
return tabNameToElementId[activeTab.innerText]; return tabNameToElementId[activeTab.innerText];
} }
...@@ -218,6 +220,8 @@ onUiLoaded(async() => { ...@@ -218,6 +220,8 @@ onUiLoaded(async() => {
canvas_hotkey_fullscreen: "KeyS", canvas_hotkey_fullscreen: "KeyS",
canvas_hotkey_move: "KeyF", canvas_hotkey_move: "KeyF",
canvas_hotkey_overlap: "KeyO", canvas_hotkey_overlap: "KeyO",
canvas_hotkey_shrink_brush: "KeyQ",
canvas_hotkey_grow_brush: "KeyW",
canvas_disabled_functions: [], canvas_disabled_functions: [],
canvas_show_tooltip: true, canvas_show_tooltip: true,
canvas_auto_expand: true, canvas_auto_expand: true,
...@@ -227,6 +231,8 @@ onUiLoaded(async() => { ...@@ -227,6 +231,8 @@ onUiLoaded(async() => {
const functionMap = { const functionMap = {
"Zoom": "canvas_hotkey_zoom", "Zoom": "canvas_hotkey_zoom",
"Adjust brush size": "canvas_hotkey_adjust", "Adjust brush size": "canvas_hotkey_adjust",
"Hotkey shrink brush": "canvas_hotkey_shrink_brush",
"Hotkey enlarge brush": "canvas_hotkey_grow_brush",
"Moving canvas": "canvas_hotkey_move", "Moving canvas": "canvas_hotkey_move",
"Fullscreen": "canvas_hotkey_fullscreen", "Fullscreen": "canvas_hotkey_fullscreen",
"Reset Zoom": "canvas_hotkey_reset", "Reset Zoom": "canvas_hotkey_reset",
...@@ -248,6 +254,7 @@ onUiLoaded(async() => { ...@@ -248,6 +254,7 @@ onUiLoaded(async() => {
let isMoving = false; let isMoving = false;
let mouseX, mouseY; let mouseX, mouseY;
let activeElement; let activeElement;
let interactedWithAltKey = false;
const elements = Object.fromEntries( const elements = Object.fromEntries(
Object.keys(elementIDs).map(id => [ Object.keys(elementIDs).map(id => [
...@@ -273,7 +280,7 @@ onUiLoaded(async() => { ...@@ -273,7 +280,7 @@ onUiLoaded(async() => {
const targetElement = gradioApp().querySelector(elemId); const targetElement = gradioApp().querySelector(elemId);
if (!targetElement) { if (!targetElement) {
console.log("Element not found"); console.log("Element not found", elemId);
return; return;
} }
...@@ -288,7 +295,7 @@ onUiLoaded(async() => { ...@@ -288,7 +295,7 @@ onUiLoaded(async() => {
// Create tooltip // Create tooltip
function createTooltip() { function createTooltip() {
const toolTipElemnt = const toolTipElement =
targetElement.querySelector(".image-container"); targetElement.querySelector(".image-container");
const tooltip = document.createElement("div"); const tooltip = document.createElement("div");
tooltip.className = "canvas-tooltip"; tooltip.className = "canvas-tooltip";
...@@ -351,7 +358,7 @@ onUiLoaded(async() => { ...@@ -351,7 +358,7 @@ onUiLoaded(async() => {
tooltip.appendChild(tooltipContent); tooltip.appendChild(tooltipContent);
// Add a hint element to the target element // Add a hint element to the target element
toolTipElemnt.appendChild(tooltip); toolTipElement.appendChild(tooltip);
} }
//Show tooltip if the setting is enabled //Show tooltip if the setting is enabled
...@@ -361,9 +368,9 @@ onUiLoaded(async() => { ...@@ -361,9 +368,9 @@ onUiLoaded(async() => {
// Testing found that the img tag is harmful when zooming and creates white canvases. This hack sidesteps the problem almost entirely and has no effect on the webui. // Testing found that the img tag is harmful when zooming and creates white canvases. This hack sidesteps the problem almost entirely and has no effect on the webui.
function fixCanvas() { function fixCanvas() {
const activeTab = getActiveTab(elements).textContent.trim(); const activeTab = getActiveTab(elements)?.textContent.trim();
if (activeTab !== "img2img") { if (activeTab && activeTab !== "img2img") {
const img = targetElement.querySelector(`${elemId} img`); const img = targetElement.querySelector(`${elemId} img`);
if (img && img.style.display !== "none") { if (img && img.style.display !== "none") {
...@@ -504,6 +511,10 @@ onUiLoaded(async() => { ...@@ -504,6 +511,10 @@ onUiLoaded(async() => {
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
e.preventDefault(); e.preventDefault();
if (hotkeysConfig.canvas_hotkey_zoom === "Alt") {
interactedWithAltKey = true;
}
let zoomPosX, zoomPosY; let zoomPosX, zoomPosY;
let delta = 0.2; let delta = 0.2;
if (elemData[elemId].zoomLevel > 7) { if (elemData[elemId].zoomLevel > 7) {
...@@ -686,7 +697,9 @@ onUiLoaded(async() => { ...@@ -686,7 +697,9 @@ onUiLoaded(async() => {
const hotkeyActions = { const hotkeyActions = {
[hotkeysConfig.canvas_hotkey_reset]: resetZoom, [hotkeysConfig.canvas_hotkey_reset]: resetZoom,
[hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap, [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen,
[hotkeysConfig.canvas_hotkey_shrink_brush]: () => adjustBrushSize(elemId, 10),
[hotkeysConfig.canvas_hotkey_grow_brush]: () => adjustBrushSize(elemId, -10)
}; };
const action = hotkeyActions[event.code]; const action = hotkeyActions[event.code];
...@@ -777,23 +790,29 @@ onUiLoaded(async() => { ...@@ -777,23 +790,29 @@ onUiLoaded(async() => {
targetElement.addEventListener("mouseleave", handleMouseLeave); targetElement.addEventListener("mouseleave", handleMouseLeave);
// Reset zoom when clicking on another tab // Reset zoom when clicking on another tab
elements.img2imgTabs.addEventListener("click", resetZoom); if (elements.img2imgTabs) {
elements.img2imgTabs.addEventListener("click", () => { elements.img2imgTabs.addEventListener("click", resetZoom);
// targetElement.style.width = ""; elements.img2imgTabs.addEventListener("click", () => {
if (parseInt(targetElement.style.width) > 865) { // targetElement.style.width = "";
setTimeout(fitToElement, 0); if (parseInt(targetElement.style.width) > 865) {
} setTimeout(fitToElement, 0);
}); }
});
}
targetElement.addEventListener("wheel", e => { targetElement.addEventListener("wheel", e => {
// change zoom level // change zoom level
const operation = e.deltaY > 0 ? "-" : "+"; const operation = (e.deltaY || -e.wheelDelta) > 0 ? "-" : "+";
changeZoomLevel(operation, e); changeZoomLevel(operation, e);
// Handle brush size adjustment with ctrl key pressed // Handle brush size adjustment with ctrl key pressed
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
e.preventDefault(); e.preventDefault();
if (hotkeysConfig.canvas_hotkey_adjust === "Alt") {
interactedWithAltKey = true;
}
// Increase or decrease brush size based on scroll direction // Increase or decrease brush size based on scroll direction
adjustBrushSize(elemId, e.deltaY); adjustBrushSize(elemId, e.deltaY);
} }
...@@ -833,6 +852,20 @@ onUiLoaded(async() => { ...@@ -833,6 +852,20 @@ onUiLoaded(async() => {
document.addEventListener("keydown", handleMoveKeyDown); document.addEventListener("keydown", handleMoveKeyDown);
document.addEventListener("keyup", handleMoveKeyUp); document.addEventListener("keyup", handleMoveKeyUp);
// Prevent firefox from opening main menu when alt is used as a hotkey for zoom or brush size
function handleAltKeyUp(e) {
if (e.key !== "Alt" || !interactedWithAltKey) {
return;
}
e.preventDefault();
interactedWithAltKey = false;
}
document.addEventListener("keyup", handleAltKeyUp);
// Detect zoom level and update the pan speed. // Detect zoom level and update the pan speed.
function updatePanPosition(movementX, movementY) { function updatePanPosition(movementX, movementY) {
let panSpeed = 2; let panSpeed = 2;
......
...@@ -4,12 +4,14 @@ from modules import shared ...@@ -4,12 +4,14 @@ from modules import shared
shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), { shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
"canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"), "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_shrink_brush": shared.OptionInfo("Q", "Shrink the brush size"),
"canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"),
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"), "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
"canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "), "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"), "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"), "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
"canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"), "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}), "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size","Hotkey enlarge brush","Hotkey shrink brush","Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
})) }))
import math import math
import gradio as gr import gradio as gr
from modules import scripts, shared, ui_components, ui_settings, generation_parameters_copypaste from modules import scripts, shared, ui_components, ui_settings, infotext_utils, errors
from modules.ui_components import FormColumn from modules.ui_components import FormColumn
...@@ -25,7 +25,7 @@ class ExtraOptionsSection(scripts.Script): ...@@ -25,7 +25,7 @@ class ExtraOptionsSection(scripts.Script):
extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img") elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img")
mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping} mapping = {k: v for v, k in infotext_utils.infotext_to_setting_name_mapping}
with gr.Blocks() as interface: with gr.Blocks() as interface:
with gr.Accordion("Options", open=False, elem_id=elem_id_tabname) if shared.opts.extra_options_accordion and extra_options else gr.Group(elem_id=elem_id_tabname): with gr.Accordion("Options", open=False, elem_id=elem_id_tabname) if shared.opts.extra_options_accordion and extra_options else gr.Group(elem_id=elem_id_tabname):
...@@ -42,7 +42,11 @@ class ExtraOptionsSection(scripts.Script): ...@@ -42,7 +42,11 @@ class ExtraOptionsSection(scripts.Script):
setting_name = extra_options[index] setting_name = extra_options[index]
with FormColumn(): with FormColumn():
comp = ui_settings.create_setting_component(setting_name) try:
comp = ui_settings.create_setting_component(setting_name)
except KeyError:
errors.report(f"Can't add extra options for {setting_name} in ui")
continue
self.comps.append(comp) self.comps.append(comp)
self.setting_names.append(setting_name) self.setting_names.append(setting_name)
......
import hypertile import hypertile
from modules import scripts, script_callbacks, shared from modules import scripts, script_callbacks, shared
from scripts.hypertile_xyz import add_axis_options
class ScriptHypertile(scripts.Script): class ScriptHypertile(scripts.Script):
...@@ -93,7 +92,6 @@ def on_ui_settings(): ...@@ -93,7 +92,6 @@ def on_ui_settings():
"hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"), "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"),
"hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"), "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"),
"hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"), "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"),
"hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"), "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"),
"hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"), "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"),
"hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"), "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"),
...@@ -105,5 +103,20 @@ def on_ui_settings(): ...@@ -105,5 +103,20 @@ def on_ui_settings():
shared.opts.add_option(name, opt) shared.opts.add_option(name, opt)
def add_axis_options():
xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
xyz_grid.axis_options.extend([
xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet_secondpass', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_unet"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] Unet Max Depth'), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_unet"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] Unet Max Tile Size')),
xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_unet"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] Unet Swap Size')),
xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, xyz_grid.apply_override('hypertile_enable_vae', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_vae"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] VAE Max Depth'), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_vae"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] VAE Max Tile Size')),
xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_vae"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] VAE Swap Size')),
])
script_callbacks.on_ui_settings(on_ui_settings) script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_before_ui(add_axis_options) script_callbacks.on_before_ui(add_axis_options)
from modules import scripts
from modules.shared import opts
xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
def int_applier(value_name:str, min_range:int = -1, max_range:int = -1):
"""
Returns a function that applies the given value to the given value_name in opts.data.
"""
def validate(value_name:str, value:str):
value = int(value)
# validate value
if min_range != -1:
assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}"
if max_range != -1:
assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}"
def apply_int(p, x, xs):
validate(value_name, x)
opts.data[value_name] = int(x)
return apply_int
def bool_applier(value_name:str):
"""
Returns a function that applies the given value to the given value_name in opts.data.
"""
def validate(value_name:str, value:str):
assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false"
def apply_bool(p, x, xs):
validate(value_name, x)
value_boolean = x.lower() == "true"
opts.data[value_name] = value_boolean
return apply_bool
def add_axis_options():
extra_axis_options = [
xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)),
xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)),
xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)),
xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)),
]
set_a = {opt.label for opt in xyz_grid.axis_options}
set_b = {opt.label for opt in extra_axis_options}
if set_a.intersection(set_b):
return
xyz_grid.axis_options.extend(extra_axis_options)
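Each applier is a plain closure: the x/y/z grid later calls it with the axis value as a string, and the closure validates it and writes into opts.data. A quick sketch of how the product of int_applier behaves (p and xs are unused by it, so None stands in):

apply_fn = int_applier("hypertile_max_tile_unet", 0, 512)
apply_fn(None, "256", None)   # passes validation, sets opts.data["hypertile_max_tile_unet"] = 256
apply_fn(None, "1024", None)  # AssertionError: 1024 exceeds the max_range of 512

The label-intersection check above keeps the axis options from being registered twice when the script is reloaded.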
...@@ -28,7 +28,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr ...@@ -28,7 +28,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr
class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing): class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing):
name = "Auto-sized crop" name = "Auto-sized crop"
order = 4000 order = 4020
def ui(self): def ui(self):
with ui_components.InputAccordion(False, label="Auto-sized crop") as enable: with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
......
...@@ -4,7 +4,7 @@ import gradio as gr ...@@ -4,7 +4,7 @@ import gradio as gr
class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostprocessing): class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostprocessing):
name = "Caption" name = "Caption"
order = 4000 order = 4040
def ui(self): def ui(self):
with ui_components.InputAccordion(False, label="Caption") as enable: with ui_components.InputAccordion(False, label="Caption") as enable:
...@@ -25,6 +25,6 @@ class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostprocessing): ...@@ -25,6 +25,6 @@ class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostprocessing):
captions.append(deepbooru.model.tag(pp.image)) captions.append(deepbooru.model.tag(pp.image))
if "BLIP" in option: if "BLIP" in option:
captions.append(shared.interrogator.generate_caption(pp.image)) captions.append(shared.interrogator.interrogate(pp.image.convert("RGB")))
pp.caption = ", ".join([x for x in captions if x]) pp.caption = ", ".join([x for x in captions if x])
...@@ -6,7 +6,7 @@ import gradio as gr ...@@ -6,7 +6,7 @@ import gradio as gr
class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing): class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing):
name = "Create flipped copies" name = "Create flipped copies"
order = 4000 order = 4030
def ui(self): def ui(self):
with ui_components.InputAccordion(False, label="Create flipped copies") as enable: with ui_components.InputAccordion(False, label="Create flipped copies") as enable:
......
...@@ -7,7 +7,7 @@ from modules.textual_inversion import autocrop ...@@ -7,7 +7,7 @@ from modules.textual_inversion import autocrop
class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing): class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing):
name = "Auto focal point crop" name = "Auto focal point crop"
order = 4000 order = 4010
def ui(self): def ui(self):
with ui_components.InputAccordion(False, label="Auto focal point crop") as enable: with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
......
...@@ -61,7 +61,7 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces ...@@ -61,7 +61,7 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces
ratio = (pp.image.height * width) / (pp.image.width * height) ratio = (pp.image.height * width) / (pp.image.width * height)
inverse_xy = True inverse_xy = True
if ratio >= 1.0 and ratio > split_threshold: if ratio >= 1.0 or ratio > split_threshold:
return return
result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio) result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio)
......
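The guard change above (and -> or) is easiest to see with numbers. A hedged walk-through, taking split_threshold = 0.5 as an assumed setting value:

# ratio compares the target aspect to the (possibly axis-swapped) source aspect
# old: if ratio >= 1.0 and ratio > split_threshold: return   -> effectively just ratio >= 1.0
# new: if ratio >= 1.0 or  ratio > split_threshold: return
# ratio = 2.0 -> both versions return early (nothing to split)
# ratio = 0.8 -> old version fell through and split the image;
#                new version returns, since 0.8 > 0.5 means it is already close enough

With any threshold below 1.0, the old conjunction made the threshold dead code; the disjunction restores its effect.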
<div class='card' style={style} onclick={card_clicked} data-name="{name}" {sort_keys}> <div class="card" style="{style}" onclick="{card_clicked}" data-name="{name}" {sort_keys}>
{background_image} {background_image}
<div class="button-row"> <div class="button-row">{copy_path_button}{metadata_button}{edit_button}</div>
{metadata_button} <div class="actions">
{edit_button} <div class="additional">{search_terms}</div>
</div> <span class="name">{name}</span>
<div class='actions'> <span class="description">{description}</span>
<div class='additional'>
<span style="display:none" class='search_term{search_only}'>{search_term}</span>
</div>
<span class='name'>{name}</span>
<span class='description'>{description}</span>
</div> </div>
</div> </div>
<div class="copy-path-button card-button"
title="Copy path to clipboard"
onclick="extraNetworksCopyCardPath(event)"
data-clipboard-text="{filename}">
</div>
\ No newline at end of file
<div class="edit-button card-button"
title="Edit metadata"
onclick="extraNetworksEditUserMetadata(event, '{tabname}', '{extra_networks_tabname}')">
</div>
\ No newline at end of file
<div class="metadata-button card-button"
title="Show internal metadata"
onclick="extraNetworksRequestMetadata(event, '{extra_networks_tabname}')">
</div>
\ No newline at end of file
<div class="extra-network-pane-content-dirs">
<div id='{tabname}_{extra_networks_tabname}_dirs' class='extra-network-dirs'>
{dirs_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
{items_html}
</div>
</div>
<div class="extra-network-pane-content-tree resize-handle-row">
<div id='{tabname}_{extra_networks_tabname}_tree' class='extra-network-tree' style='flex-basis: {extra_networks_tree_view_default_width}px'>
{tree_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards' style='flex-grow: 1;'>
{items_html}
</div>
</div>
\ No newline at end of file
<div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane {tree_view_div_default_display_class}'>
<div class="extra-network-control" id="{tabname}_{extra_networks_tabname}_controls" style="display:none" >
<div class="extra-network-control--search">
<input
id="{tabname}_{extra_networks_tabname}_extra_search"
class="extra-network-control--search-text"
type="search"
placeholder="Search"
>
</div>
<small>Sort: </small>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_path"
class="extra-network-control--sort{sort_path_active}"
data-sortkey="default"
title="Sort by path"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_name"
class="extra-network-control--sort{sort_name_active}"
data-sortkey="name"
title="Sort by name"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_date_created"
class="extra-network-control--sort{sort_date_created_active}"
data-sortkey="date_created"
title="Sort by date created"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_date_modified"
class="extra-network-control--sort{sort_date_modified_active}"
data-sortkey="date_modified"
title="Sort by date modified"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<small> </small>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_dir"
class="extra-network-control--sort-dir"
data-sortdir="{data_sortdir}"
title="Sort ascending"
onclick="extraNetworksControlSortDirOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-dir-icon"></i>
</div>
<small> </small>
<div
id="{tabname}_{extra_networks_tabname}_extra_tree_view"
class="extra-network-control--tree-view {tree_view_btn_extra_class}"
title="Enable Tree View"
onclick="extraNetworksControlTreeViewOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--tree-view-icon"></i>
</div>
<div
id="{tabname}_{extra_networks_tabname}_extra_refresh"
class="extra-network-control--refresh"
title="Refresh page"
onclick="extraNetworksControlRefreshOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--refresh-icon"></i>
</div>
</div>
{pane_content}
</div>
<span data-filterable-item-text hidden>{search_terms}</span>
<div class="tree-list-content {subclass}"
type="button"
onclick="extraNetworksTreeOnClick(event, '{tabname}', '{extra_networks_tabname}');{onclick_extra}"
data-path="{data_path}"
data-hash="{data_hash}"
>
<span class='tree-list-item-action tree-list-item-action--leading'>
{action_list_item_action_leading}
</span>
<span class="tree-list-item-visual tree-list-item-visual--leading">
{action_list_item_visual_leading}
</span>
<span class="tree-list-item-label tree-list-item-label--truncate">
{action_list_item_label}
</span>
<span class="tree-list-item-visual tree-list-item-visual--trailing">
{action_list_item_visual_trailing}
</span>
<span class="tree-list-item-action tree-list-item-action--trailing">
{action_list_item_action_trailing}
</span>
</div>
\ No newline at end of file
...@@ -50,17 +50,17 @@ function dimensionChange(e, is_width, is_height) { ...@@ -50,17 +50,17 @@ function dimensionChange(e, is_width, is_height) {
var scaledx = targetElement.naturalWidth * viewportscale; var scaledx = targetElement.naturalWidth * viewportscale;
var scaledy = targetElement.naturalHeight * viewportscale; var scaledy = targetElement.naturalHeight * viewportscale;
var cleintRectTop = (viewportOffset.top + window.scrollY); var clientRectTop = (viewportOffset.top + window.scrollY);
var cleintRectLeft = (viewportOffset.left + window.scrollX); var clientRectLeft = (viewportOffset.left + window.scrollX);
var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2); var clientRectCentreY = clientRectTop + (targetElement.clientHeight / 2);
var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2); var clientRectCentreX = clientRectLeft + (targetElement.clientWidth / 2);
var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight); var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight);
var arscaledx = currentWidth * arscale; var arscaledx = currentWidth * arscale;
var arscaledy = currentHeight * arscale; var arscaledy = currentHeight * arscale;
var arRectTop = cleintRectCentreY - (arscaledy / 2); var arRectTop = clientRectCentreY - (arscaledy / 2);
var arRectLeft = cleintRectCentreX - (arscaledx / 2); var arRectLeft = clientRectCentreX - (arscaledx / 2);
var arRectWidth = arscaledx; var arRectWidth = arscaledx;
var arRectHeight = arscaledy; var arRectHeight = arscaledy;
......
...@@ -8,9 +8,6 @@ var contextMenuInit = function() { ...@@ -8,9 +8,6 @@ var contextMenuInit = function() {
}; };
function showContextMenu(event, element, menuEntries) { function showContextMenu(event, element, menuEntries) {
let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
let oldMenu = gradioApp().querySelector('#context-menu'); let oldMenu = gradioApp().querySelector('#context-menu');
if (oldMenu) { if (oldMenu) {
oldMenu.remove(); oldMenu.remove();
...@@ -23,10 +20,8 @@ var contextMenuInit = function() { ...@@ -23,10 +20,8 @@ var contextMenuInit = function() {
contextMenu.style.background = baseStyle.background; contextMenu.style.background = baseStyle.background;
contextMenu.style.color = baseStyle.color; contextMenu.style.color = baseStyle.color;
contextMenu.style.fontFamily = baseStyle.fontFamily; contextMenu.style.fontFamily = baseStyle.fontFamily;
contextMenu.style.top = posy + 'px'; contextMenu.style.top = event.pageY + 'px';
contextMenu.style.left = posx + 'px'; contextMenu.style.left = event.pageX + 'px';
const contextMenuList = document.createElement('ul'); const contextMenuList = document.createElement('ul');
contextMenuList.className = 'context-menu-items'; contextMenuList.className = 'context-menu-items';
...@@ -43,21 +38,6 @@ var contextMenuInit = function() { ...@@ -43,21 +38,6 @@ var contextMenuInit = function() {
}); });
gradioApp().appendChild(contextMenu); gradioApp().appendChild(contextMenu);
let menuWidth = contextMenu.offsetWidth + 4;
let menuHeight = contextMenu.offsetHeight + 4;
let windowWidth = window.innerWidth;
let windowHeight = window.innerHeight;
if ((windowWidth - posx) < menuWidth) {
contextMenu.style.left = windowWidth - menuWidth + "px";
}
if ((windowHeight - posy) < menuHeight) {
contextMenu.style.top = windowHeight - menuHeight + "px";
}
} }
function appendContextMenuOption(targetElementSelector, entryName, entryFunction) { function appendContextMenuOption(targetElementSelector, entryName, entryFunction) {
...@@ -107,16 +87,23 @@ var contextMenuInit = function() { ...@@ -107,16 +87,23 @@ var contextMenuInit = function() {
oldMenu.remove(); oldMenu.remove();
} }
}); });
gradioApp().addEventListener("contextmenu", function(e) { ['contextmenu', 'touchstart'].forEach((eventType) => {
let oldMenu = gradioApp().querySelector('#context-menu'); gradioApp().addEventListener(eventType, function(e) {
if (oldMenu) { let ev = e;
oldMenu.remove(); if (eventType.startsWith('touch')) {
} if (e.touches.length !== 2) return;
menuSpecs.forEach(function(v, k) { ev = e.touches[0];
if (e.composedPath()[0].matches(k)) { }
showContextMenu(e, e.composedPath()[0], v); let oldMenu = gradioApp().querySelector('#context-menu');
e.preventDefault(); if (oldMenu) {
oldMenu.remove();
} }
menuSpecs.forEach(function(v, k) {
if (e.composedPath()[0].matches(k)) {
showContextMenu(ev, e.composedPath()[0], v);
e.preventDefault();
}
});
}); });
}); });
eventListenerApplied = true; eventListenerApplied = true;
......
...@@ -56,6 +56,15 @@ function eventHasFiles(e) { ...@@ -56,6 +56,15 @@ function eventHasFiles(e) {
return false; return false;
} }
function isURL(url) {
try {
const _ = new URL(url);
return true;
} catch {
return false;
}
}
function dragDropTargetIsPrompt(target) { function dragDropTargetIsPrompt(target) {
if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true; if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true; if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;
...@@ -74,22 +83,39 @@ window.document.addEventListener('dragover', e => { ...@@ -74,22 +83,39 @@ window.document.addEventListener('dragover', e => {
e.dataTransfer.dropEffect = 'copy'; e.dataTransfer.dropEffect = 'copy';
}); });
window.document.addEventListener('drop', e => { window.document.addEventListener('drop', async e => {
const target = e.composedPath()[0]; const target = e.composedPath()[0];
if (!eventHasFiles(e)) return; const url = e.dataTransfer.getData('text/uri-list') || e.dataTransfer.getData('text/plain');
if (!eventHasFiles(e) && !isURL(url)) return;
if (dragDropTargetIsPrompt(target)) { if (dragDropTargetIsPrompt(target)) {
e.stopPropagation(); e.stopPropagation();
e.preventDefault(); e.preventDefault();
let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image"; const isImg2img = get_tab_index('tabs') == 1;
let prompt_image_target = isImg2img ? "img2img_prompt_image" : "txt2img_prompt_image";
const imgParent = gradioApp().getElementById(prompt_target); const imgParent = gradioApp().getElementById(prompt_image_target);
const files = e.dataTransfer.files; const files = e.dataTransfer.files;
const fileInput = imgParent.querySelector('input[type="file"]'); const fileInput = imgParent.querySelector('input[type="file"]');
if (fileInput) { if (eventHasFiles(e) && fileInput) {
fileInput.files = files; fileInput.files = files;
fileInput.dispatchEvent(new Event('change')); fileInput.dispatchEvent(new Event('change'));
} else if (url) {
try {
const request = await fetch(url);
if (!request.ok) {
console.error('Error fetching URL:', url, request.status);
return;
}
const data = new DataTransfer();
data.items.add(new File([await request.blob()], 'image.png'));
fileInput.files = data.files;
fileInput.dispatchEvent(new Event('change'));
} catch (error) {
console.error('Error fetching URL:', url, error);
return;
}
} }
} }
......
...@@ -64,6 +64,14 @@ function keyupEditAttention(event) { ...@@ -64,6 +64,14 @@ function keyupEditAttention(event) {
selectionEnd++; selectionEnd++;
} }
// deselect surrounding whitespace
while (text[selectionStart] == " " && selectionStart < selectionEnd) {
selectionStart++;
}
while (text[selectionEnd - 1] == " " && selectionEnd > selectionStart) {
selectionEnd--;
}
target.setSelectionRange(selectionStart, selectionEnd); target.setSelectionRange(selectionStart, selectionEnd);
return true; return true;
} }
......
...@@ -2,8 +2,11 @@ ...@@ -2,8 +2,11 @@
function extensions_apply(_disabled_list, _update_list, disable_all) { function extensions_apply(_disabled_list, _update_list, disable_all) {
var disable = []; var disable = [];
var update = []; var update = [];
const extensions_input = gradioApp().querySelectorAll('#extensions input[type="checkbox"]');
gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { if (extensions_input.length == 0) {
throw Error("Extensions page not yet loaded.");
}
extensions_input.forEach(function(x) {
if (x.name.startsWith("enable_") && !x.checked) { if (x.name.startsWith("enable_") && !x.checked) {
disable.push(x.name.substring(7)); disable.push(x.name.substring(7));
} }
......
...@@ -45,8 +45,15 @@ function formatTime(secs) { ...@@ -45,8 +45,15 @@ function formatTime(secs) {
} }
} }
var originalAppTitle = undefined;
onUiLoaded(function() {
originalAppTitle = document.title;
});
function setTitle(progress) { function setTitle(progress) {
var title = 'Stable Diffusion'; var title = originalAppTitle;
if (opts.show_progress_in_title && progress) { if (opts.show_progress_in_title && progress) {
title = '[' + progress.trim() + '] ' + title; title = '[' + progress.trim() + '] ' + title;
...@@ -69,6 +76,26 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre ...@@ -69,6 +76,26 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var dateStart = new Date(); var dateStart = new Date();
var wasEverActive = false; var wasEverActive = false;
var parentProgressbar = progressbarContainer.parentNode; var parentProgressbar = progressbarContainer.parentNode;
var wakeLock = null;
var requestWakeLock = async function() {
if (!opts.prevent_screen_sleep_during_generation || wakeLock) return;
try {
wakeLock = await navigator.wakeLock.request('screen');
} catch (err) {
console.error('Wake Lock is not supported.');
}
};
var releaseWakeLock = async function() {
if (!opts.prevent_screen_sleep_during_generation || !wakeLock) return;
try {
await wakeLock.release();
wakeLock = null;
} catch (err) {
console.error('Wake Lock release failed', err);
}
};
var divProgress = document.createElement('div'); var divProgress = document.createElement('div');
divProgress.className = 'progressDiv'; divProgress.className = 'progressDiv';
...@@ -82,6 +109,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre ...@@ -82,6 +109,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var livePreview = null; var livePreview = null;
var removeProgressBar = function() { var removeProgressBar = function() {
releaseWakeLock();
if (!divProgress) return; if (!divProgress) return;
setTitle(""); setTitle("");
...@@ -93,6 +121,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre ...@@ -93,6 +121,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
}; };
var funProgress = function(id_task) { var funProgress = function(id_task) {
requestWakeLock();
request("./internal/progress", {id_task: id_task, live_preview: false}, function(res) { request("./internal/progress", {id_task: id_task, live_preview: false}, function(res) {
if (res.completed) { if (res.completed) {
removeProgressBar(); removeProgressBar();
......