Stable Diffusion Webui (novelai-storage)

Commit a8a256f9
authored Aug 08, 2023 by AUTOMATIC1111
REMOVE
parent 8285a149
Showing 8 changed files with 7 additions and 336 deletions (+7 -336)
modules/processing.py                 +0 -3
modules/sd_hijack.py                  +1 -3
modules/sd_hijack_inpainting.py       +0 -95
modules/sd_samplers.py                +2 -5
modules/sd_samplers_cfg_denoiser.py   +0 -1
modules/sd_samplers_compvis.py        +0 -224
modules/sd_samplers_kdiffusion.py     +1 -2
modules/sd_samplers_timesteps.py      +3 -3
modules/processing.py

@@ -1112,9 +1112,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         img2img_sampler_name = self.hr_sampler_name or self.sampler_name
 
-        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
-            img2img_sampler_name = 'DDIM'
-
         self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
 
         if self.latent_scale_mode is not None:
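Note: with this commit, the 'PLMS' and 'UniPC' names resolve to the timesteps-based samplers (renamed in modules/sd_samplers_timesteps.py below), which handle img2img, so the silent DDIM fallback in the hires-fix pass is no longer needed. A minimal sketch of the remaining selection logic; the standalone helper and its p argument are hypothetical, only the attribute names and the sd_samplers.create_sampler call come from the diff above:

    from modules import sd_samplers

    def pick_hires_sampler(p):
        # hypothetical helper mirroring the surviving lines of the hires-fix pass
        img2img_sampler_name = p.hr_sampler_name or p.sampler_name
        # no PLMS/UniPC special case any more: every registered sampler is
        # expected to handle the img2img step of the hires fix
        return sd_samplers.create_sampler(img2img_sampler_name, p.sd_model)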
modules/sd_hijack.py

@@ -5,7 +5,7 @@ from types import MethodType
 from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet
 from modules.hypernetworks import hypernetwork
 from modules.shared import cmd_opts
-from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, sd_hijack_inpainting
+from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
 
 import ldm.modules.attention
 import ldm.modules.diffusionmodules.model

@@ -34,8 +34,6 @@ ldm.modules.diffusionmodules.model.print = shared.ldm_print
 ldm.util.print = shared.ldm_print
 ldm.models.diffusion.ddpm.print = shared.ldm_print
 
-sd_hijack_inpainting.do_inpainting_hijack()
-
 optimizers = []
 current_optimizer: sd_hijack_optimizations.SdOptimization = None
modules/sd_hijack_inpainting.py deleted (100644 → 0, contents as of parent 8285a149)

import torch

import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms

from ldm.models.diffusion.ddim import noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding


@torch.no_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                  temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                  unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None):
    b, *_, device = *x.shape, x.device

    def get_model_output(x, t):
        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
            e_t = self.model.apply_model(x, t, c)
        else:
            x_in = torch.cat([x] * 2)
            t_in = torch.cat([t] * 2)

            if isinstance(c, dict):
                assert isinstance(unconditional_conditioning, dict)
                c_in = {}
                for k in c:
                    if isinstance(c[k], list):
                        c_in[k] = [
                            torch.cat([unconditional_conditioning[k][i], c[k][i]])
                            for i in range(len(c[k]))
                        ]
                    else:
                        c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
            else:
                c_in = torch.cat([unconditional_conditioning, c])

            e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

        if score_corrector is not None:
            assert self.model.parameterization == "eps"
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        return e_t

    alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
    alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
    sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
    sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas

    def get_x_prev_and_pred_x0(e_t, index):
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)

        # current prediction for x_0
        pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        if quantize_denoised:
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
        if dynamic_threshold is not None:
            pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
        # direction pointing to x_t
        dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0

    e_t = get_model_output(x, t)
    if len(old_eps) == 0:
        # Pseudo Improved Euler (2nd order)
        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
        e_t_next = get_model_output(x_prev, t_next)
        e_t_prime = (e_t + e_t_next) / 2
    elif len(old_eps) == 1:
        # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
        e_t_prime = (3 * e_t - old_eps[-1]) / 2
    elif len(old_eps) == 2:
        # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
        e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
    elif len(old_eps) >= 3:
        # 4th order Pseudo Linear Multistep (Adams-Bashforth)
        e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24

    x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)

    return x_prev, pred_x0, e_t


def do_inpainting_hijack():
    ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
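For reference, the deleted p_sample_plms blends the current model output with previously stored outputs (old_eps[-1], old_eps[-2], old_eps[-3]) before the usual DDIM-style update. Writing e_{t-k} for old_eps[-k], the combinations are exactly the coefficients in the code above:

    % pseudo linear multistep (Adams--Bashforth) combinations used by p_sample_plms
    \begin{aligned}
    e_t' &= \tfrac{1}{2}\,(e_t + e_{t_{\text{next}}})                     &&\text{no history: pseudo improved Euler (2nd order)}\\
    e_t' &= \tfrac{1}{2}\,(3e_t - e_{t-1})                                &&\text{one stored output: 2nd order}\\
    e_t' &= \tfrac{1}{12}\,(23e_t - 16e_{t-1} + 5e_{t-2})                 &&\text{two stored outputs: 3rd order}\\
    e_t' &= \tfrac{1}{24}\,(55e_t - 59e_{t-1} + 37e_{t-2} - 9e_{t-3})     &&\text{three or more: 4th order}
    \end{aligned}

The result e_t' is then passed to get_x_prev_and_pred_x0 in place of the raw prediction.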
modules/sd_samplers.py

-from modules import sd_samplers_compvis, sd_samplers_kdiffusion, sd_samplers_timesteps, shared
+from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared
 
 # imports for functions that previously were here and are used by other modules
 from modules.sd_samplers_common import samples_to_image_grid, sample_to_image  # noqa: F401
 
 all_samplers = [
     *sd_samplers_kdiffusion.samplers_data_k_diffusion,
-    *sd_samplers_compvis.samplers_data_compvis,
     *sd_samplers_timesteps.samplers_data_timesteps,
 ]
 all_samplers_map = {x.name: x for x in all_samplers}

@@ -42,10 +41,8 @@ def set_samplers():
     global samplers, samplers_for_img2img
 
     hidden = set(shared.opts.hide_samplers)
-    hidden_img2img = set(shared.opts.hide_samplers + ['PLMS', 'UniPC'])
     samplers = [x for x in all_samplers if x.name not in hidden]
-    samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
+    samplers_for_img2img = [x for x in all_samplers if x.name not in hidden]
 
     samplers_map.clear()
     for sampler in all_samplers:
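The practical effect of the set_samplers change: samplers_for_img2img is now filtered by the same user-configurable hidden set as samplers, so PLMS and UniPC are no longer force-hidden from img2img. An illustrative check, assuming the webui modules are importable in the current environment:

    from modules import sd_samplers

    sd_samplers.set_samplers()
    img2img_names = [x.name for x in sd_samplers.samplers_for_img2img]

    # before this commit these two were always excluded here; now they only
    # disappear if the user lists them in opts.hide_samplers
    print('PLMS' in img2img_names, 'UniPC' in img2img_names)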
modules/sd_samplers_cfg_denoiser.py

-from collections import deque
 import torch
 from modules import prompt_parser, devices, sd_samplers_common
modules/sd_samplers_compvis.py deleted (100644 → 0)

This diff is collapsed (the entire 224-line file is removed).
modules/sd_samplers_kdiffusion.py

-from collections import deque
 import torch
 import inspect
 import k_diffusion.sampling
-from modules import devices, sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
+from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
 from modules.shared import opts
 import modules.shared as shared
modules/sd_samplers_timesteps.py

@@ -7,9 +7,9 @@ from modules.shared import opts
 import modules.shared as shared
 
 samplers_timesteps = [
-    ('k_DDIM', sd_samplers_timesteps_impl.ddim, ['k_ddim'], {}),
-    ('k_PLMS', sd_samplers_timesteps_impl.plms, ['k_plms'], {}),
-    ('k_UniPC', sd_samplers_timesteps_impl.unipc, ['k_unipc'], {}),
+    ('DDIM', sd_samplers_timesteps_impl.ddim, ['ddim'], {}),
+    ('PLMS', sd_samplers_timesteps_impl.plms, ['plms'], {}),
+    ('UniPC', sd_samplers_timesteps_impl.unipc, ['unipc'], {}),
 ]
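Dropping the k_ prefix lets the timesteps-based DDIM, PLMS and UniPC take over the display names and lowercase aliases previously used by the removed CompVis samplers, so existing settings that refer to those names should keep resolving. An illustrative lookup, again assuming the modules are importable:

    from modules import sd_samplers

    sd_samplers.set_samplers()
    # 'PLMS' now maps to the timesteps-based sampler entry rather than the
    # old CompVis implementation; 'plms' is its lowercase alias
    print(sd_samplers.all_samplers_map['PLMS'].name)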