novelai-storage / Stable Diffusion Webui

Commit 9eb2f786
Authored May 15, 2024 by huchenlei
Parent: 1c0a0c4c

Precompute is_sdxl_inpaint flag
Showing 3 changed files with 22 additions and 22 deletions (+22 -22)
modules/processing.py     +11 -17
modules/sd_models.py       +7  -0
modules/sd_models_xl.py    +4  -5
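In short: all three call sites touched here previously rebuilt the model's state_dict and inspected the first UNet conv weight ('diffusion_model.input_blocks.0.0.weight') to decide whether the loaded checkpoint is an SDXL inpaint model, i.e. one whose input conv takes 9 channels (the 4-channel noise latent concatenated with the 1-channel mask and 4-channel masked-image latent, as the txt2img path below constructs). One of those call sites, apply_model, runs once per denoising step. The commit performs the probe once in load_model_weights and caches the result as a boolean is_sdxl_inpaint, so per-step code only reads an attribute. Below is a minimal standalone sketch of the same detection logic, assuming a plain PyTorch state dict with the same key layout; the helper name and the fake state dict are illustrative, not part of the webui API.

import torch

def detect_sdxl_inpaint(state_dict, is_sdxl: bool) -> bool:
    # Inpaint UNets take extra input channels on the first conv:
    # noise latent (4) + mask (1) + masked-image latent (4) = 9 channels.
    w = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
    return bool(is_sdxl and w is not None and w.shape[1] == 9)

# Illustrative usage: probe once at checkpoint load and cache the result,
# mirroring what load_model_weights does in this commit.
fake_sd = {'diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3)}
print(detect_sdxl_inpaint(fake_sd, is_sdxl=True))   # True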
modules/processing.py

@@ -115,20 +115,17 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
 
     else:
-        sd = sd_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
-                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
-                image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
+        if sd_model.model.is_sdxl_inpaint:
+            # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+            image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+            image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
 
-                # Add the fake full 1s mask to the first dimension.
-                image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
-                image_conditioning = image_conditioning.to(x.dtype)
+            # Add the fake full 1s mask to the first dimension.
+            image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+            image_conditioning = image_conditioning.to(x.dtype)
 
-                return image_conditioning
+            return image_conditioning
 
     # Dummy zero conditioning if we're not using inpainting or unclip models.
     # Still takes up a bit of memory, but no encoder call.

@@ -390,11 +387,8 @@ class StableDiffusionProcessing:
         if self.sampler.conditioning_key == "crossattn-adm":
            return self.unclip_image_conditioning(source_image)
 
-        sd = self.sampler.model_wrap.inner_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+        if self.sampler.model_wrap.inner_model.model.is_sdxl_inpaint:
+            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
 
         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
modules/sd_models.py

@@ -380,6 +380,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
     model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
     model.is_sd1 = not model.is_sdxl and not model.is_sd2
     model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
+    # Set is_sdxl_inpaint flag.
+    diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
+    model.is_sdxl_inpaint = (
+        model.is_sdxl and
+        diffusion_model_input is not None and
+        diffusion_model_input.shape[1] == 9
+    )
 
     if model.is_sdxl:
         sd_models_xl.extend_sdxl(model)
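The flag is computed alongside the existing is_sd1 / is_sd2 / is_sdxl / is_ssd detection, so it is available as soon as a checkpoint finishes loading. The snippet below is a hedged spot-check one could run from the webui's Python environment (e.g. a debugger session or a local script), not part of the commit; shared.sd_model is the currently loaded model object in the webui codebase, and getattr guards against models loaded by builds that predate this change.

# Hypothetical spot-check, assuming a running webui process with a checkpoint loaded.
from modules import shared

m = shared.sd_model
print('is_sdxl:        ', m.is_sdxl)
print('is_sdxl_inpaint:', getattr(m, 'is_sdxl_inpaint', False))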
modules/sd_models_xl.py

@@ -35,11 +35,10 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:
 def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
-    sd = self.model.state_dict()
-    diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-    if diffusion_model_input is not None:
-        if diffusion_model_input.shape[1] == 9:
-            x = torch.cat([x] + cond['c_concat'], dim=1)
+    """WARNING: This function is called once per denoising iteration. DO NOT add
+    expensive function calls such as `model.state_dict`."""
+    if self.model.is_sdxl_inpaint:
+        x = torch.cat([x] + cond['c_concat'], dim=1)
 
     return self.model(x, t, cond)
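The new docstring is the motivation in a nutshell: apply_model sits on the denoising hot path, and model.state_dict() walks every submodule and builds a fresh ordered dict of parameter references on each call, whereas the precomputed flag is a plain attribute read. Below is a rough, self-contained illustration of that gap on a toy module; the module size, key name, and timings are illustrative only and say nothing about the webui UNet itself.

import time
import torch.nn as nn

# Toy stand-in for the model wrapper; the attribute mirrors the flag this
# commit precomputes once at load time.
net = nn.Sequential(*[nn.Linear(1024, 1024) for _ in range(64)])
net.is_sdxl_inpaint = False

t0 = time.perf_counter()
for _ in range(100):
    w = net.state_dict().get('0.weight', None)   # old pattern: probe per step
    _ = w is not None and w.shape[1] == 9
t1 = time.perf_counter()
for _ in range(100):
    _ = net.is_sdxl_inpaint                      # new pattern: cached flag
t2 = time.perf_counter()

print(f"state_dict probe x100: {t1 - t0:.4f}s, cached flag x100: {t2 - t1:.6f}s")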