novelai-storage / Stable Diffusion Webui

Commit b705c9b7, authored Mar 26, 2023 by AUTOMATIC
Merge branch 'lora_sd2' into lora_inplace
Parents: 80b26d2a, 7cb31a27
Showing 1 changed file with 19 additions and 2 deletions:

extensions-builtin/Lora/lora.py (+19, -2)
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -14,7 +14,7 @@ re_unet_up_blocks = re.compile(r"lora_unet_up_blocks_(\d+)_attentions_(\d+)_(.+)")
 re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)")
 
 
-def convert_diffusers_name_to_compvis(key):
+def convert_diffusers_name_to_compvis(key, is_sd2):
     def match(match_list, regex):
         r = re.match(regex, key)
         if not r:
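Note on the signature change: convert_diffusers_name_to_compvis now takes an is_sd2 flag so key conversion can target the SD2 text encoder's naming scheme. The inner match() helper fills a caller-supplied list with the regex groups; its body is elided in this hunk, so the sketch below reconstructs it from how m[0] and m[1] are used later (digit groups cast to int), with key passed explicitly instead of closed over. A minimal runnable approximation, not the verbatim source:

    import re

    re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)")

    def match(match_list, regex, key):
        r = re.match(regex, key)
        if not r:
            return False
        # Copy the regex groups into the caller's list, casting digit groups
        # to int so they can be used in arithmetic such as m[0] * 3 + m[1].
        match_list.clear()
        match_list.extend(int(x) if x.isdigit() else x for x in r.groups())
        return True

    m = []
    key = "lora_te_text_model_encoder_layers_0_self_attn_q_proj.lora_up.weight"
    if match(m, re_text_block, key):
        print(m)  # [0, 'self_attn_q_proj.lora_up.weight']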
@@ -36,6 +36,14 @@ def convert_diffusers_name_to_compvis(key):
         return f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_1_{m[2]}"
 
     if match(m, re_text_block):
+        if is_sd2:
+            if 'mlp_fc1' in m[1]:
+                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
+            elif 'mlp_fc2' in m[1]:
+                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
+            else:
+                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
+
         return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
 
     return key
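The new branch handles the SD2 text encoder, where open_clip module names differ from CLIPTextModel's: mlp_fc1/mlp_fc2 become mlp_c_fc/mlp_c_proj, and self_attn becomes attn. A worked example of the three rewrites, using illustrative keys derived only from the diff above:

    # m[0] is the layer index, m[1] the remainder captured by re_text_block.
    cases = [
        (7, "mlp_fc1.lora_down.weight"),
        (7, "mlp_fc2.lora_down.weight"),
        (7, "self_attn_q_proj.lora_down.weight"),
    ]
    for layer, rest in cases:
        if 'mlp_fc1' in rest:
            out = f"model_transformer_resblocks_{layer}_{rest.replace('mlp_fc1', 'mlp_c_fc')}"
        elif 'mlp_fc2' in rest:
            out = f"model_transformer_resblocks_{layer}_{rest.replace('mlp_fc2', 'mlp_c_proj')}"
        else:
            out = f"model_transformer_resblocks_{layer}_{rest.replace('self_attn', 'attn')}"
        print(out)
    # model_transformer_resblocks_7_mlp_c_fc.lora_down.weight
    # model_transformer_resblocks_7_mlp_c_proj.lora_down.weight
    # model_transformer_resblocks_7_attn_q_proj.lora_down.weight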
@@ -102,9 +110,10 @@ def load_lora(name, filename):
     sd = sd_models.read_state_dict(filename)
 
     keys_failed_to_match = []
+    is_sd2 = 'model_transformer_resblocks' in shared.sd_model.lora_layer_mapping
 
     for key_diffusers, weight in sd.items():
-        fullkey = convert_diffusers_name_to_compvis(key_diffusers)
+        fullkey = convert_diffusers_name_to_compvis(key_diffusers, is_sd2)
         key, lora_key = fullkey.split(".", 1)
 
         sd_module = shared.sd_model.lora_layer_mapping.get(key, None)
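Here is_sd2 is inferred from the loaded model rather than passed in by the caller. lora_layer_mapping is a dict, so the `in` test checks for an exact key; this presumably works because the mapping is built from named_modules() with dots replaced by underscores, and named_modules() also yields container modules, so an SD2 text encoder contributes the literal key 'model_transformer_resblocks'. A sketch of that assumption with stand-in modules (the real mapping is built elsewhere in the extension, not in this diff):

    import torch

    class TinyTransformer(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.resblocks = torch.nn.ModuleList([torch.nn.Linear(4, 4)])

    class TinySD2TextEncoder(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.transformer = TinyTransformer()

    class TinyWrapper(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.model = TinySD2TextEncoder()

    # named_modules() yields containers too, e.g. 'model.transformer.resblocks',
    # which becomes the exact key 'model_transformer_resblocks' after replace().
    mapping = {name.replace(".", "_"): module
               for name, module in TinyWrapper().named_modules()}

    print('model_transformer_resblocks' in mapping)  # True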
@@ -123,9 +132,13 @@ def load_lora(name, filename):
         if type(sd_module) == torch.nn.Linear:
             module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
+        elif type(sd_module) == torch.nn.modules.linear.NonDynamicallyQuantizableLinear:
+            module = torch.nn.modules.linear.NonDynamicallyQuantizableLinear(weight.shape[1], weight.shape[0], bias=False)
         elif type(sd_module) == torch.nn.Conv2d:
             module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
         else:
-            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+            print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
+            continue
 
         with torch.no_grad():
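Two changes in this hunk: a new branch for NonDynamicallyQuantizableLinear, and the hard assert replaced by a warning plus continue, so one unsupported layer no longer aborts loading the whole LoRA. The new type matters because torch.nn.MultiheadAttention, used by the SD2 text encoder, exposes its output projection as that Linear subclass, and the exact type(...) == checks above do not treat it as a plain Linear. A quick demonstration:

    import torch

    mha = torch.nn.MultiheadAttention(embed_dim=8, num_heads=2)
    out_proj = mha.out_proj
    print(type(out_proj).__name__)                # NonDynamicallyQuantizableLinear
    print(type(out_proj) == torch.nn.Linear)      # False: exact type check misses it
    print(isinstance(out_proj, torch.nn.Linear))  # True: it subclasses Linear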
@@ -242,6 +255,10 @@ def lora_Conv2d_load_state_dict(self: torch.nn.Conv2d, *args, **kwargs):
     return torch.nn.Conv2d_load_state_dict_before_lora(self, *args, **kwargs)
 
 
+def lora_NonDynamicallyQuantizableLinear_forward(self, input):
+    return lora_forward(self, input, torch.nn.NonDynamicallyQuantizableLinear_forward_before_lora(self, input))
+
+
 def list_available_loras():
     available_loras.clear()
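The new forward follows the same monkey-patch pattern as the extension's existing Linear/Conv2d hooks: the original method is stashed on torch.nn under a *_before_lora name (that hook-up code is outside this diff), and the replacement routes through lora_forward to add the LoRA contribution to the base output. A self-contained sketch of the pattern, with a stub standing in for lora_forward:

    import torch

    def lora_forward_stub(module, input, base_output):
        # Stand-in for the extension's lora_forward(), which would add
        # up(down(input)) * scale to base_output for each active LoRA.
        return base_output

    # Stash the original forward once, then patch the class method so every
    # Linear call is routed through the wrapper.
    if not hasattr(torch.nn, "Linear_forward_before_lora"):
        torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward

    def lora_Linear_forward(self, input):
        return lora_forward_stub(self, input, torch.nn.Linear_forward_before_lora(self, input))

    torch.nn.Linear.forward = lora_Linear_forward

    layer = torch.nn.Linear(4, 4)
    print(layer(torch.zeros(1, 4)).shape)  # torch.Size([1, 4]), via the patched path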