novelai-storage / Stable Diffusion Webui / Commits / f6c8201e

Commit f6c8201e authored Nov 03, 2023 by v0xie
refactor: move factorization to lyco_helpers, separate calc_updown for kohya and kb
parent fe1967a4
Showing 2 changed files with 77 additions and 101 deletions (+77 -101)
extensions-builtin/Lora/lyco_helpers.py    +47 -0
extensions-builtin/Lora/network_oft.py     +30 -101
extensions-builtin/Lora/lyco_helpers.py  (view file @ f6c8201e)
@@ -19,3 +19,50 @@ def rebuild_cp_decomposition(up, down, mid):
    up = up.reshape(up.size(0), -1)
    down = down.reshape(down.size(0), -1)
    return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py
def factorization(dimension: int, factor: int = -1) -> tuple[int, int]:
    '''
    return a tuple of two values of the input dimension decomposed by the number closest to factor
    second value is higher or equal than first value.

    In LoRA with Kronecker Product, first value is a value for weight scale.
    second value is a value for weight.

    Because of the non-commutative property, A⊗B ≠ B⊗A. The meaning of the two matrices is slightly different.

    examples)
    factor
        -1              2               4               8               16              ...
    127 -> 1, 127   127 -> 1, 127   127 -> 1, 127   127 -> 1, 127   127 -> 1, 127
    128 -> 8, 16    128 -> 2, 64    128 -> 4, 32    128 -> 8, 16    128 -> 8, 16
    250 -> 10, 25   250 -> 2, 125   250 -> 2, 125   250 -> 5, 50    250 -> 10, 25
    360 -> 8, 45    360 -> 2, 180   360 -> 4, 90    360 -> 8, 45    360 -> 12, 30
    512 -> 16, 32   512 -> 2, 256   512 -> 4, 128   512 -> 8, 64    512 -> 16, 32
    1024 -> 32, 32  1024 -> 2, 512  1024 -> 4, 256  1024 -> 8, 128  1024 -> 16, 64
    '''

    if factor > 0 and (dimension % factor) == 0:
        m = factor
        n = dimension // factor
        if m > n:
            n, m = m, n
        return m, n
    if factor < 0:
        factor = dimension
    m, n = 1, dimension
    length = m + n
    while m < n:
        new_m = m + 1
        while dimension % new_m != 0:
            new_m += 1
        new_n = dimension // new_m
        if new_m + new_n > length or new_m > factor:
            break
        else:
            m, n = new_m, new_n
    if m > n:
        n, m = m, n
    return m, n
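As a quick, doctest-style sanity check of the docstring table above (not part of the commit; it assumes extensions-builtin/Lora is on the import path):

    from lyco_helpers import factorization

    assert factorization(128) == (8, 16)        # factor = -1: closest-to-square split
    assert factorization(128, 2) == (2, 64)     # factor 2 divides 128, so it is used directly
    assert factorization(127) == (1, 127)       # primes can only split as 1 x dimension
    assert factorization(1024, 16) == (16, 64)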
extensions-builtin/Lora/network_oft.py  (view file @ f6c8201e)
import torch
import network
from lyco_helpers import factorization
from einops import rearrange
from modules import devices


class ModuleTypeOFT(network.ModuleType):
@@ -11,7 +11,8 @@ class ModuleTypeOFT(network.ModuleType):
        return None
# adapted from kohya's implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
# adapted from kohya-ss' implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
# and KohakuBlueleaf's implementation https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py
class NetworkModuleOFT(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
@@ -19,6 +20,7 @@ class NetworkModuleOFT(network.NetworkModule):
        self.lin_module = None
        self.org_module: list[torch.Module] = [self.sd_module]

        # kohya-ss
        if "oft_blocks" in weights.w.keys():
            self.is_kohya = True
@@ -37,61 +39,31 @@ class NetworkModuleOFT(network.NetworkModule):
        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
        is_conv = type(self.sd_module) in [torch.nn.Conv2d]
        is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention]
#if "Linear" in self.sd_module.__class__.__name__ or is_linear:
if
is_linear
:
if
is_linear
:
self
.
out_dim
=
self
.
sd_module
.
out_features
self
.
out_dim
=
self
.
sd_module
.
out_features
        #elif hasattr(self.sd_module, "embed_dim"):
        #    self.out_dim = self.sd_module.embed_dim
        #else:
        #    raise ValueError("Linear sd_module must have out_features or embed_dim")
        elif is_other_linear:
            self.out_dim = self.sd_module.embed_dim
            #self.org_weight = self.org_module[0].weight
            # if hasattr(self.sd_module, "in_proj_weight"):
            #     self.in_proj_dim = self.sd_module.in_proj_weight.shape[1]
            # if hasattr(self.sd_module, "out_proj_weight"):
            #     self.out_proj_dim = self.sd_module.out_proj_weight.shape[0]
            # self.in_proj_dim = self.sd_module.in_proj_weight.shape[1]
        elif is_conv:
            self.out_dim = self.sd_module.out_channels
        else:
            raise ValueError("sd_module must be Linear or Conv")
        if self.is_kohya:
            self.num_blocks = self.dim
            self.block_size = self.out_dim // self.num_blocks
            self.constraint = self.alpha * self.out_dim
        #elif is_linear or is_conv:
        else:
            self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
            self.constraint = None
        # if is_other_linear:
        #     weight = self.oft_blocks.reshape(self.oft_blocks.shape[0], -1)
        #     module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
        #     with torch.no_grad():
        #         if weight.shape != module.weight.shape:
        #             weight = weight.reshape(module.weight.shape)
        #         module.weight.copy_(weight)
        #         module.to(device=devices.cpu, dtype=devices.dtype)
        #         module.weight.requires_grad_(False)
        #         self.lin_module = module
        #return module

    def merge_weight(self, R_weight, org_weight):
        R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype)
        if org_weight.dim() == 4:
            weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight)
        else:
            weight = torch.einsum("oi, op -> pi", org_weight, R_weight)
        #weight = torch.einsum(
        #    "k n m, k n ... -> k m ...",
        #    self.oft_diag * scale + torch.eye(self.block_size, device=device),
        #    org_weight
        #)
        return weight
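The two einsums in merge_weight apply the rotation along the output dimension of the weight: "oi, op -> pi" is Rᵀ·W for a linear weight, and "oihw, op -> pihw" does the same over the output channels of a conv kernel. A minimal check with made-up shapes (illustration only, not part of the commit):

    import torch

    W = torch.randn(6, 3)           # (out_features, in_features)
    W4 = torch.randn(6, 3, 3, 3)    # (out_channels, in_channels, kh, kw)
    R = torch.randn(6, 6)           # rotation over the output dimension

    assert torch.allclose(torch.einsum("oi, op -> pi", W, R), R.T @ W, atol=1e-6)
    assert torch.allclose(torch.einsum("oihw, op -> pihw", W4, R).reshape(6, -1),
                          R.T @ W4.reshape(6, -1), atol=1e-6)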
    def get_weight(self, oft_blocks, multiplier=None):
@@ -111,48 +83,51 @@ class NetworkModuleOFT(network.NetworkModule):
        block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I
        R = torch.block_diag(*block_R_weighted)

        return R
        #return self.oft_blocks
The previous single calc_updown, which handled both the kohya and KohakuBlueleaf (kb) weight formats in one body and still carried several commented-out experiments (#R = self.oft_blocks.to(...), #orig_weight = rearrange(...), #merged_weight=merged_weight.permute(1, 0), #updown = weight.to(...) - orig_weight, and the #if self.lin_module is not None: block), is replaced by two dedicated implementations and a small dispatcher:

    def calc_updown_kohya(self, orig_weight, multiplier):
        R = self.get_weight(self.oft_blocks, multiplier)
        merged_weight = self.merge_weight(R, orig_weight)

        updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight
        output_shape = orig_weight.shape
        orig_weight = orig_weight

        return self.finalize_updown(updown, orig_weight, output_shape)

    def calc_updown_kb(self, orig_weight, multiplier):
        is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention]

        if not is_other_linear:
            if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]:
                orig_weight = orig_weight.permute(1, 0)

            R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)

            merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
            merged_weight = torch.einsum(
                'k n m, k n ... -> k m ...',
                R * multiplier + torch.eye(self.block_size, device=orig_weight.device),
                merged_weight
            )
            merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')

            if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]:
                orig_weight = orig_weight.permute(1, 0)

            updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight
            output_shape = orig_weight.shape
        else:
            # FIXME: skip MultiheadAttention for now
            updown = torch.zeros([orig_weight.shape[1], orig_weight.shape[1]], device=orig_weight.device, dtype=orig_weight.dtype)
            output_shape = (orig_weight.shape[1], orig_weight.shape[1])

        return self.finalize_updown(updown, orig_weight, output_shape)

    def calc_updown(self, orig_weight):
        multiplier = self.multiplier() * self.calc_scale()
        if self.is_kohya:
            return self.calc_updown_kohya(orig_weight, multiplier)
        else:
            return self.calc_updown_kb(orig_weight, multiplier)
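The rearrange → einsum → rearrange pipeline in calc_updown_kb amounts to multiplying the weight by one block-diagonal matrix built from the multiplier-scaled OFT blocks plus identity. A small sketch with hypothetical sizes (illustration only, not part of the commit):

    import torch
    from einops import rearrange

    num_blocks, block_size, in_dim = 4, 8, 16
    orig_weight = torch.randn(num_blocks * block_size, in_dim)
    R = 0.01 * torch.randn(num_blocks, block_size, block_size)   # stands in for self.oft_blocks
    multiplier = 1.0

    # block-wise path, as in calc_updown_kb
    w = rearrange(orig_weight, '(k n) ... -> k n ...', k=num_blocks, n=block_size)
    w = torch.einsum('k n m, k n ... -> k m ...', R * multiplier + torch.eye(block_size), w)
    merged = rearrange(w, 'k m ... -> (k m) ...')

    # equivalent dense form: one block-diagonal rotation applied to the whole weight
    dense = torch.block_diag(*(R * multiplier + torch.eye(block_size)))
    assert torch.allclose(merged, dense.T @ orig_weight, atol=1e-5)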
    # override to remove the multiplier/scale factor; it's already multiplied in get_weight
    def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
        #return super().finalize_updown(updown, orig_weight, output_shape, ex_bias)
@@ -172,49 +147,3 @@ class NetworkModuleOFT(network.NetworkModule):
            ex_bias = ex_bias * self.multiplier()

        return updown, ex_bias
This hunk removes the factorization helper previously defined at the end of network_oft.py; it is identical to the copy added to lyco_helpers.py above.