novelai-storage / Stable Diffusion Webui / Commits

Commit 7bb0fbed
authored Jul 18, 2023 by lambertae
code styling

parent 37e048a7
Showing 1 changed file with 5 additions and 9 deletions

modules/sd_samplers_kdiffusion.py  (+5, -9)
...
@@ -35,17 +35,15 @@ samplers_k_diffusion = [
 @torch.no_grad()
-def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list={0.1: [10, 2, 2]}):
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.):
     """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)"""
     '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}'''
+    restart_list = {0.1: [10, 2, 2]}
-    from tqdm.auto import trange, tqdm
+    from tqdm.auto import trange
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])
     step_id = 0
     from k_diffusion.sampling import to_d, append_zero
     def heun_step(x, old_sigma, new_sigma):
         nonlocal step_id
         denoised = model(x, old_sigma * s_in, **extra_args)
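Aside on this hunk: the default `restart_list={0.1: [10, 2, 2]}` is removed from the signature and the dict is assigned inside the body instead (and the unused `tqdm` import is dropped). The commit message only says "code styling", so the motivation is not stated; one common reason for this kind of change is Python's shared-mutable-default behavior, sketched below with hypothetical helper names (`sampler_with_shared_default`, `sampler_with_local_default`) that are not from the repository.

# Minimal sketch, not from the commit: a dict default is evaluated once at
# definition time, so every call without the argument shares the same object.
def sampler_with_shared_default(config={0.1: [10, 2, 2]}):
    config[0.2] = [5, 1, 1]   # mutates the shared default dict
    return config

def sampler_with_local_default(config=None):
    config = {0.1: [10, 2, 2]} if config is None else config  # fresh dict per call
    config[0.2] = [5, 1, 1]
    return config

print(sampler_with_shared_default())  # {0.1: [10, 2, 2], 0.2: [5, 1, 1]}
print(sampler_with_shared_default())  # the 0.2 entry persists: same dict as before
print(sampler_with_local_default())   # always starts from a clean dict

Whether that pitfall actually applied here is not visible in the diff (the function rebinds `restart_list` rather than mutating it), so treat this purely as background.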
...
@@ -70,8 +68,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
     for key, value in restart_list.items():
         temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value
     restart_list = temp_list
     def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
         ramp = torch.linspace(0, 1, n).to(device)
         min_inv_rho = (sigma_min ** (1 / rho))
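The unchanged context lines above remap each `min_sigma` key of `restart_list` to the index of the closest value in the sigma schedule. A small standalone sketch of that mapping, using toy sigma values that are not from the repository:

# Toy example of the key-to-index remapping done by the context lines above.
import torch

sigmas = torch.tensor([14.6, 7.9, 4.0, 1.9, 0.8, 0.3, 0.1, 0.0])
restart_list = {0.1: [10, 2, 2]}

temp_list = {}
for key, value in restart_list.items():
    # index of the schedule entry closest to this min_sigma key
    temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value

print(temp_list)  # {6: [10, 2, 2]} -> index 6 holds the sigma closest to 0.1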
...
@@ -82,7 +78,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
         max_inv_rho = max_inv_rho.to(device)
         sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
         return append_zero(sigmas).to(device)
     for i in trange(len(sigmas) - 1, disable=disable):
         x = heun_step(x, sigmas[i], sigmas[i + 1])
         if i + 1 in restart_list:
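The nested `get_sigmas_karras` helper visible in this context interpolates between the two endpoint sigmas in sigma**(1/rho) space, raises the result back to the rho power, and appends a trailing zero. A self-contained sketch with a hypothetical name (`karras_schedule`) and `append_zero` inlined so it runs on its own; the in-file version takes tensor endpoints and moves `max_inv_rho` to the target device:

import torch

def karras_schedule(n, sigma_min, sigma_max, rho=7., device='cpu'):
    # interpolate in sigma**(1/rho) space, then map back with ** rho
    ramp = torch.linspace(0, 1, n).to(device)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    return torch.cat([sigmas, sigmas.new_zeros([1])])  # append_zero equivalent

print(karras_schedule(5, 0.1, 10.0))  # monotonically decreasing, ends in 0.0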
...
@@ -91,7 +86,8 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
             max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
             if max_idx < min_idx:
                 sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
-                for times in range(restart_times):
+                while restart_times > 0:
+                    restart_times -= 1
                     x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
                     for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
                         x = heun_step(x, old_sigma, new_sigma)
...
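The one substantive-looking edit in the last hunk replaces `for times in range(restart_times):` (with an unused loop variable) by a `while restart_times > 0:` loop that decrements the counter. Both forms run the restart body the same number of times; the while form simply consumes `restart_times`. A toy comparison with hypothetical helper names (`run_for`, `run_while`), not taken from the file:

def run_for(restart_times):
    calls = 0
    for times in range(restart_times):
        calls += 1
    return calls, restart_times        # counter left untouched

def run_while(restart_times):
    calls = 0
    while restart_times > 0:
        restart_times -= 1
        calls += 1
    return calls, restart_times        # counter consumed, ends at 0

print(run_for(2))    # (2, 2)
print(run_while(2))  # (2, 0)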