Stable Diffusion Webui

Commit d78377ea
Authored Jul 08, 2023 by AUTOMATIC1111; committed by GitHub on Jul 08, 2023
Merge pull request #11593 from akx/better-status-reporting-1
Better status reporting, part 1
Parents: ae74b44c, fc049a2f
Showing 7 changed files with 47 additions and 32 deletions.
modules/api/api.py         +21 -23
modules/call_queue.py       +1  -1
modules/extras.py           +1  -2
modules/interrogate.py      +1  -2
modules/postprocessing.py   +1  -2
modules/shared.py          +11  -2
webui.py                   +11  -0
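The change common to all seven files: State.begin() now takes a job argument, replacing the old two-step pattern of calling begin() and then assigning shared.state.job by hand. A minimal before/after sketch (it assumes the webui's modules.shared is importable, i.e. a checkout of this repo):

from modules import shared  # webui module; only available inside the repo

# Before: two statements, and callers could forget to name the job.
shared.state.begin()
shared.state.job = 'interrogate'

# After: one call; begin() stores the name and logs "Starting job interrogate".
shared.state.begin(job="interrogate")
shared.state.end()  # now also logs the job name and its duration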
modules/api/api.py
@@ -330,7 +330,7 @@ class Api:
             p.outpath_grids = opts.outdir_txt2img_grids
             p.outpath_samples = opts.outdir_txt2img_samples
 
-            shared.state.begin()
+            shared.state.begin(job="scripts_txt2img")
             if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_txt2img.run(p, *p.script_args)  # Need to pass args as list here
@@ -387,7 +387,7 @@ class Api:
             p.outpath_grids = opts.outdir_img2img_grids
             p.outpath_samples = opts.outdir_img2img_samples
 
-            shared.state.begin()
+            shared.state.begin(job="scripts_img2img")
             if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_img2img.run(p, *p.script_args)  # Need to pass args as list here
@@ -396,7 +396,6 @@ class Api:
                 processed = process_images(p)
             shared.state.end()
 
-
         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
 
         if not img2imgreq.include_init_images:
@@ -603,44 +602,42 @@ class Api:
     def create_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="create_embedding")
             filename = create_embedding(**args)  # create empty embedding
             sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()  # reload embeddings so new one can be immediately used
-            shared.state.end()
             return models.CreateResponse(info=f"create embedding filename: {filename}")
         except AssertionError as e:
-            shared.state.end()
             return models.TrainResponse(info=f"create embedding error: {e}")
+        finally:
+            shared.state.end()
 
     def create_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="create_hypernetwork")
             filename = create_hypernetwork(**args)  # create empty embedding
-            shared.state.end()
             return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
         except AssertionError as e:
-            shared.state.end()
             return models.TrainResponse(info=f"create hypernetwork error: {e}")
+        finally:
+            shared.state.end()
 
     def preprocess(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="preprocess")
             preprocess(**args)  # quick operation unless blip/booru interrogation is enabled
             shared.state.end()
             return models.PreprocessResponse(info='preprocess complete')
         except KeyError as e:
-            shared.state.end()
             return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
-        except AssertionError as e:
-            shared.state.end()
+        except Exception as e:
             return models.PreprocessResponse(info=f"preprocess error: {e}")
-        except FileNotFoundError as e:
-            shared.state.end()
-            return models.PreprocessResponse(info=f'preprocess error: {e}')
+        finally:
+            shared.state.end()
 
     def train_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_embedding")
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None
             filename = ''

@@ -653,15 +650,15 @@ class Api:
             finally:
                 if not apply_optimizations:
                     sd_hijack.apply_optimizations()
-                shared.state.end()
             return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
-        except AssertionError as msg:
-            shared.state.end()
+        except Exception as msg:
             return models.TrainResponse(info=f"train embedding error: {msg}")
+        finally:
+            shared.state.end()
 
     def train_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_hypernetwork")
             shared.loaded_hypernetworks = []
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None

@@ -679,9 +676,10 @@ class Api:
                     sd_hijack.apply_optimizations()
                 shared.state.end()
             return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
-        except AssertionError:
+        except Exception as exc:
+            return models.TrainResponse(info=f"train embedding error: {exc}")
+        finally:
             shared.state.end()
-            return models.TrainResponse(info=f"train embedding error: {error}")
 
     def get_memory(self):
         try:
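The api.py changes above all follow one shape: shared.state.end() moves out of the individual success and error branches into a single finally: clause, and narrow exception types widen to Exception, so the job state is reset even when an unexpected error escapes. A stripped-down sketch of the resulting shape (condensed from the diff, not the full endpoint code):

def create_embedding(self, args: dict):
    try:
        shared.state.begin(job="create_embedding")
        filename = create_embedding(**args)
        return models.CreateResponse(info=f"create embedding filename: {filename}")
    except AssertionError as e:
        return models.TrainResponse(info=f"create embedding error: {e}")
    finally:
        # Runs after the return value is computed, on success, on handled
        # errors, and on anything unhandled, so end() can no longer be skipped.
        shared.state.end()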
modules/call_queue.py
@@ -30,7 +30,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
             id_task = None
 
         with queue_lock:
-            shared.state.begin()
+            shared.state.begin(job=id_task)
             progress.start_task(id_task)
 
             try:
modules/extras.py
@@ -73,8 +73,7 @@ def to_half(tensor, enable):
 def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata):
-    shared.state.begin()
-    shared.state.job = 'model-merge'
+    shared.state.begin(job="model-merge")
 
     def fail(message):
         shared.state.textinfo = message
modules/interrogate.py
@@ -184,8 +184,7 @@ class InterrogateModels:
     def interrogate(self, pil_image):
         res = ""
-        shared.state.begin()
-        shared.state.job = 'interrogate'
+        shared.state.begin(job="interrogate")
         try:
             if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                 lowvram.send_everything_to_cpu()
modules/postprocessing.py
@@ -9,8 +9,7 @@ from modules.shared import opts
 def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
     devices.torch_gc()
 
-    shared.state.begin()
-    shared.state.job = 'extras'
+    shared.state.begin(job="extras")
 
     image_data = []
     image_names = []
modules/shared.py
@@ -4,6 +4,7 @@ import os
 import sys
 import threading
 import time
+import logging
 
 import gradio as gr
 import torch

@@ -18,6 +19,8 @@ from modules.paths_internal import models_path, script_path, data_path, sd_confi
 from ldm.models.diffusion.ddpm import LatentDiffusion
 from typing import Optional
 
+log = logging.getLogger(__name__)
+
 demo = None
 
 parser = cmd_args.parser

@@ -144,12 +147,15 @@ class State:
     def request_restart(self) -> None:
         self.interrupt()
         self.server_command = "restart"
+        log.info("Received restart request")
 
     def skip(self):
         self.skipped = True
+        log.info("Received skip request")
 
     def interrupt(self):
         self.interrupted = True
+        log.info("Received interrupt request")
 
     def nextjob(self):
         if opts.live_previews_enable and opts.show_progress_every_n_steps == -1:

@@ -173,7 +179,7 @@ class State:
         return obj
 
-    def begin(self):
+    def begin(self, job: str = "(unknown)"):
         self.sampling_step = 0
         self.job_count = -1
         self.processing_has_refined_job_count = False

@@ -187,10 +193,13 @@ class State:
         self.interrupted = False
         self.textinfo = None
         self.time_start = time.time()
+        self.job = job
         devices.torch_gc()
+        log.info("Starting job %s", job)
 
     def end(self):
+        duration = time.time() - self.time_start
+        log.info("Ending job %s (%.2f seconds)", self.job, duration)
         self.job = ""
         self.job_count = 0
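Taken together, the State changes mean every job now produces a paired start/end log line with a duration. A self-contained sketch of just the logging behavior added here (not the full State class, which tracks many more fields):

import logging
import time

log = logging.getLogger(__name__)

class State:
    # Minimal sketch of the begin/end logging added in this commit.
    job = ""
    time_start = 0.0

    def begin(self, job: str = "(unknown)"):
        self.time_start = time.time()
        self.job = job
        log.info("Starting job %s", job)

    def end(self):
        duration = time.time() - self.time_start
        log.info("Ending job %s (%.2f seconds)", self.job, duration)
        self.job = ""

logging.basicConfig(level=logging.INFO)
state = State()
state.begin(job="interrogate")
state.end()  # logs e.g. "Ending job interrogate (0.00 seconds)"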
webui.py
@@ -18,6 +18,17 @@ from packaging import version
 import logging
 
+# We can't use cmd_opts for this because it will not have been initialized at this point.
+log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
+if log_level:
+    log_level = getattr(logging, log_level.upper(), None) or logging.INFO
+    logging.basicConfig(
+        level=log_level,
+        format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
+        datefmt='%Y-%m-%d %H:%M:%S',
+    )
+
+logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)  # sshh...
 logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
 
 from modules import paths, timer, import_hook, errors, devices  # noqa: F401
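The new webui.py block reads the SD_WEBUI_LOG_LEVEL environment variable before cmd_opts exists, and an unrecognized level name falls back to INFO rather than raising. A small sketch of that fallback in isolation (the environment value here is illustrative):

import logging
import os

# e.g. launched as: SD_WEBUI_LOG_LEVEL=debug python webui.py
os.environ["SD_WEBUI_LOG_LEVEL"] = "debug"  # illustrative value

log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
if log_level:
    # getattr(logging, "DEBUG", None) -> 10; an unknown name yields None,
    # and the `or` then falls back to logging.INFO.
    log_level = getattr(logging, log_level.upper(), None) or logging.INFO

print(logging.getLevelName(log_level))  # prints: DEBUG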