novelai-storage / Stable Diffusion Webui · Commits

Commit b12de850, authored Dec 25, 2022 by AUTOMATIC1111, committed by GitHub on Dec 25, 2022
Merge pull request #5992 from yuvalabou/F541

Fix F541: f-string without any placeholders

Parents: a66514e1, 3bf5591e
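F541 is the pyflakes/flake8 check for f-strings that contain no placeholders: the f prefix changes nothing at runtime, but it suggests interpolation that never happens, so the fix is to drop the prefix (or add the placeholder that was actually intended). A minimal sketch of the pattern applied throughout this commit; the values below are illustrative, not taken from the repository:

# Flagged by F541: no placeholders, so the f prefix is redundant.
print(f"Model loaded.")

# The fix applied in this commit: drop the prefix.
print("Model loaded.")

# The prefix stays only where the string actually interpolates something.
model_path = "models/model.ckpt"  # hypothetical path for illustration
print(f"Loading model from {model_path}")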
Showing 10 changed files with 18 additions and 18 deletions (+18 -18)
extensions-builtin/LDSR/ldsr_model_arch.py  +1 -1
modules/codeformer/vqgan_arch.py  +2 -2
modules/hypernetworks/hypernetwork.py  +2 -2
modules/images.py  +1 -1
modules/interrogate.py  +1 -1
modules/safe.py  +4 -4
modules/sd_models.py  +4 -4
modules/sd_vae.py  +1 -1
modules/textual_inversion/textual_inversion.py  +1 -1
scripts/prompts_from_file.py  +1 -1
extensions-builtin/LDSR/ldsr_model_arch.py (view file @ b12de850)

@@ -26,7 +26,7 @@ class LDSR:
         global cached_ldsr_model

         if shared.opts.ldsr_cached and cached_ldsr_model is not None:
-            print(f"Loading model from cache")
+            print("Loading model from cache")
             model: torch.nn.Module = cached_ldsr_model
         else:
             print(f"Loading model from {self.modelPath}")
modules/codeformer/vqgan_arch.py (view file @ b12de850)

@@ -382,7 +382,7 @@ class VQAutoEncoder(nn.Module):
                 self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
                 logger.info(f'vqgan is loaded from: {model_path} [params]')
             else:
-                raise ValueError(f'Wrong params!')
+                raise ValueError('Wrong params!')

     def forward(self, x):

@@ -431,7 +431,7 @@ class VQGANDiscriminator(nn.Module):
             elif 'params' in chkpt:
                 self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
             else:
-                raise ValueError(f'Wrong params!')
+                raise ValueError('Wrong params!')

     def forward(self, x):
         return self.main(x)
\ No newline at end of file
modules/hypernetworks/hypernetwork.py (view file @ b12de850)

@@ -277,7 +277,7 @@ def load_hypernetwork(filename):
             print(traceback.format_exc(), file=sys.stderr)
     else:
         if shared.loaded_hypernetwork is not None:
-            print(f"Unloading hypernetwork")
+            print("Unloading hypernetwork")

         shared.loaded_hypernetwork = None

@@ -443,7 +443,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
     initial_step = hypernetwork.step or 0
     if initial_step >= steps:
-        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
         return hypernetwork, filename

     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
modules/images.py (view file @ b12de850)

@@ -599,7 +599,7 @@ def read_info_from_image(image):
 Negative prompt: {json_info["uc"]}
 Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
         except Exception:
-            print(f"Error parsing NovelAI image generation parameters:", file=sys.stderr)
+            print("Error parsing NovelAI image generation parameters:", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)

     return geninfo, items
modules/interrogate.py (view file @ b12de850)

@@ -172,7 +172,7 @@ class InterrogateModels:
                         res += ", " + match

         except Exception:
-            print(f"Error interrogating", file=sys.stderr)
+            print("Error interrogating", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
             res += "<error>"
modules/safe.py (view file @ b12de850)

@@ -137,15 +137,15 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
     except pickle.UnpicklingError:
         print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
-        print(f"-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
-        print(f"You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
+        print("-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
+        print("You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
         return None

     except Exception:
         print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
-        print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
-        print(f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
+        print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
+        print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
         return None

     return unsafe_torch_load(filename, *args, **kwargs)
modules/sd_models.py (view file @ b12de850)

@@ -117,13 +117,13 @@ def select_checkpoint():
         return checkpoint_info

     if len(checkpoints_list) == 0:
-        print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
+        print("No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
         if shared.cmd_opts.ckpt is not None:
             print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
         print(f" - directory {model_path}", file=sys.stderr)
         if shared.cmd_opts.ckpt_dir is not None:
             print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
-        print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
+        print("Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
         exit(1)

     checkpoint_info = next(iter(checkpoints_list.values()))

@@ -324,7 +324,7 @@ def load_model(checkpoint_info=None):
     script_callbacks.model_loaded_callback(sd_model)

-    print(f"Model loaded.")
+    print("Model loaded.")

     return sd_model

@@ -359,5 +359,5 @@ def reload_model_weights(sd_model=None, info=None):
     if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
         sd_model.to(devices.device)

-    print(f"Weights loaded.")
+    print("Weights loaded.")
     return sd_model
modules/sd_vae.py (view file @ b12de850)

@@ -208,5 +208,5 @@ def reload_vae_weights(sd_model=None, vae_file="auto"):
     if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
         sd_model.to(devices.device)

-    print(f"VAE Weights loaded.")
+    print("VAE Weights loaded.")
     return sd_model
modules/textual_inversion/textual_inversion.py (view file @ b12de850)

@@ -263,7 +263,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     initial_step = embedding.step or 0
     if initial_step >= steps:
-        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
         return embedding, filename

     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
scripts/prompts_from_file.py (view file @ b12de850)

@@ -140,7 +140,7 @@ class Script(scripts.Script):
                 try:
                     args = cmdargs(line)
                 except Exception:
-                    print(f"Error parsing line [line] as commandline:", file=sys.stderr)
+                    print(f"Error parsing line {line} as commandline:", file=sys.stderr)
                     print(traceback.format_exc(), file=sys.stderr)
                     args = {"prompt": line}
             else:
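The scripts/prompts_from_file.py change above is the one place in this commit where behavior actually differs: the original message contained the literal text [line], so it was printed verbatim, while the corrected version uses a real {line} placeholder and shows the line that failed to parse. A minimal sketch of the difference, with a made-up input line:

line = '--prompt "a cat" --steps 20'  # hypothetical input line for illustration

# Before: [line] is ordinary text inside the f-string and is printed literally.
print(f"Error parsing line [line] as commandline:")

# After: {line} is a placeholder, so the offending line appears in the message.
print(f"Error parsing line {line} as commandline:")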