Commit a459075d authored by AUTOMATIC1111

actual solution to the uncommon hanging problem that is seemingly caused by multiple progress requests working on same tensor
parent d7c9c614
@@ -95,31 +95,30 @@ def progressapi(req: ProgressRequest):
     predicted_duration = elapsed_since_start / progress if progress > 0 else None
     eta = predicted_duration - elapsed_since_start if predicted_duration is not None else None
 
-    id_live_preview = req.id_live_preview
-    shared.state.set_current_image()
-    if opts.live_previews_enable and req.live_preview and shared.state.id_live_preview != req.id_live_preview:
-        image = shared.state.current_image
-        if image is not None:
-            buffered = io.BytesIO()
-
-            if opts.live_previews_image_format == "png":
-                # using optimize for large images takes an enormous amount of time
-                if max(*image.size) <= 256:
-                    save_kwargs = {"optimize": True}
-                else:
-                    save_kwargs = {"optimize": False, "compress_level": 1}
-            else:
-                save_kwargs = {}
-
-            image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
-            base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
-            live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
-            id_live_preview = shared.state.id_live_preview
-        else:
-            live_preview = None
-    else:
-        live_preview = None
+    live_preview = None
+    id_live_preview = req.id_live_preview
+
+    if opts.live_previews_enable and req.live_preview:
+        shared.state.set_current_image()
+        if shared.state.id_live_preview != req.id_live_preview:
+            image = shared.state.current_image
+            if image is not None:
+                buffered = io.BytesIO()
+
+                if opts.live_previews_image_format == "png":
+                    # using optimize for large images takes an enormous amount of time
+                    if max(*image.size) <= 256:
+                        save_kwargs = {"optimize": True}
+                    else:
+                        save_kwargs = {"optimize": False, "compress_level": 1}
+                else:
+                    save_kwargs = {}
+
+                image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
+                base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+                live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
+                id_live_preview = shared.state.id_live_preview
 
     return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo)
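With this hunk (apparently modules/progress.py), the handler no longer touches the shared preview tensor on every progress request: shared.state.set_current_image() is only called when opts.live_previews_enable and req.live_preview are both set, and a new preview is only encoded when the client's id_live_preview is stale. A hedged sketch of how a client might poll this API under the new behaviour; the /internal/progress path, the port, and the id_task field are assumptions, while live_preview and id_live_preview come straight from the hunk above:

    # Hedged client-side sketch: poll the progress API and only handle a preview
    # when the server reports a new id_live_preview. Endpoint path, port and the
    # id_task field are assumptions; the other fields appear in the hunk above.
    import time
    import requests

    URL = "http://127.0.0.1:7860/internal/progress"  # assumed default webui address

    def poll(id_task, interval=0.5):
        id_live_preview = -1
        while True:
            resp = requests.post(URL, json={
                "id_task": id_task,
                "id_live_preview": id_live_preview,  # tell the server which preview we already have
                "live_preview": True,                # ask the server to produce previews at all
            })
            data = resp.json()

            if data.get("live_preview"):
                id_live_preview = data["id_live_preview"]  # remember the new preview's id
                print("new preview, id", id_live_preview)

            # stop once the task reports completed, or is neither active nor queued
            if data.get("completed") or not (data.get("active") or data.get("queued")):
                break
            time.sleep(interval)

Because the client echoes back the last id_live_preview it saw, the server can skip the base64 re-encode entirely for pollers that are already up to date.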
@@ -111,7 +111,7 @@ def images_tensor_to_samples(image, approximation=None, model=None):
 def store_latent(decoded):
-    state.current_latent = decoded.clone()
+    state.current_latent = decoded
 
     if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
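This hunk (seemingly modules/sd_samplers_common.py) drops the defensive .clone() on the stored latent, presumably part of the earlier attempted fix; with the preview decode now gated in the API handler, the extra copy on every sampling step is no longer needed. A minimal sketch, not webui code, of the difference between the two ways of storing the tensor:

    # Illustrative only: a clone is an independent copy, while a plain assignment
    # shares storage with the tensor the sampler keeps mutating in place.
    import torch

    latent = torch.zeros(2, 2)
    by_reference = latent          # what store_latent does after this commit
    by_clone = latent.clone()      # what the reverted line did

    latent.add_(1)                 # sampler keeps working on the tensor in place

    print(by_reference)            # reflects the in-place change: all ones
    print(by_clone)                # unaffected independent copy: all zeros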
@@ -128,7 +128,7 @@ class State:
         devices.torch_gc()
 
     def set_current_image(self):
-        """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
+        """if enough sampling steps have been made after the last call to this, sets self.current_image from self.current_latent, and modifies self.id_live_preview accordingly"""
         if not shared.parallel_processing_allowed:
             return
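The docstring update (apparently modules/shared_state.py) records that set_current_image() now also advances self.id_live_preview whenever it produces a new image, which is what lets the API handler above tell stale pollers apart from up-to-date ones. A hedged illustration of that counter pattern; the class below is a simplified stand-in, not the actual State implementation:

    # Simplified stand-in for the id_live_preview bookkeeping implied by the new docstring.
    class PreviewState:
        def __init__(self):
            self.current_latent = None
            self.current_image = None
            self.id_live_preview = 0   # bumped every time a new preview image is produced

        def assign_current_image(self, image):
            self.current_image = image
            self.id_live_preview += 1  # clients compare this against the id they last received

    state = PreviewState()
    state.assign_current_image("decoded preview #1")
    assert state.id_live_preview == 1  # a poller still holding id 0 now knows there is a newer image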