Commit 66b6bb09 authored by novelailab's avatar novelailab

error handling

parent 4c358ee7
...@@ -18,7 +18,6 @@ from PIL import Image ...@@ -18,7 +18,6 @@ from PIL import Image
def pil_upscale(image, scale=1): def pil_upscale(image, scale=1):
device = image.device device = image.device
dtype = image.dtype dtype = image.dtype
#image = Image.open("bob_Ross_as_captain_America__oil_on_canvas_artstation_by_J._C._Leyendecker_and_Edmund_Blair_Leighton_and_Charlie_Bowater_octane_render-0.jpg").convert("RGB") #image = Image.load("./Untitle524245425d.png")#
image = Image.fromarray((image.cpu().permute(1,2,0).numpy().astype(np.float32) * 255.).astype(np.uint8)) image = Image.fromarray((image.cpu().permute(1,2,0).numpy().astype(np.float32) * 255.).astype(np.uint8))
if scale > 1: if scale > 1:
image = image.resize((int(image.width * scale), int(image.height * scale)), resample=Image.LANCZOS) image = image.resize((int(image.width * scale), int(image.height * scale)), resample=Image.LANCZOS)
...@@ -211,7 +210,8 @@ class StableDiffusionModel(nn.Module): ...@@ -211,7 +210,8 @@ class StableDiffusionModel(nn.Module):
) )
x_samples_ddim = self.model.decode_first_stage(samples) x_samples_ddim = self.model.decode_first_stage(samples)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0).squeeze(0)
x_samples_ddim = pil_upscale(x_samples_ddim, scale=2)
with torch.autocast("cuda", enabled=self.config.amp): with torch.autocast("cuda", enabled=self.config.amp):
with self.model.ema_scope(): with self.model.ema_scope():
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment