Commit 179ae47d authored by AUTOMATIC1111

fix the problem with infinite prompts where empty cond would be calculated incorrectly

parent 0b646335
@@ -177,6 +177,7 @@ class SD3Cond(torch.nn.Module):
         self.weights_loaded = False

     def forward(self, prompts: list[str]):
+        with devices.without_autocast():
             lg_out, vector_out = self.model_lg(prompts)
             token_count = lg_out.shape[1]
...
@@ -47,7 +47,6 @@ class SD3Inferencer(torch.nn.Module):
         return contextlib.nullcontext()

     def get_learned_conditioning(self, batch: list[str]):
-        with devices.without_autocast():
-            return self.cond_stage_model(batch)
+        return self.cond_stage_model(batch)

     def apply_model(self, x, t, cond):
...
@@ -718,9 +718,8 @@ def get_empty_cond(sd_model):
     p = processing.StableDiffusionProcessingTxt2Img()
     extra_networks.activate(p, {})

-    if hasattr(sd_model, 'conditioner'):
+    if hasattr(sd_model, 'get_learned_conditioning'):
         d = sd_model.get_learned_conditioning([""])
-        return d['crossattn']
     else:
         d = sd_model.cond_stage_model([""])
...
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment