Commit 06d0a5ab authored by AUTOMATIC1111

fix NaN issue when running without --precision half

parent 80f618ea
@@ -262,8 +262,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):

     def forward(self, tokens):
         backup_embeds = self.transformer.get_input_embeddings()
-        device = backup_embeds.weight.device
-        tokens = torch.LongTensor(tokens).to(device)
+        tokens = torch.asarray(tokens, dtype=torch.int64, device=backup_embeds.weight.device)
         outputs = self.transformer(tokens, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state)
         self.transformer.set_input_embeddings(backup_embeds)
         if self.layer == "last":
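In this hunk the token tensor, previously built on the CPU with torch.LongTensor and then moved to the embedding weight's device, is now constructed in a single torch.asarray call with dtype and device specified up front. A minimal standalone sketch of the equivalence (the token_ids values below are illustrative, not taken from the commit):

import torch

# Token ids as a tokenizer might produce them; the values are made up.
token_ids = [[49406, 320, 1125, 539, 320, 2368, 49407]]

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Old approach: build a CPU LongTensor, then copy it to the target device.
old_tokens = torch.LongTensor(token_ids).to(device)

# New approach: one call, with dtype and device given at construction time.
new_tokens = torch.asarray(token_ids, dtype=torch.int64, device=device)

assert old_tokens.dtype == new_tokens.dtype == torch.int64
assert old_tokens.device == new_tokens.device
assert torch.equal(old_tokens, new_tokens)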
@@ -149,7 +149,8 @@ class SD3Inferencer(torch.nn.Module):
         return contextlib.nullcontext()

     def get_learned_conditioning(self, batch: list[str]):
-        return self.cond_stage_model(batch)
+        with devices.without_autocast():
+            return self.cond_stage_model(batch)

     def apply_model(self, x, t, cond):
         return self.model.apply_model(x, t, c_crossattn=cond['crossattn'], y=cond['vector'])
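This hunk carries the actual fix: get_learned_conditioning now evaluates the conditioning model outside any active autocast region, so the text encoders run at their native precision even when the rest of the pipeline is autocast to half precision, which is what happens without --precision half and what produced the NaNs. The devices.without_autocast helper itself is not part of this diff; a hedged sketch of how such a guard can be written, assuming it only needs to suspend an active CUDA autocast region:

import contextlib

import torch

def without_autocast(disable=False):
    # Assumed behavior, not the webui source: if an autocast region is
    # currently active (and the guard is enabled), open a nested region with
    # autocast turned off so the wrapped code runs at the tensors' native
    # precision; otherwise act as a no-op context manager.
    if torch.is_autocast_enabled() and not disable:
        return torch.autocast("cuda", enabled=False)
    return contextlib.nullcontext()

Used as in the hunk above, this keeps the conditioning pass in full precision while sampling itself can remain under autocast.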