Commit e59c66c0 authored by Fampai's avatar Fampai Committed by AUTOMATIC1111

Optimized code for ignoring last CLIP layers

parent 6c383d2e
......@@ -282,11 +282,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
tmp = -opts.CLIP_ignore_last_layers
if (opts.CLIP_ignore_last_layers == 0):
outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
z = outputs.last_hidden_state
else:
tmp = -opts.CLIP_stop_at_last_layers
outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=tmp)
z = outputs.hidden_states[tmp]
z = self.wrapped.transformer.text_model.final_layer_norm(z)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment