Commit a336c7fe authored by AUTOMATIC1111, committed by GitHub

Merge pull request #9017 from camenduru/dev

convert to python v3.9
parents 4c1ad743 6a147db1
@@ -2,6 +2,7 @@ import glob
 import os
 import re
 import torch
+from typing import Union

 from modules import shared, devices, sd_models, errors
@@ -235,7 +236,7 @@ def lora_calc_updown(lora, module, target):
     return updown

-def lora_apply_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.MultiheadAttention):
+def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
     """
     Applies the currently selected set of Loras to the weights of torch layer self.
     If weights already have this particular set of loras applied, does nothing.
@@ -295,7 +296,7 @@ def lora_apply_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.MultiheadAttention):
     setattr(self, "lora_current_names", wanted_names)

-def lora_reset_cached_weight(self: torch.nn.Conv2d | torch.nn.Linear):
+def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
     setattr(self, "lora_current_names", ())
     setattr(self, "lora_weights_backup", None)
...
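Why the change is needed: the `X | Y` union syntax for annotations (PEP 604) only works on Python 3.10+. On Python 3.9, annotations are evaluated when the `def` statement runs, and `type | type` raises a TypeError, so the module fails to import. The sketch below is not part of the commit; the function names are made up for illustration.

# Minimal sketch of the Python 3.9 behavior this commit works around.
from typing import Union
import torch

def ok(self: Union[torch.nn.Conv2d, torch.nn.Linear]):  # works on Python 3.7+
    pass

# def broken(self: torch.nn.Conv2d | torch.nn.Linear):  # on Python 3.9 this raises
#     pass                                               # TypeError: unsupported operand type(s) for |

An alternative would have been `from __future__ import annotations`, which defers evaluation of annotations; spelling the types as `typing.Union` works just as well and keeps the annotations usable for runtime inspection.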