Commit 9bcfb92a authored by AUTOMATIC1111

rename logging from textual inversion to not confuse it with global logging module

parent d74fc56f
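The point of the rename: a package-local module named `logging` is easy to misread as Python's standard `logging` module at every import site. A minimal sketch of the mix-up this avoids (the failing call is a hypothetical illustration, not code from this commit):

```python
# Before this commit the package shipped modules/textual_inversion/logging.py,
# so this import bound the local settings helper, not the standard library:
from modules.textual_inversion import logging

# A reader (or a refactor) expecting the stdlib API here would fail, because
# the local module only exposed save_settings_to_file():
logging.getLogger(__name__)  # AttributeError: no attribute 'getLogger'

# After the rename, both names are unambiguous:
from modules.textual_inversion import saving_settings
import logging  # now clearly the standard library module
```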
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -11,7 +11,7 @@ import tqdm
 from einops import rearrange, repeat
 from ldm.util import default
 from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
-from modules.textual_inversion import textual_inversion, logging
+from modules.textual_inversion import textual_inversion, saving_settings
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 from torch import einsum
 from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
@@ -533,7 +533,7 @@ def train_hypernetwork(id_task, hypernetwork_name: str, learn_rate: float, batch
             model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
             **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
         )
-        logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
+        saving_settings.save_settings_to_file(log_directory, {**saved_params, **locals()})
         latent_sampling_method = ds.latent_sampling_method
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -17,7 +17,7 @@ import modules.textual_inversion.dataset
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
-from modules.textual_inversion.logging import save_settings_to_file
+from modules.textual_inversion.saving_settings import save_settings_to_file
 TextualInversionTemplate = namedtuple("TextualInversionTemplate", ["name", "path"])
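Both call sites in the diff pass a log directory and a flat dict of parameters, which pins down the function's shape. A minimal sketch of what `saving_settings.save_settings_to_file` might do under that assumption; the output file name and formatting here are guesses, not the repository's actual implementation:

```python
import os

def save_settings_to_file(log_directory, all_params):
    # Hypothetical reconstruction: dump every training parameter as one
    # "name: value" line into a text file inside the run's log directory.
    os.makedirs(log_directory, exist_ok=True)
    with open(os.path.join(log_directory, "train_settings.txt"), "w", encoding="utf8") as fout:
        for name, value in sorted(all_params.items()):
            fout.write(f"{name}: {value}\n")
```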