"pad_cond_uncond":OptionInfo(False,"Pad prompt/negative prompt to be same length",infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
"pad_cond_uncond":OptionInfo(False,"Pad prompt/negative prompt to be same length",infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
"persistent_cond_cache":OptionInfo(True,"Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
"persistent_cond_cache":OptionInfo(True,"Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
"batch_cond_uncond":OptionInfo(True,"Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
"batch_cond_uncond":OptionInfo(True,"Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
"fp8_storage":OptionInfo("Disable","FP8 weight",gr.Dropdown,{"choices":["Disable","Enable for SDXL","Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
"fp8_storage":OptionInfo("Disable","FP8 weight",gr.Radio,{"choices":["Disable","Enable for SDXL","Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
"cache_fp16_weight":OptionInfo(False,"Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),
"cache_fp16_weight":OptionInfo(False,"Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),