Commit 31306ce6 authored by AUTOMATIC1111's avatar AUTOMATIC1111

change the behavior of discard_next_to_last_sigma for sgm_uniform to match other schedulers

parent ac9aa44c
@@ -113,10 +113,6 @@ class KDiffusionSampler(sd_samplers_common.Sampler):
         if scheduler.need_inner_model:
             sigmas_kwargs['inner_model'] = self.model_wrap
-        if scheduler.name == "sgm_uniform":  # XXX check this
-            # Ensure the "step" will be target step + 1
-            steps += 1 if not discard_next_to_last_sigma else 0
         sigmas = scheduler.function(n=steps, **sigmas_kwargs, device=shared.device)
         if discard_next_to_last_sigma:
...
@@ -25,7 +25,7 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
     end = inner_model.sigma_to_t(torch.tensor(sigma_min))
     sigs = [
         inner_model.t_to_sigma(ts)
-        for ts in torch.linspace(start, end, n)[:-1]
+        for ts in torch.linspace(start, end, n + 1)[:-1]
     ]
     sigs += [0.0]
     return torch.FloatTensor(sigs).to(device)
...
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment