|
|
"""Code is adapted from https://github.com/CompVis/stable-diffusion/blob/21f890f9da3cfbeaba8e2ac3c425ee9e998d5229/ldm/modules/diffusionmodules/util.py""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from inspect import isfunction |
|
|
import math |
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import numpy as np |
|
|
|
|
|
|
|
|
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    """Build a diffusion beta (noise-variance) schedule.

    :param schedule: one of "linear", "cosine", "sqrt_linear", "sqrt".
    :param n_timestep: number of diffusion steps; length of the returned array.
    :param linear_start: first beta for the linear-family schedules.
    :param linear_end: last beta for the linear-family schedules.
    :param cosine_s: small offset used by the cosine schedule.
    :raises ValueError: if `schedule` is not one of the names above.
    :return: a float64 numpy array of shape (n_timestep,).
    """
    if schedule == "linear":
        # "linear" here means linear in sqrt-space: interpolate between the
        # square roots of the endpoints, then square.
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )
    elif schedule == "cosine":
        # Cosine schedule from Nichol & Dhariwal: alphas_cumprod follows a
        # squared-cosine curve; betas are derived from consecutive ratios.
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * math.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # Bug fix: the original called np.clip(...) on a torch tensor, which
        # converts it to a numpy array, so betas.numpy() below raised
        # AttributeError. torch.clamp keeps betas a tensor on this path too.
        betas = torch.clamp(betas, min=0, max=0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
    elif schedule == "sqrt":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()
|
|
|
|
|
|
|
|
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
    """Pick the subset of DDPM timesteps used by the DDIM sampler.

    :param ddim_discr_method: 'uniform' (evenly strided) or 'quad'
        (quadratically spaced, denser near t=0).
    :param num_ddim_timesteps: how many steps to select.
    :param num_ddpm_timesteps: total steps of the underlying DDPM.
    :param verbose: print the selected steps when True.
    :raises NotImplementedError: for an unknown discretization method.
    :return: integer numpy array of selected timesteps (1-based).
    """
    if ddim_discr_method == 'uniform':
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.arange(0, num_ddpm_timesteps, stride)
    elif ddim_discr_method == 'quad':
        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
    else:
        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')

    # Shift by one so indexing into the alpha arrays starts at the first
    # post-noise step rather than step 0.
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f'Selected timesteps for ddim sampler: {steps_out}')
    return steps_out
|
|
|
|
|
|
|
|
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Derive the DDIM sampler coefficients (sigma_t, alpha_t, alpha_{t-1}).

    :param alphacums: 1-D array of cumulative alpha products, indexable by step.
    :param ddim_timesteps: integer array of selected timesteps.
    :param eta: stochasticity knob; eta=0 gives deterministic DDIM.
    :param verbose: print the derived schedules when True.
    :return: tuple (sigmas, alphas, alphas_prev) of numpy arrays.
    """
    # alpha at each selected step; the "previous" alpha is the same sequence
    # shifted back by one, anchored at alphacums[0] for the first entry.
    alphas = alphacums[ddim_timesteps]
    prev_list = [alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()
    alphas_prev = np.asarray(prev_list)

    # sigma_t per the DDIM paper; scales the injected noise at each step.
    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
    if verbose:
        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
        print(f'For the chosen value of eta, which is {eta}, '
              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
    return sigmas, alphas, alphas_prev
|
|
|
|
|
|
|
|
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Discretize a continuous alpha-bar function into a beta schedule.

    Each beta_i is 1 minus the ratio of alpha_bar at two consecutive
    fractional times, capped at max_beta to avoid singularities.

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    :return: numpy array of betas, length num_diffusion_timesteps.
    """
    steps = num_diffusion_timesteps
    return np.array([
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ])
|
|
|
|
|
|
|
|
def extract_into_tensor(a, t, x_shape, batch_axis=0):
    """Gather per-timestep coefficients and reshape them for broadcasting.

    :param a: 1-D tensor of per-timestep values, gathered along its last dim.
    :param t: integer tensor of timestep indices, one per batch element.
    :param x_shape: shape of the tensor the result will broadcast against.
    :param batch_axis: which axis of x_shape is the batch axis.
    :return: tensor of the gathered values, shaped with size len(t) on
        batch_axis and 1 everywhere else.
    """
    gathered = a.gather(-1, t)
    broadcast_shape = [1] * len(x_shape)
    broadcast_shape[batch_axis] = t.shape[0]
    return gathered.reshape(broadcast_shape)
|
|
|
|
|
|
|
|
def scale_module(module, scale):
    """
    Scale the parameters of a module in place and return it.

    :param module: any nn.Module whose parameters should be scaled.
    :param scale: multiplicative factor applied to every parameter.
    :return: the same module, for chaining.
    """
    # Multiply outside autograd so the scaling is not recorded on the graph.
    with torch.no_grad():
        for param in module.parameters():
            param.mul_(scale)
    return module
|
|
|
|
|
|
|
|
def mean_flat(tensor, batch_axis=0):
    """
    Take the mean over all non-batch dimensions.

    :param tensor: input tensor of any rank >= 1.
    :param batch_axis: the axis to preserve; all others are averaged away.
    :return: 1-D tensor of per-batch-element means.
    """
    reduce_dims = [d for d in range(len(tensor.shape)) if d != batch_axis]
    return tensor.mean(dim=reduce_dims)
|
|
|
|
|
|
|
|
def noise_like(shape, device):
    """Sample standard-normal noise with the given shape on the given device.

    :param shape: sequence of ints giving the output shape.
    :param device: torch device for the result.
    :return: tensor of i.i.d. N(0, 1) samples.
    """
    return torch.randn(*shape, device=device)
|
|
|
|
|
|
|
|
def default(val, d):
    """Return val if it is not None, otherwise fall back to d.

    :param val: the candidate value; anything other than None wins.
    :param d: the fallback; called with no arguments if it is a function.
    :return: val, or the (possibly evaluated) fallback.
    """
    if val is None:
        return d() if isfunction(d) else d
    return val
|
|
|