            sigma = kappa * torch.sqrt((1-alpha_next) / (1-Gamma_bar[T_user-1-i]) * (1 - Gamma_bar[T_user-1-i] / alpha_next))
        x *= torch.sqrt(alpha_next / Gamma_bar[T_user-1-i])  # x_prev multiplier
        c = torch.sqrt(1 - alpha_next - sigma ** 2) - torch.sqrt(1 - Gamma_bar[T_user-1-i]) * torch.sqrt(alpha_next / Gamma_bar[T_user-1-i])  # theta multiplier
        # at the final step sigma is zero, so no fresh noise is added there
        x += c * epsilon_theta + sigma * std_normal(size)
    return x

def VAR_sampling_new(net, size, diffusion_hyperparams, user_defined_eta, kappa, continuous_steps):
    """
    Perform the complete sampling step according to user-defined variances

    Parameters:
    net (torch network):            the model
    size (tuple):                   size of tensor to be generated,
                                    usually is (number of images to generate, channels, height, width)
    diffusion_hyperparams (dict):   dictionary of diffusion hyperparameters returned by calc_diffusion_hyperparams
                                    note, the tensors need to be cuda tensors
    user_defined_eta (np.array):    user-defined variance schedule, one entry per reverse step
    kappa (float):                  factor multiplied over sigma, between 0 and 1
    continuous_steps (list):        continuous steps computed from user_defined_eta

    Returns:
    the generated images in torch.tensor, shape=size
    """
    _dh = diffusion_hyperparams
    T, Alpha, Alpha_bar, Beta = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Beta"]
    assert len(Alpha_bar) == T
    assert len(size) == 4
    assert 0.0 <= kappa <= 1.0

    # compute diffusion hyperparameters for the user-defined noise schedule;
    # Gamma_bar is the running product of (1 - Beta_tilde), the analogue of
    # Alpha_bar for the user-defined schedule
    T_user = len(user_defined_eta)
    Beta_tilde = map_gpu(torch.from_numpy(user_defined_eta)).to(torch.float32)
    Gamma_bar = 1 - Beta_tilde
    for t in range(1, T_user):
        Gamma_bar[t] *= Gamma_bar[t-1]
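    # e.g. user_defined_eta = [0.1, 0.2] gives Beta_tilde = [0.1, 0.2],
    # so Gamma_bar = [0.9, 0.9 * 0.8] = [0.9, 0.72]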
    assert Gamma_bar[0] <= Alpha_bar[0] and Gamma_bar[-1] >= Alpha_bar[-1]
    # print('begin sampling, total number of reverse steps = %s' % T_user)

    # precompute, for each reverse step i, the three coefficients of the update
    # x <- x_prev_multiplier[i] * x + theta_multiplier[i] * eps_theta + std[i] * z
    x_prev_multiplier = torch.zeros(T_user)
    theta_multiplier = torch.zeros(T_user)
    std = torch.zeros(T_user)
    for i, tau in enumerate(continuous_steps):
        if i == T_user - 1:  # the next step is to generate x_0
            assert abs(tau) < 0.1
            alpha_next = torch.tensor(1.0)
            sigma = torch.tensor(0.0)
        else:
            alpha_next = Gamma_bar[T_user-1-i - 1]
            sigma = kappa * torch.sqrt((1-alpha_next) / (1-Gamma_bar[T_user-1-i]) * (1 - Gamma_bar[T_user-1-i] / alpha_next))
        x_prev_multiplier[i] = torch.sqrt(alpha_next / Gamma_bar[T_user-1-i])
        theta_multiplier[i] = torch.sqrt(1 - alpha_next - sigma ** 2) - torch.sqrt(1 - Gamma_bar[T_user-1-i]) * torch.sqrt(alpha_next / Gamma_bar[T_user-1-i])
        if i == T_user - 1:
            std[i] = 0.001  # small fixed noise at the final step instead of exactly zero
        else:
            std[i] = sigma
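
    # These coefficients implement a generalized DDIM update: with
    # gamma_t = Gamma_bar[T_user-1-i] the current noise level and
    # gamma_s = alpha_next the next (less noisy) one,
    #   x_s = sqrt(gamma_s / gamma_t) * x_t
    #         + (sqrt(1 - gamma_s - sigma^2) - sqrt(gamma_s / gamma_t) * sqrt(1 - gamma_t)) * eps_theta
    #         + sigma * z,    z ~ N(0, I).
    # kappa = 0 gives the deterministic DDIM update; kappa = 1 recovers the
    # DDPM posterior (ancestral) variance.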

    # reverse sampling: start from pure noise and apply the precomputed updates
    x = std_normal(size)
    with torch.no_grad():
        for i, tau in enumerate(continuous_steps):
            diffusion_steps = tau * map_gpu(torch.ones(size[0]))
            epsilon_theta = net(x, diffusion_steps)
            x = x * x_prev_multiplier[i] + theta_multiplier[i] * epsilon_theta + std[i] * std_normal(size)
    return x
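
# A minimal usage sketch (illustration only, not part of the original script):
# `make_continuous_steps` is a hypothetical helper standing in for however the
# continuous steps are computed from user_defined_eta elsewhere in this file;
# `net` and `diffusion_hyperparams` are assumed to be already set up.
#
#   user_defined_eta = np.geomspace(1e-4, 2e-2, num=10)   # 10 reverse steps
#   continuous_steps = make_continuous_steps(diffusion_hyperparams, user_defined_eta)
#   x0 = VAR_sampling_new(net, size=(16, 3, 32, 32),
#                         diffusion_hyperparams=diffusion_hyperparams,
#                         user_defined_eta=user_defined_eta,
#                         kappa=1.0, continuous_steps=continuous_steps)
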
def generate(output_name, model_path, model_config,
             diffusion_config, approxdiff, generation_param,
             n_generate, batchsize, n_exist):
    """
    Parameters:
    output_name (str):          folder to save the generated images to
    model_path (str):           path to the checkpoint file
    model_config (dict):        dictionary of the model config
    diffusion_config (dict):    dictionary of the diffusion config
    approxdiff (str):           diffusion style: STD, STEP, or VAR
    generation_param (dict):    user-defined variances or user-defined steps
    n_generate (int):           number of samples to generate
    batchsize (int):            batch size used for generation
    n_exist (int):              number of samples that already exist

    Returns:
    Generated images (tensor): (B, C, H, W) where C = 3
    """
    if batchsize > n_generate:
        batchsize = n_generate
    assert n_generate % batchsize == 0

    if 'generated' not in os.listdir():
        os.mkdir('generated')
    if output_name not in os.listdir('generated'):
        os.mkdir(os.path.join('generated', output_name))

    # map diffusion hyperparameters to gpu
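    # (a sketch of this step, assumed rather than taken from the original file:
    # each tensor-valued hyperparameter returned by calc_diffusion_hyperparams
    # would be moved to the GPU with map_gpu)
    #
    #   diffusion_hyperparams = calc_diffusion_hyperparams(**diffusion_config)
    #   for key in diffusion_hyperparams:
    #       if key != "T":  # "T" is an int, not a tensor
    #           diffusion_hyperparams[key] = map_gpu(diffusion_hyperparams[key])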