text
stringlengths 1
93.6k
|
|---|
T_user = len(user_defined_steps)
|
user_defined_steps = sorted(list(user_defined_steps), reverse=True)
|
x = std_normal(size)
|
with torch.no_grad():
|
for i, tau in enumerate(user_defined_steps):
|
diffusion_steps = tau * map_gpu(torch.ones(size[0]))
|
epsilon_theta = net(x, diffusion_steps)
|
if i == T_user - 1: # the next step is to generate x_0
|
assert tau == 0
|
alpha_next = torch.tensor(1.0)
|
sigma = torch.tensor(0.0)
|
else:
|
alpha_next = Alpha_bar[user_defined_steps[i+1]]
|
sigma = kappa * torch.sqrt((1-alpha_next) / (1-Alpha_bar[tau]) * (1 - Alpha_bar[tau] / alpha_next))
|
x *= torch.sqrt(alpha_next / Alpha_bar[tau])
|
c = torch.sqrt(1 - alpha_next - sigma ** 2) - torch.sqrt(1 - Alpha_bar[tau]) * torch.sqrt(alpha_next / Alpha_bar[tau])
|
x += c * epsilon_theta + sigma * std_normal(size)
|
return x
|
# VAR
|
def _precompute_VAR_steps(diffusion_hyperparams, user_defined_eta):
    """Map a user-defined variance (VAR) schedule onto continuous diffusion steps.

    For each user step t, finds the continuous diffusion time whose cumulative
    noise level matches the user-defined cumulative product Gamma_bar[t], by
    locating the bracketing discrete interval in Alpha_bar and then bisecting
    the continuous log-noise curve inside that interval.

    Parameters:
        diffusion_hyperparams (dict): must contain keys "T", "Alpha",
            "Alpha_bar", "Beta" as produced by calc_diffusion_hyperparams;
            the tensors are expected to live on the same device map_gpu targets.
        user_defined_eta (np.ndarray): user-defined per-step noise values
            (length T_user); presumably each in (0, 1) — TODO confirm against caller.

    Returns:
        list of float: continuous step values, one per user step, appended in
        decreasing order (t = T_user-1 down to 0).
    """
    _dh = diffusion_hyperparams
    T, Alpha, Alpha_bar, Beta = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Beta"]
    assert len(Alpha_bar) == T

    # Compute diffusion hyperparameters for the user-defined noise schedule:
    # Gamma_bar[t] is the cumulative product of (1 - eta) up to step t,
    # the user-schedule analogue of Alpha_bar.
    T_user = len(user_defined_eta)
    Beta_tilde = map_gpu(torch.from_numpy(user_defined_eta)).to(torch.float32)
    Gamma_bar = 1 - Beta_tilde
    for t in range(1, T_user):
        Gamma_bar[t] *= Gamma_bar[t - 1]

    # The user schedule must lie inside the range spanned by the trained schedule,
    # otherwise no matching continuous step exists.
    assert Gamma_bar[0] <= Alpha_bar[0] and Gamma_bar[-1] >= Alpha_bar[-1]

    continuous_steps = []
    with torch.no_grad():
        # Iterate from the noisiest user step down to 0 so the resulting
        # list of continuous steps is decreasing.
        for t in range(T_user - 1, -1, -1):
            t_adapted = None
            for i in range(T - 1):
                # Gamma_bar[t] falls between Alpha_bar[i] and Alpha_bar[i+1]:
                # bisect the continuous log-noise curve within (i, i+1)
                # (domain padded by 0.01 so the root is strictly interior).
                if Alpha_bar[i] >= Gamma_bar[t] > Alpha_bar[i + 1]:
                    t_adapted = bisearch(
                        f=lambda _t: _log_cont_noise(_t, Beta[0].cpu().numpy(), Beta[-1].cpu().numpy(), T),
                        domain=(i - 0.01, i + 1.01),
                        target=np.log(Gamma_bar[t].cpu().numpy()),
                    )
                    break
            if t_adapted is None:
                # Gamma_bar[t] is below Alpha_bar[T-1]: clamp to the last step.
                t_adapted = T - 1
            continuous_steps.append(t_adapted)  # must be decreasing
    return continuous_steps
|
def VAR_sampling(net, size, diffusion_hyperparams, user_defined_eta, kappa, continuous_steps):
|
"""
|
Perform the complete sampling step according to user defined variances
|
Parameters:
|
net (torch network): the model
|
size (tuple): size of tensor to be generated,
|
usually is (number of audios to generate, channels=1, length of audio)
|
diffusion_hyperparams (dict): dictionary of diffusion hyperparameters returned by calc_diffusion_hyperparams
|
note, the tensors need to be cuda tensors
|
user_defined_eta (np.array): User defined noise
|
kappa (float): factor multipled over sigma, between 0 and 1
|
continuous_steps (list): continuous steps computed from user_defined_eta
|
Returns:
|
the generated images in torch.tensor, shape=size
|
"""
|
_dh = diffusion_hyperparams
|
T, Alpha, Alpha_bar, Beta = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Beta"]
|
assert len(Alpha_bar) == T
|
assert len(size) == 4
|
assert 0.0 <= kappa <= 1.0
|
# compute diffusion hyperparameters for user defined noise
|
T_user = len(user_defined_eta)
|
Beta_tilde = map_gpu(torch.from_numpy(user_defined_eta)).to(torch.float32)
|
Gamma_bar = 1 - Beta_tilde
|
for t in range(1, T_user):
|
Gamma_bar[t] *= Gamma_bar[t-1]
|
assert Gamma_bar[0] <= Alpha_bar[0] and Gamma_bar[-1] >= Alpha_bar[-1]
|
# print('begin sampling, total number of reverse steps = %s' % T_user)
|
x = std_normal(size)
|
with torch.no_grad():
|
for i, tau in enumerate(continuous_steps):
|
diffusion_steps = tau * map_gpu(torch.ones(size[0]))
|
epsilon_theta = net(x, diffusion_steps)
|
if i == T_user - 1: # the next step is to generate x_0
|
assert abs(tau) < 0.1
|
alpha_next = torch.tensor(1.0)
|
sigma = torch.tensor(0.0)
|
else:
|
alpha_next = Gamma_bar[T_user-1-i - 1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.