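# Build the diffusion hyperparameters from diffusion_config and move every
# tensor-valued entry onto the device via map_gpu; the entry "T" (presumably
# the integer number of diffusion steps) is skipped because it is not a tensor.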
diffusion_hyperparams = calc_diffusion_hyperparams(**diffusion_config)
for key in diffusion_hyperparams:
    if key != "T":
        diffusion_hyperparams[key] = map_gpu(diffusion_hyperparams[key])
# predefine model
net = Model(**model_config)
print_size(net)
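
# fix_legacy_dict is assumed to remap state_dict keys from older checkpoint
# formats; together with strict=False, loading tolerates keys that were
# renamed or dropped between checkpoint and model.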
# load checkpoint
try:
    # checkpoint = torch.load(model_path, map_location='cpu')
    # net.load_state_dict(checkpoint)
    d = fix_legacy_dict(torch.load(model_path, map_location='cpu'))
    dm = net.state_dict()
    # for k in args.delete_keys:
    #     print(
    #         f"Deleting key {k} because its shape in ckpt ({d[k].shape}) doesn't match "
    #         + f"with shape in model ({dm[k].shape})"
    #     )
    #     del d[k]
    net.load_state_dict(d, strict=False)
    net = map_gpu(net)
    net.eval()
    print('checkpoint successfully loaded')
except Exception:
    raise Exception('No valid model found')
# sampling
C, H, W = model_config["in_channels"], model_config["resolution"], model_config["resolution"]
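# Generate samples in batches; starting the range at n_exist // batchsize skips
# batches that were already written to disk in a previous (interrupted) run.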
for i in tqdm(range(n_exist // batchsize, n_generate // batchsize)):
    if approxdiff == 'STD':  # standard (full-length) diffusion sampling
        Xi = STD_sampling(net, (batchsize, C, H, W), diffusion_hyperparams)

    elif approxdiff == 'STEP':  # fast sampling over a user-defined subset of steps
        user_defined_steps = generation_param["user_defined_steps"]
        Xi = STEP_sampling(net, (batchsize, C, H, W),
                           diffusion_hyperparams,
                           user_defined_steps,
                           kappa=generation_param["kappa"])

    elif approxdiff == 'VAR':  # fast sampling with user-defined noise levels
        user_defined_eta = generation_param["user_defined_eta"]
        continuous_steps = _precompute_VAR_steps(diffusion_hyperparams, user_defined_eta)
        Xi = VAR_sampling(net, (batchsize, C, H, W),
                          diffusion_hyperparams,
                          user_defined_eta,
                          kappa=generation_param["kappa"],
                          continuous_steps=continuous_steps)
        # print(diffusion_hyperparams)
        # print(user_defined_eta)
        # print(continuous_steps)

    # save each sample as an individual image
    for j, x in enumerate(rescale(Xi)):
        index = i * batchsize + j
        save_image(x, fp=os.path.join('generated', output_name, '{}.jpg'.format(index)))

# save an 8x8 preview grid built from the last generated batch
save_image(make_grid(rescale(Xi)[:64]), fp=os.path.join('generated', '{}.jpg'.format(output_name)))
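
# Command-line entry point: pick the dataset/model config, the sampling variant
# (STD = standard, STEP / VAR = fast approximate sampling), and generation settings.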
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # dataset and model
    parser.add_argument('-name', '--name', type=str, default='cifar10', choices=["cifar10", "celeba64"],
                        help='Name of experiment')
    # NB: no type is given for the two flags below, so any value passed on the
    # command line arrives as a string; only the defaults are real booleans.
    parser.add_argument('-ema', '--ema', default=True, help='Whether to use EMA weights')
    parser.add_argument('-pretrain', '--pretrain', default=False, help='Whether to use a pretrained model')

    # fast generation parameters
    parser.add_argument('-approxdiff', '--approxdiff', type=str, default='VAR', choices=['STD', 'STEP', 'VAR'],
                        help='approximate diffusion process')
    parser.add_argument('-kappa', '--kappa', type=float, default=1.0, help='factor to be multiplied to sigma')
    parser.add_argument('-S', '--S', type=int, default=10, help='number of steps')
    parser.add_argument('-schedule', '--schedule', type=str, choices=['linear', 'quadratic'], default='quadratic',
                        help='noise level schedule')

    # generation util
    parser.add_argument('-n', '--n_generate', type=int, default=50048, help='Number of samples to generate')
    parser.add_argument('-bs', '--batchsize', type=int, default=128, help='Batch size for generation')
    parser.add_argument('-gpu', '--gpu', type=str, default='cuda', choices=['cuda'] + [str(i) for i in range(16)],
                        help='gpu device')

    args = parser.parse_args()
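
    # Example invocation (hypothetical file name; assumes this script is saved as generate.py):
    #   python generate.py --name cifar10 --approxdiff VAR -S 10 --schedule quadratic --kappa 1.0 -bs 128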
    global map_gpu
    map_gpu = _map_gpu(args.gpu)
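    # _map_gpu is assumed to return a device-mapping helper (e.g. a closure that
    # moves tensors and modules onto args.gpu); it is used earlier in this file on
    # both the diffusion hyperparameters and the network.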
    from config import model_config_map
    model_config = model_config_map[args.name]

    kappa = args.kappa
    if args.approxdiff == 'STD':
        variance_schedule = '1000'
        generation_param = {"kappa": kappa}

    elif args.approxdiff == 'VAR':  # user defined variance
        user_defined_eta = get_VAR_noise(args.S, args.schedule)
        generation_param = {"kappa": kappa,
                            "user_defined_eta": user_defined_eta}
        variance_schedule = '{}{}'.format(args.S, args.schedule)

    elif args.approxdiff == 'STEP':  # user defined step
        user_defined_steps = get_STEP_step(args.S, args.schedule)
        generation_param = {"kappa": kappa,
                            "user_defined_steps": user_defined_steps}