        et_count = density_map.sum().item()

        if save_output:
            # Undo the input normalization so the image can be saved for viewing.
            image = data.mul_(torch.Tensor([0.229, 0.224, 0.225]).view(3, 1, 1)) \
                        .add_(torch.Tensor([0.485, 0.456, 0.406]).view(3, 1, 1))
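            # The two in-place ops above invert a torchvision-style
            # Normalize(mean, std): x_norm = (x - mean) / std, hence
            # x = x_norm * std + mean, with the usual ImageNet statistics
            # (std = [0.229, 0.224, 0.225], mean = [0.485, 0.456, 0.406]).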
            if args.test_fixed_size != -1:
                # The image was evaluated as a grid of fixed-size crops; stitch
                # the crops back into one padded canvas.
                H, W = mask.shape
                _, _, fixed_size = data[0].shape
                assert args.test_fixed_size == fixed_size
                initial_img = torch.zeros((3, H, W))
                for img_slice, (x, y) in zip(image, itertools.product(range(W // fixed_size), range(H // fixed_size))):
                    initial_img[:, y * fixed_size:(y + 1) * fixed_size, x * fixed_size:(x + 1) * fixed_size] = img_slice
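                # itertools.product(range(W // fs), range(H // fs)) yields (x, y)
                # pairs column-major (all y for x = 0, then x = 1, ...), so the
                # crop order from the loader must follow the same traversal. A
                # quick check on a hypothetical 2x2 grid:
                #
                #   >>> list(itertools.product(range(2), range(2)))
                #   [(0, 0), (0, 1), (1, 0), (1, 1)]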
                # The mask marks the valid (unpadded) region; its per-column and
                # per-row sums recover the true image height and width.
                H = int(mask.sum(dim=0).max().item())
                W = int(mask.sum(dim=1).max().item())
                initial_img = initial_img.masked_select(mask).view(3, H, W)
                image = initial_img
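                # masked_select broadcasts the (H, W) mask over the channel dim
                # and returns a flat tensor of the kept pixels; because the valid
                # region is a solid rectangle, view(3, H, W) restores its shape.
                # Sketch with a hypothetical 2x2 valid region in a 3x3 canvas:
                #
                #   canvas = torch.arange(9.).view(1, 3, 3)
                #   keep = torch.zeros(3, 3, dtype=torch.bool); keep[:2, :2] = True
                #   patch = canvas.masked_select(keep).view(1, 2, 2)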
            image = image.data.cpu().numpy()
            dgen.save_image(image.transpose((1, 2, 0)) * 255.0, save_path, fname[0].split('.')[0] + "_0_img.png")
            gt_dens = gt_dens.data.cpu().numpy()
            density_map = density_map.data.cpu().numpy()
            dgen.save_density_map(gt_dens.squeeze(), save_path, fname[0].split('.')[0] + "_1_gt.png")
            dgen.save_density_map(density_map.squeeze(), save_path, fname[0].split('.')[0] + "_2_et.png")
            del gt_dens
        del data, dens
detail += "index: {}; fname: {}; gt: {}; et: {};\n".format(i, fname[0].split('.')[0], gt_count, et_count)
|
mae += abs(gt_count-et_count)
|
mse += ((gt_count-et_count)*(gt_count-et_count))
|
mae = mae/len(data_loader)
|
mse = np.sqrt(mse/len(data_loader))
|
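    # The averages above are the standard counting metrics:
    #   MAE  = (1/N) * sum_i |gt_i - et_i|
    #   RMSE = sqrt((1/N) * sum_i (gt_i - et_i)^2)
    # Worked example with hypothetical counts gt = [10, 20], et = [12, 17]:
    #   MAE = (2 + 3) / 2 = 2.5,  RMSE = sqrt((4 + 9) / 2) ≈ 2.55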
    duration = timer.toc(average=False)
    print("testing time: %d" % duration)
    return mae, mse, detail

if __name__ == '__main__':
    args = parser.parse_args()

    # Parse the comma-separated GPU list, e.g. "0,1" -> [0, 1]; negative ids
    # are skipped, so "-1" leaves the list empty and CUDA untouched.
    str_ids = args.gpus.split(',')
    args.gpus = []
    for str_id in str_ids:
        gpu_id = int(str_id)
        if gpu_id >= 0:
            args.gpus.append(gpu_id)
    if len(args.gpus) > 0:
        torch.cuda.set_device(args.gpus[0])
    # Testing needs no training loss, uses adaptive cropping, and takes its
    # weights from the checkpoint files below rather than a pretrain model.
    args.loss = None
    args.test_crop_type = 'Adap'
    args.pretrain = None

    data_loader_test = CreateDataLoader(args, phase='test')
    optimizer = lambda x: torch.optim.Adam(filter(lambda p: p.requires_grad, x.parameters()))
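    # The lambda above is an optimizer factory: presumably CrowdCounter calls it
    # on its internal network to build Adam over the trainable parameters only.
    # An equivalent expanded form (a sketch; hyperparameters stay at Adam's
    # defaults, exactly as in the lambda):
    #
    #   def make_optimizer(model):
    #       trainable = [p for p in model.parameters() if p.requires_grad]
    #       return torch.optim.Adam(trainable)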
    net = CrowdCounter(optimizer=optimizer, opt=args)

    if args.model_path.endswith('.h5'):
        # A single checkpoint file: test it once and write output beside it.
        output_path = args.model_path[:-3] + '/output/'
        if not os.path.exists(args.model_path[:-3]):
            os.mkdir(args.model_path[:-3])
        test_once = True
    else:
        # A directory of checkpoints: test each .h5 file it contains.
        output_path = args.model_path + '/output/'
        test_once = False
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    if test_once:
        model_files = [args.model_path]
    elif args.epoch is not None:
        model_files = ['%06d.h5' % args.epoch]
        assert args.save_output
    elif not args.is_wait:
        def list_dir(watch_path):
            return itertools.chain(*[[filename] if (os.path.isfile(os.path.join(watch_path, filename)) and '.h5' in filename)
                                     else []
                                     for filename in os.listdir(watch_path)])
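        # list_dir flattens per-file singleton lists with itertools.chain; an
        # equivalent, more direct form (a sketch of the same filter):
        #
        #   def list_dir(watch_path):
        #       return [f for f in os.listdir(watch_path)
        #               if os.path.isfile(os.path.join(watch_path, f)) and '.h5' in f]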
        model_files = list(list_dir(args.model_path))
        model_files.sort()
        model_files = model_files[::-1]  # zero-padded names sort by epoch, so this puts the newest first
        assert not args.save_output
    else:
        # Wait mode: probe the fixed epoch-numbered checkpoints 000000.h5 .. 000300.h5.
        model_files = ['%06d.h5' % epoch for epoch in range(0, 301)]
        assert not args.save_output
    if args.split is not None:
        # args.split is a trailing-delimited epoch list, e.g. "10,20,30," -> [10, 20, 30].
        model_files = ['%06d.h5' % epoch for epoch in map(int, args.split[:-1].split(','))]
    print(model_files)