| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | import torch |
| | from tqdm import tqdm |
| |
|
| | |
| | import matplotlib.pyplot as plt |
| | import numpy as np |
| | import matplotlib |
| | matplotlib.style.use('ggplot') |
| | import sys |
| | import os |
| |
|
| |
|
| | |
| | |
# Dataset location and split name.
# NOTE(review): "~" is NOT expanded automatically by open()/os.path.exists();
# callers must apply os.path.expanduser() before using this path.
DATASET_ODIR = "~/semantic2d_data/2024-04-04-12-16-41"
DATASET_NAME = "train"
SEMANTIC_MASK_ODIR = "./output"


# 2D lidar geometry: 1081 beams spanning +/- 2.356 rad (i.e. a 270-degree FOV).
# NOTE(review): "AGNLE" is a typo for "ANGLE"; kept as-is because the
# __main__ section below references these exact names.
POINTS = 1081
AGNLE_MIN = -2.356194496154785  # radians (-135 degrees)
AGNLE_MAX = 2.356194496154785   # radians (+135 degrees)
RANGE_MAX = 60.0                # meters; not referenced in this file — TODO confirm still needed
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
# Line separator used when parsing the dataset index (.txt) file.
NEW_LINE = "\n"
| | |
| | |
class Semantic2DLidarDataset(torch.utils.data.Dataset):
    """Dataset of 2D lidar scans with per-point semantic labels.

    Reads an index file ``<img_path>/<file_name>.txt`` listing one ``.npy``
    file name per line, and resolves each name against three sibling
    directories under ``img_path``: ``scans_lidar`` (range readings),
    ``intensities_lidar`` and ``semantic_label``.
    """

    def __init__(self, img_path, file_name):
        """Build the file-name lists from the index file.

        Args:
            img_path: dataset root directory.
            file_name: index file name without the ``.txt`` extension.
        """
        self.scan_file_names = []
        self.intensity_file_names = []
        # Kept for interface compatibility; never populated in this class.
        self.vel_file_names = []
        self.label_file_names = []

        # Range bounds in meters. NOTE(review): these are not applied in
        # __getitem__, which zeroes ranges >= 15 instead — TODO confirm intent.
        self.s_max = 30
        self.s_min = 0

        # BUGFIX: use a context manager so the index file is closed even if
        # parsing raises (the original left it open on exception).
        with open(img_path + '/' + file_name + '.txt', 'r') as fp_file:
            for line in fp_file.read().splitlines():
                if '.npy' in line:
                    self.scan_file_names.append(img_path + '/scans_lidar/' + line)
                    self.intensity_file_names.append(img_path + '/intensities_lidar/' + line)
                    self.label_file_names.append(img_path + '/semantic_label/' + line)

        self.length = len(self.scan_file_names)
        print("dataset length: ", self.length)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Load sample ``idx`` and return a dict of float tensors.

        Returns:
            dict with keys 'scan', 'intensity', 'label', each a FloatTensor
            with the shape stored in the corresponding ``.npy`` file.
        """
        # The original pre-allocated zero arrays here that were immediately
        # overwritten by np.load — dead code, removed.
        scan = np.load(self.scan_file_names[idx])
        intensity = np.load(self.intensity_file_names[idx])
        label = np.load(self.label_file_names[idx])

        # Sanitize invalid sensor readings: NaN/inf become 0.
        scan[np.isnan(scan)] = 0.
        scan[np.isinf(scan)] = 0.

        intensity[np.isnan(intensity)] = 0.
        intensity[np.isinf(intensity)] = 0.

        # Discard long-range returns by zeroing anything >= 15 m.
        scan[scan >= 15] = 0.

        label[np.isnan(label)] = 0.
        label[np.isinf(label)] = 0.

        return {
            'scan': torch.FloatTensor(scan),
            'intensity': torch.FloatTensor(intensity),
            'label': torch.FloatTensor(label),
        }
| |
|
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
if __name__ == '__main__':

    # BUGFIX: expand "~" so the dataset root resolves to the user's home
    # directory — open()/os.path.exists() do not expand it automatically.
    dataset_odir = os.path.expanduser(DATASET_ODIR)
    dataset_name = DATASET_NAME
    semantic_mask_odir = SEMANTIC_MASK_ODIR

    # exist_ok avoids the check-then-create race of the original exists() guard.
    os.makedirs(semantic_mask_odir, exist_ok=True)

    eval_dataset = Semantic2DLidarDataset(dataset_odir, dataset_name)
    eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=1, num_workers=2,
                                                  shuffle=False, drop_last=True, pin_memory=True)

    cnt = 0
    cnt_m = 0

    # With drop_last=True the batch count is the floor division.
    num_batches = len(eval_dataset) // eval_dataloader.batch_size
    for i, batch in tqdm(enumerate(eval_dataloader), total=num_batches):

        # Render every 200th scan as a polar semantic mask.
        if i % 200 == 0:
            scans = batch['scan'].detach().cpu().numpy()
            labels = batch['label'].detach().cpu().numpy()

            r = scans.reshape(POINTS)
            # BUGFIX: endpoint expects a bool; the original passed the string
            # 'true', which only worked because non-empty strings are truthy.
            theta = np.linspace(AGNLE_MIN, AGNLE_MAX, num=POINTS, endpoint=True)

            fig = plt.figure(figsize=(12, 12))
            ax = fig.add_subplot(1, 1, 1, projection='polar', facecolor='seashell')
            smap = labels.reshape(POINTS)

            # Append one dummy point (behind the sensor, label 0) so class 0
            # is always present and the colormap scaling stays consistent.
            theta = np.insert(theta, -1, np.pi)
            r = np.insert(r, -1, 1)
            smap = np.insert(smap, -1, 0)
            label_val = np.unique(smap).astype(int)
            print("label_values: ", label_val)

            colors = smap
            area = 6
            scatter = ax.scatter(theta, r, c=colors, s=area, cmap='nipy_spectral', alpha=0.95, linewidth=10)
            ax.set_xticks(np.linspace(AGNLE_MIN, AGNLE_MAX, 8, endpoint=True))
            ax.set_thetamin(-135)
            ax.set_thetamax(135)
            ax.set_yticklabels([])

            classes = ['Other', 'Chair', 'Door', 'Elevator', 'Person', 'Pillar', 'Sofa', 'Table', 'Trash bin', 'Wall']
            plt.xticks(fontsize=16)
            plt.yticks(fontsize=16)
            plt.legend(handles=scatter.legend_elements(num=[j for j in label_val])[0],
                       labels=[classes[j] for j in label_val],
                       bbox_to_anchor=(0.5, -0.08), loc='lower center', fontsize=18)
            ax.grid(False)
            ax.set_theta_offset(np.pi / 2)

            input_img_name = semantic_mask_odir + "/semantic_mask" + str(i) + ".png"
            plt.savefig(input_img_name, bbox_inches='tight')
            plt.show()
            # BUGFIX: close the figure — the original never did, so long runs
            # accumulated open figures and matplotlib warned about memory use.
            plt.close(fig)

            print(i)
| | |
| |
|