import os
import re
import json
import argparse
import logging
from typing import Any, Dict

from utils.classes import CLASS_LIST, map_sub_class_to_primary_class, map_class_id_to_class_name

logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%y/%m/%d %H:%M:%S", level=logging.INFO)


class ValidateFile(argparse.Action):
    """
    Custom argparse action to validate file paths.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Validate the file path format
        file_path_pattern = r"^[a-zA-Z0-9_\-\/.#+]+$"
        if not re.match(file_path_pattern, values):
            parser.error(f"Invalid file path: {values}")
        # Check if the file exists
        if not os.path.exists(values):
            parser.error(f"File {values} does NOT exist.")
        # Check if the file is readable
        if not os.access(values, os.R_OK):
            parser.error(f"File {values} is NOT readable.")
        # Set the validated file path in the namespace
        setattr(namespace, self.dest, values)
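
# Usage sketch (illustrative, not part of the original interface): ValidateFile
# is meant to be passed as the `action` of a path argument so the path is
# validated at parse time. The argument name below is hypothetical.
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--gt-path", action=ValidateFile, help="ground truth file")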


def validate_file_path(input_string: str) -> str:
    """
    Validates whether the input string matches a file path pattern

    :param str input_string: input string
    :return: validated file path
    :rtype: str
    ::

        file_path = validate_file_path(input_string)
    """
    file_path_pattern = r"^[a-zA-Z0-9_\-\/.#+]+$"
    if re.match(file_path_pattern, input_string):
        return input_string
    else:
        raise ValueError(f"Invalid file path: {input_string}")


def sanitize_string(input_string: str) -> str:
    """
    Sanitizes an input string by replacing disallowed characters with underscores

    :param str input_string: input string
    :return: sanitized string
    :rtype: str
    ::

        sanitized_string = sanitize_string(input_string)
    """
    # Allow alphanumeric characters, dots, slashes, underscores, hashes, and dashes
    return re.sub(r"[^a-zA-Z0-9\._/#-]", "_", input_string)
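
# Example (assumed input, behavior follows from the regex above): characters
# outside the allow-list are each replaced with an underscore.
#
#     sanitize_string("scene 01:cam(a)")  # -> "scene_01_cam_a_"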


def make_dir(dir_path: str) -> None:
    """
    Safely create a directory.
    """
    valid_dir_path = validate_file_path(dir_path)
    if os.path.islink(valid_dir_path):
        raise ValueError(f"Directory path {dir_path} must not be a symbolic link.")
    try:
        if not os.path.isdir(valid_dir_path):
            os.makedirs(valid_dir_path)
    except OSError as e:
        raise ValueError(f"Failed to create directory {dir_path}: {e}")


def load_json_from_file(file_path: str) -> Any:
    """
    Safely loads JSON data from a file.
    """
    valid_file_path = validate_file_path(file_path)
    try:
        with open(valid_file_path, "r") as f:
            return json.load(f)
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid JSON format in file {file_path}: {e}")
    except Exception as e:
        raise ValueError(f"An error occurred while loading file {file_path}: {e}")


def split_files_per_scene(gt_path: str, pred_path: str, output_base_dir: str, scene_id_2_scene_name: Dict[int, str], num_frames_to_eval: int = 9000):
    """
    Splits GT and Pred files per scene, saving them into separate directories.

    :param gt_path: Path to the ground truth file (space-separated entries).
    :param pred_path: Path to the predictions file (space-separated entries).
    :param output_base_dir: Base directory to save split files.
    :param scene_id_2_scene_name: Mapping from scene id to scene name; its keys define the valid scene ids.
    :param num_frames_to_eval: Prediction entries with a frame id at or above this value are skipped.
    """
    # Create output base directory
    os.makedirs(output_base_dir, exist_ok=True)

    gt_scenes = set()
    pred_scenes = set()

    # Convert the mapping keys to int (they may be strings when loaded from JSON)
    valid_scene_ids = set(int(scene_id) for scene_id in scene_id_2_scene_name.keys())

    # Process GT data
    scene_gt_writers = {}
    with open(gt_path, "r") as gt_file:
        for line in gt_file:
            line_split = line.split(" ")
            scene_id = int(line_split[0])
            gt_scenes.add(scene_id)
            if scene_id not in scene_gt_writers:
                os.makedirs(os.path.join(output_base_dir, f"scene_{scene_id}"), exist_ok=True)
                scene_gt_writers[scene_id] = open(os.path.join(output_base_dir, f"scene_{scene_id}", "gt.txt"), "w")
            scene_gt_writers[scene_id].write(line)
    # Close all GT writers
    for writer in scene_gt_writers.values():
        writer.close()

    # Convert gt_scenes to a sorted list for logging
    gt_scenes = sorted(gt_scenes)
    logging.info(f"Found scenes {gt_scenes} in ground truth.")

    # Process Pred data
    scene_pred_writers = {}
    with open(pred_path, "r") as pred_file:
        for line in pred_file:
            line_split = line.split(" ")
            # Validate line length
            if len(line_split) != 11:
                raise ValueError("Found incorrect entry in predictions. Each entry should have 11 elements: (scene_id class_id object_id frame_id x y z width length height yaw)")
            # Validate scene id
            scene_id = int(line_split[0])
            if scene_id not in valid_scene_ids:
                raise ValueError(f"Found incorrect scene id in predictions: {scene_id}. Valid scene ids are: {valid_scene_ids}, defined by the scene_id_2_scene_name json file")
            # Validate class id
            class_id = int(line_split[1])
            if class_id not in map_class_id_to_class_name:
                raise ValueError(f"Found incorrect class id in predictions: {class_id}. Valid class ids are: {list(map_class_id_to_class_name.keys())}")
            # Validate object id
            object_id = int(line_split[2])
            if object_id < 0:
                raise ValueError(f"Found incorrect object id in predictions: {object_id}. Object id should be 0 or positive.")
            # Validate frame id
            frame_id = int(line_split[3])
            if frame_id < 0:
                raise ValueError(f"Found incorrect frame id in predictions: {frame_id}. Frame id should be 0 or positive.")
            # Only evaluate frames below num_frames_to_eval
            if frame_id >= num_frames_to_eval:
                continue
            pred_scenes.add(scene_id)
            if scene_id not in scene_pred_writers:
                os.makedirs(os.path.join(output_base_dir, f"scene_{scene_id}"), exist_ok=True)
                scene_pred_writers[scene_id] = open(os.path.join(output_base_dir, f"scene_{scene_id}", "pred.txt"), "w")
            scene_pred_writers[scene_id].write(line)
    # Close all Pred writers
    for writer in scene_pred_writers.values():
        writer.close()

    # Convert pred_scenes to a sorted list for logging
    pred_scenes = sorted(pred_scenes)
    logging.info(f"Found scenes {pred_scenes} in predictions.")


def split_files_per_class(gt_path: str, pred_path: str, output_base_dir: str, confidence_threshold: float = 0.0, num_frames_to_eval: int = 20000, ground_truth_frame_offset_secs: float = 0.0, fps: float = 30.0):
    """
    Splits GT and Pred files per class, saving them into separate directories.

    :param gt_path: Path to the ground truth file (space-separated entries).
    :param pred_path: Path to the predictions file (space-separated entries).
    :param output_base_dir: Base directory to save split files.

    Note: confidence_threshold, num_frames_to_eval, ground_truth_frame_offset_secs,
    and fps are accepted for interface consistency but are not used in this function.
    """
    # Create output base directory
    os.makedirs(output_base_dir, exist_ok=True)

    gt_classes = set()
    pred_classes = set()

    # Process GT data
    class_gt_writers = {}
    with open(gt_path, "r") as gt_file:
        for line in gt_file:
            line_split = line.split(" ")
            class_id = int(line_split[1])
            class_name = map_class_id_to_class_name[class_id]
            gt_classes.add(class_name)
            if class_name not in class_gt_writers:
                os.makedirs(os.path.join(output_base_dir, class_name), exist_ok=True)
                class_gt_writers[class_name] = open(os.path.join(output_base_dir, class_name, "gt.txt"), "w")
            class_gt_writers[class_name].write(line)
    # Close all GT writers
    for writer in class_gt_writers.values():
        writer.close()

    # Convert gt_classes to a sorted list for logging
    gt_classes = sorted(gt_classes)
    logging.info(f"Found classes {gt_classes} in ground truth.")

    # Process Pred data
    class_pred_writers = {}
    with open(pred_path, "r") as pred_file:
        for line in pred_file:
            line_split = line.split(" ")
            class_id = int(line_split[1])
            class_name = map_class_id_to_class_name[class_id]
            pred_classes.add(class_name)
            if class_name not in class_pred_writers:
                os.makedirs(os.path.join(output_base_dir, class_name), exist_ok=True)
                class_pred_writers[class_name] = open(os.path.join(output_base_dir, class_name, "pred.txt"), "w")
            class_pred_writers[class_name].write(line)
    # Close all Pred writers
    for writer in class_pred_writers.values():
        writer.close()

    # Convert pred_classes to a sorted list for logging
    pred_classes = sorted(pred_classes)
    logging.info(f"Found classes {pred_classes} in predictions.")


def get_no_of_objects_per_scene(gt_path: str, scene_id_2_scene_name: Dict[int, str]):
    """
    Get the number of objects (one per GT entry) per scene in the ground truth file.
    """
    no_of_objects_per_scene = {}
    # Normalize mapping keys to strings: scene ids are read from the file as
    # strings, while the mapping keys may be ints or strings depending on how
    # the mapping was loaded
    scene_id_to_name = {str(k): v for k, v in scene_id_2_scene_name.items()}
    with open(gt_path, "r") as gt_file:
        for line in gt_file:
            line_split = line.split(" ")
            scene_id = line_split[0]
            if scene_id not in scene_id_to_name:
                continue
            scene_name = scene_id_to_name[scene_id]
            if scene_name not in no_of_objects_per_scene:
                no_of_objects_per_scene[scene_name] = 0
            no_of_objects_per_scene[scene_name] += 1
    return no_of_objects_per_scene
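
# Example return shape (scene names and counts are hypothetical):
#
#     {"Warehouse_000": 1523, "Warehouse_001": 980}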


def split_files_by_sensor(gt_path: str, pred_path: str, output_base_dir: str, map_camera_name_to_bev_name: Dict[str, list], confidence_threshold: float, num_frames_to_eval: int):
    """
    Splits GT and Pred files by sensor and saves them into separate directories.

    :param gt_path: Path to the ground truth JSON-lines file.
    :param pred_path: Path to the predictions JSON-lines file.
    :param output_base_dir: Base directory to save split files.
    :param map_camera_name_to_bev_name: Mapping from camera sensor name to a list of BEV sensor names.
    :param confidence_threshold: Predicted objects with a lower bbox3d confidence are dropped.
    :param num_frames_to_eval: Frames with an id at or above this value are skipped.
    """
    # Create output base directory
    os.makedirs(output_base_dir, exist_ok=True)

    # Sets to keep track of unique sensor IDs
    gt_sensors = set()
    pred_sensors = set()

    # Create writers for GT data
    sensor_gt_writers = {}
    with open(gt_path, "r") as gt_file:
        for line in gt_file:
            # Tolerate single-quoted pseudo-JSON lines by converting to double quotes
            if '"' not in line and "'" in line:
                line = line.replace("'", '"')
            data = json.loads(line)
            # Only eval frames below num_frames_to_eval
            if int(data['id']) >= num_frames_to_eval:
                continue
            cam_sensor_name = data['sensorId']
            # Convert camera id to BEV sensor id(s)
            bev_sensor_names = map_camera_name_to_bev_name[cam_sensor_name]
            for bev_sensor_name in bev_sensor_names:
                gt_sensors.add(bev_sensor_name)
                sensor_dir = os.path.join(output_base_dir, bev_sensor_name)
                os.makedirs(sensor_dir, exist_ok=True)
                if bev_sensor_name not in sensor_gt_writers:
                    gt_file_path = os.path.join(sensor_dir, "gt.json")
                    sensor_gt_writers[bev_sensor_name] = open(gt_file_path, "w")
                sensor_gt_writers[bev_sensor_name].write(json.dumps(data) + "\n")
    # Close all GT writers
    for writer in sensor_gt_writers.values():
        writer.close()
    # Log found BEV sensors in GT
    logging.info(f"Found BEV sensors: {', '.join(sorted(gt_sensors))} in ground truth file.")

    # Create writers for Pred data
    sensor_pred_writers = {}
    with open(pred_path, "r") as pred_file:
        for line in pred_file:
            # Tolerate single-quoted pseudo-JSON lines by converting to double quotes
            if '"' not in line and "'" in line:
                line = line.replace("'", '"')
            data = json.loads(line)
            # Only eval frames below num_frames_to_eval
            if int(data['id']) >= num_frames_to_eval:
                continue
            sensor_name = data['sensorId']
            pred_sensors.add(sensor_name)
            sensor_dir = os.path.join(output_base_dir, sensor_name)
            os.makedirs(sensor_dir, exist_ok=True)
            if sensor_name not in sensor_pred_writers:
                pred_file_path = os.path.join(sensor_dir, "pred.json")
                sensor_pred_writers[sensor_name] = open(pred_file_path, "w")
            # Keep only objects whose bbox3d confidence meets the threshold
            filtered_objects = []
            for obj in data["objects"]:
                confidence = obj["bbox3d"]["confidence"]
                if confidence >= confidence_threshold:
                    filtered_objects.append(obj)
            # Replace the "objects" list with the filtered version
            data["objects"] = filtered_objects
            sensor_pred_writers[sensor_name].write(json.dumps(data) + "\n")
    # Close all Pred writers
    for writer in sensor_pred_writers.values():
        writer.close()
    # Log found BEV sensors in Prediction
    logging.info(f"Found BEV sensors: {', '.join(sorted(pred_sensors))} in prediction file.")
| print("") |