mohammed-aljafry committed on
Commit fdd37bd · verified · 1 Parent(s): f38952d

Upload logic.py with huggingface_hub

Files changed (1): logic.py (+495, -0)

logic.py ADDED
# logic.py

import os
import json
import cv2
import math
import numpy as np
from collections import deque
from pathlib import Path

import torch
from torch.utils.data import Dataset
from torchvision import transforms

# --- Constants & Configs ---
WAYPOINT_SCALE_FACTOR = 5.0
T1_FUTURE_TIME = 1.0
T2_FUTURE_TIME = 2.0
TRACKER_FREQUENCY = 10
MERGE_PERCENT = 0.4
PIXELS_PER_METER = 8
MAX_DISTANCE = 32
IMG_SIZE = MAX_DISTANCE * PIXELS_PER_METER * 2
EGO_CAR_X = IMG_SIZE // 2
EGO_CAR_Y = IMG_SIZE - (4.0 * PIXELS_PER_METER)
reweight_array = np.ones((20, 20, 7))
last_valid_waypoints = None
last_valid_theta = 0.0

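# Worked numbers for the BEV geometry above (illustrative, not part of the
# original upload): IMG_SIZE = 32 * 8 * 2 = 512 px, i.e. a 64 m square at
# 8 px/m, with the ego vehicle drawn at (EGO_CAR_X, EGO_CAR_Y) = (256, 480.0),
# 4 m above the bottom edge of the map.
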
class ControllerConfig:
    turn_KP, turn_KI, turn_KD, turn_n = 1.0, 0.1, 0.1, 20
    speed_KP, speed_KI, speed_KD, speed_n = 0.5, 0.05, 0.1, 20
    max_speed, max_throttle, clip_delta = 6.0, 0.75, 0.25
    collision_buffer, detect_threshold = [0.0, 0.0], 0.04
    brake_speed, brake_ratio = 0.4, 1.1

# --- Data Handling ---
# cv2 yields NumPy arrays; ToPILImage converts them so Resize can operate.
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

lidar_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((112, 112)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5]),
])

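# Example (illustrative): with the ToPILImage step, both pipelines accept the
# HxWxC uint8 arrays produced by cv2:
# dummy = np.zeros((600, 800, 3), dtype=np.uint8)
# transform(dummy).shape        # -> torch.Size([3, 224, 224])
# lidar_transform(dummy).shape  # -> torch.Size([3, 112, 112])
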
class LMDriveDataset(Dataset):
    def __init__(self, data_dir, transform=None, lidar_transform=None):
        self.data_dir = Path(data_dir)
        self.transform = transform
        self.lidar_transform = lidar_transform
        self.samples = []

        measurement_dir = self.data_dir / "measurements"
        image_dir = self.data_dir / "rgb_full"

        measurement_files = sorted([f for f in os.listdir(measurement_dir) if f.endswith(".json")])
        image_files = sorted([f for f in os.listdir(image_dir) if f.endswith(".jpg")])

        num_samples = min(len(measurement_files), len(image_files))

        for i in range(num_samples):
            frame_id = i
            measurement_path = str(measurement_dir / f"{frame_id:04d}.json")
            image_name = f"{frame_id:04d}.jpg"
            image_path = str(image_dir / image_name)

            if not os.path.exists(measurement_path) or not os.path.exists(image_path):
                continue

            with open(measurement_path, "r") as f:
                measurements_data = json.load(f)

            self.samples.append({
                "image_path": image_path,
                "measurement_path": measurement_path,
                "frame_id": frame_id,
                "measurements": measurements_data,
            })

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        sample = self.samples[idx]
        full_image = cv2.imread(sample["image_path"])
        if full_image is None:
            raise ValueError(f"Failed to load image: {sample['image_path']}")
        full_image = cv2.cvtColor(full_image, cv2.COLOR_BGR2RGB)

        # The four camera views are stacked vertically in one 800-px-wide image.
        front_image = full_image[:600, :800]
        left_image = full_image[600:1200, :800]
        right_image = full_image[1200:1800, :800]
        center_image = full_image[1800:2400, :800]

        front_image_tensor = self.transform(front_image)
        left_image_tensor = self.transform(left_image)
        right_image_tensor = self.transform(right_image)
        center_image_tensor = self.transform(center_image)

        lidar_path = str(self.data_dir / "lidar" / f"{sample['frame_id']:04d}.png")
        lidar = cv2.imread(lidar_path)
        if lidar is None:
            lidar = np.zeros((112, 112, 3), dtype=np.uint8)
        else:
            if len(lidar.shape) == 2:
                lidar = cv2.cvtColor(lidar, cv2.COLOR_GRAY2BGR)
            lidar = cv2.cvtColor(lidar, cv2.COLOR_BGR2RGB)
        lidar_tensor = self.lidar_transform(lidar)

        measurements_data = sample["measurements"]
        x = measurements_data.get("x", 0.0)
        y = measurements_data.get("y", 0.0)
        theta = measurements_data.get("theta", 0.0)
        speed = measurements_data.get("speed", 0.0)
        steer = measurements_data.get("steer", 0.0)
        throttle = measurements_data.get("throttle", 0.0)
        brake = int(measurements_data.get("brake", False))
        command = measurements_data.get("command", 0)
        is_junction = int(measurements_data.get("is_junction", False))
        should_brake = int(measurements_data.get("should_brake", 0))
        x_command = measurements_data.get("x_command", 0.0)
        y_command = measurements_data.get("y_command", 0.0)
        target_point = torch.tensor([x_command, y_command], dtype=torch.float32)
        measurements = torch.tensor(
            [x, y, theta, speed, steer, throttle, brake, command, is_junction, should_brake],
            dtype=torch.float32,
        )
        return {
            "rgb": front_image_tensor, "rgb_left": left_image_tensor, "rgb_right": right_image_tensor,
            "rgb_center": center_image_tensor, "lidar": lidar_tensor,
            "measurements": measurements, "target_point": target_point,
        }

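# Example (illustrative) usage; "data/route_00" is a hypothetical directory
# with the measurements/rgb_full/lidar layout assumed above:
# from torch.utils.data import DataLoader
# dataset = LMDriveDataset("data/route_00", transform=transform, lidar_transform=lidar_transform)
# loader = DataLoader(dataset, batch_size=4, shuffle=True)
# batch = next(iter(loader))
# batch["rgb"].shape  # -> torch.Size([4, 3, 224, 224])
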

# --- Object Tracking ---
class TrackedObject:
    def __init__(self):
        self.last_step = 0
        self.last_pos = [0, 0]
        self.historical_pos = deque(maxlen=10)
        self.historical_steps = deque(maxlen=10)
        self.historical_features = deque(maxlen=10)

    def update(self, step, object_info):
        self.last_step = step
        self.last_pos = object_info[:2]
        self.historical_pos.append(self.last_pos)
        self.historical_steps.append(step)
        if len(object_info) > 2:
            self.historical_features.append(object_info[2])

class Tracker:
    def __init__(self, frequency=10):
        self.tracks = []
        self.alive_ids = []
        self.frequency = frequency

    def update_and_predict(self, det_data, pos, theta, frame_num):
        det_data_weighted = det_data * reweight_array
        detected_objects = find_peak_box(det_data_weighted)
        objects_info = []
        # Grid coordinates are kept alongside objects_info so the smoothed
        # speed/heading can be written back into the matching det_data cells.
        object_coords = []
        R = np.array([[np.cos(-theta), -np.sin(-theta)], [np.sin(-theta), np.cos(-theta)]])

        for obj in detected_objects:
            i, j = obj['coords']
            obj_data = obj['raw_data']
            center_y, center_x = convert_grid_to_xy(i, j)
            center_x += obj_data[1]
            center_y += obj_data[2]
            loc = R.T.dot(np.array([center_x, center_y]))
            objects_info.append([loc[0] + pos[0], loc[1] + pos[1], obj_data[1:]])
            object_coords.append((i, j))

        updates_ids = self._update(objects_info, frame_num)
        speed_results, heading_results = self._predict(updates_ids)

        # Blend the tracked estimates back into the raw detections.
        for k, (i, j) in enumerate(object_coords):
            if heading_results[k] is not None:
                factor = MERGE_PERCENT * 0.1
                det_data[i, j, 3] = heading_results[k] * factor + det_data[i, j, 3] * (1 - factor)
            if speed_results[k] is not None:
                factor = MERGE_PERCENT * 0.1
                det_data[i, j, 6] = speed_results[k] * factor + det_data[i, j, 6] * (1 - factor)
        return det_data

    def _update(self, objects_info, step):
        latest_ids = []
        if len(self.tracks) == 0:
            for object_info in objects_info:
                to = TrackedObject()
                to.update(step, object_info)
                self.tracks.append(to)
                latest_ids.append(len(self.tracks) - 1)
        else:
            matched_ids = set()
            for idx, object_info in enumerate(objects_info):
                # Greedy nearest-neighbour matching within a 2 m gate.
                min_id, min_error = -1, float('inf')
                pos_x, pos_y = object_info[:2]
                for _id in self.alive_ids:
                    if _id in matched_ids:
                        continue
                    track_pos = self.tracks[_id].last_pos
                    distance = np.sqrt((track_pos[0] - pos_x)**2 + (track_pos[1] - pos_y)**2)
                    if distance < 2.0 and distance < min_error:
                        min_error = distance
                        min_id = _id
                if min_id != -1:
                    self.tracks[min_id].update(step, objects_info[idx])
                    latest_ids.append(min_id)
                    matched_ids.add(min_id)
                else:
                    to = TrackedObject()
                    to.update(step, object_info)
                    self.tracks.append(to)
                    latest_ids.append(len(self.tracks) - 1)
        self.alive_ids = [i for i, track in enumerate(self.tracks) if track.last_step > step - 6]
        return latest_ids

    def _predict(self, updates_ids):
        speed_results, heading_results = [], []
        for each_id in updates_ids:
            to = self.tracks[each_id]
            avg_speed, avg_heading = [], []
            for feature in to.historical_features:
                avg_speed.append(feature[2])
                avg_heading.append(feature[:2])
            if len(avg_speed) < 2:
                speed_results.append(None)
                heading_results.append(None)
                continue
            avg_speed = np.mean(avg_speed)
            avg_heading = np.mean(np.stack(avg_heading), axis=0)
            yaw_angle = get_yaw_angle(avg_heading)
            heading_results.append((4 - yaw_angle / np.pi) % 2)
            speed_results.append(avg_speed)
        return speed_results, heading_results

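# Example (illustrative) tracker step; pos/theta/frame_num would come from the
# ego state, and the 7 channels are [confidence, dx, dy, heading, length,
# width, speed] as used by find_peak_box() and render() below:
# tracker = Tracker(frequency=TRACKER_FREQUENCY)
# det = np.zeros((20, 20, 7))
# det = tracker.update_and_predict(det, pos=np.array([0.0, 0.0]), theta=0.0, frame_num=0)
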
# --- Control System ---
class PIDController:
    def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
        self._K_P = K_P
        self._K_I = K_I
        self._K_D = K_D
        self._window = deque([0 for _ in range(n)], maxlen=n)
        self._max = 0.0
        self._min = 0.0

    def step(self, error):
        self._window.append(error)
        self._max = max(self._max, abs(error))
        self._min = -abs(self._max)
        if len(self._window) >= 2:
            integral = np.mean(self._window)
            derivative = self._window[-1] - self._window[-2]
        else:
            integral = 0.0
            derivative = 0.0
        return self._K_P * error + self._K_I * integral + self._K_D * derivative

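# Worked example (illustrative): with K_P=1.0, K_I=0.1, K_D=0.1, n=20 and a
# fresh all-zero window, step(0.5) returns
# 1.0*0.5 + 0.1*(0.5/20) + 0.1*(0.5 - 0.0) = 0.5 + 0.0025 + 0.05 = 0.5525.
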
class InterfuserController:
    def __init__(self, config):
        self.turn_controller = PIDController(config.turn_KP, config.turn_KI, config.turn_KD, config.turn_n)
        self.speed_controller = PIDController(config.speed_KP, config.speed_KI, config.speed_KD, config.speed_n)
        self.config = config
        self.collision_buffer = np.array(config.collision_buffer)
        self.detect_threshold = config.detect_threshold
        self.stop_steps = 0
        self.forced_forward_steps = 0
        self.red_light_steps = 0
        self.block_red_light = 0
        self.in_stop_sign_effect = False
        self.block_stop_sign_distance = 0
        self.stop_sign_timer = 0
        self.stop_sign_trigger_times = 0

    def run_step(self, speed, waypoints, junction, traffic_light_state, stop_sign, meta_data):
        # Track how long the car has been (nearly) stationary.
        if speed < 0.2:
            self.stop_steps += 1
        else:
            self.stop_steps = max(0, self.stop_steps - 10)
        # Release the stop-sign hold once the car has come to rest.
        if speed < 0.06 and self.in_stop_sign_effect:
            self.in_stop_sign_effect = False
        if junction < 0.3:
            self.stop_sign_trigger_times = 0
        # If a red light persists implausibly long, ignore it for a while.
        if traffic_light_state > 0.7:
            self.red_light_steps += 1
        else:
            self.red_light_steps = 0
        if self.red_light_steps > 1000:
            self.block_red_light = 80
            self.red_light_steps = 0
        if self.block_red_light > 0:
            self.block_red_light -= 1
            traffic_light_state = 0.01
        if stop_sign < 0.6 and self.block_stop_sign_distance < 0.1:
            self.in_stop_sign_effect = True
            self.block_stop_sign_distance = 2.0
            self.stop_sign_trigger_times = 3
        self.block_stop_sign_distance = max(0, self.block_stop_sign_distance - 0.05 * speed)
        if self.block_stop_sign_distance < 0.1:
            if self.stop_sign_trigger_times > 0:
                self.block_stop_sign_distance = 2.0
                self.stop_sign_trigger_times -= 1
                self.in_stop_sign_effect = True

        # Lateral control: steer toward the midpoint of the first two waypoints.
        aim = (waypoints[1] + waypoints[0]) / 2.0
        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
        if speed < 0.01:
            angle = 0
        steer = self.turn_controller.step(angle)
        steer = np.clip(steer, -1.0, 1.0)

        desired_speed = self.config.max_speed

        downsampled_waypoints = downsample_waypoints(waypoints)
        safe_dis = get_max_safe_distance(meta_data, downsampled_waypoints, t=0, collision_buffer=self.collision_buffer, threshold=self.detect_threshold)

        if traffic_light_state > 0.5 or (stop_sign > 0.6 and self.stop_sign_timer < 20):
            desired_speed = 0.0
        if stop_sign > 0.6:
            self.stop_sign_timer += 1
        else:
            self.stop_sign_timer = 0

        brake = speed > desired_speed * self.config.brake_ratio

        # Longitudinal control: PID on the clipped speed error.
        delta = np.clip(desired_speed - speed, 0.0, self.config.clip_delta)
        throttle = self.speed_controller.step(delta)
        throttle = np.clip(throttle, 0.0, self.config.max_throttle)

        meta_info_1 = f"speed: {speed:.2f}, target_speed: {desired_speed:.2f}"
        meta_info_2 = f"on_road_prob: {junction:.2f}, red_light_prob: {traffic_light_state:.2f}, stop_sign_prob: {1 - stop_sign:.2f}"
        meta_info_3 = f"stop_steps: {self.stop_steps}, block_stop_sign_distance: {self.block_stop_sign_distance:.1f}"

        # Anti-deadlock: if stuck for too long, force a short burst of throttle.
        if self.stop_steps > 1200:
            self.forced_forward_steps = 12
            self.stop_steps = 0
        if self.forced_forward_steps > 0:
            throttle = 0.8
            brake = False
            self.forced_forward_steps -= 1
        if self.in_stop_sign_effect:
            throttle = 0
            brake = True

        return steer, throttle, brake, (meta_info_1, meta_info_2, meta_info_3, safe_dis)

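# Example (illustrative) of one control step; model_waypoints and det_grid are
# hypothetical placeholders for a perception model's outputs:
# controller = InterfuserController(ControllerConfig())
# steer, throttle, brake, meta = controller.run_step(
#     speed=3.2,
#     waypoints=model_waypoints,          # (N, 2) ego-frame waypoints in metres
#     junction=0.9, traffic_light_state=0.1, stop_sign=0.8,
#     meta_data=det_grid,                 # reshaped internally to (20, 20, 7)
# )
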

# --- Visualization & Helper Functions ---
class DisplayInterface:
    def __init__(self, width=1200, height=600):
        self._width = width
        self._height = height

    def run_interface(self, data):
        dashboard = np.zeros((self._height, self._width, 3), dtype=np.uint8)
        font = cv2.FONT_HERSHEY_SIMPLEX
        dashboard[:, :800] = cv2.resize(data.get('camera_view'), (800, 600))
        dashboard[:400, 800:1200] = cv2.resize(data['map_t0'], (400, 400))
        dashboard[400:600, 800:1000] = cv2.resize(data['map_t1'], (200, 200))
        dashboard[400:600, 1000:1200] = cv2.resize(data['map_t2'], (200, 200))
        cv2.line(dashboard, (800, 0), (800, 600), (255, 255, 255), 2)
        cv2.line(dashboard, (800, 400), (1200, 400), (255, 255, 255), 2)
        cv2.line(dashboard, (1000, 400), (1000, 600), (255, 255, 255), 2)
        y_pos = 40
        for key, text in data['text_info'].items():
            cv2.putText(dashboard, text, (820, y_pos), font, 0.6, (255, 255, 255), 1)
            y_pos += 30
        y_pos += 10
        for t, counts in data['object_counts'].items():
            count_str = f"{t}: C={counts['car']} B={counts['bike']} P={counts['pedestrian']}"
            cv2.putText(dashboard, count_str, (820, y_pos), font, 0.5, (255, 255, 255), 1)
            y_pos += 20
        cv2.putText(dashboard, "t0", (1160, 30), font, 0.8, (0, 255, 255), 2)
        cv2.putText(dashboard, "t1", (960, 430), font, 0.8, (0, 255, 255), 2)
        cv2.putText(dashboard, "t2", (1160, 430), font, 0.8, (0, 255, 255), 2)
        return dashboard

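# Example (illustrative) of the dict run_interface() expects; the three BEV
# maps could come from render() below:
# hud = DisplayInterface()
# frame = hud.run_interface({
#     'camera_view': np.zeros((600, 800, 3), np.uint8),
#     'map_t0': map_t0, 'map_t1': map_t1, 'map_t2': map_t2,  # BEV images
#     'text_info': {'speed': 'speed: 3.20, target_speed: 6.00'},
#     'object_counts': {'t0': {'car': 1, 'bike': 0, 'pedestrian': 0}},
# })
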
def get_yaw_angle(forward_vector):
    forward_vector = forward_vector / np.linalg.norm(forward_vector)
    return math.atan2(forward_vector[1], forward_vector[0])

def ensure_rgb(image):
    if len(image.shape) == 2 or image.shape[2] == 1:
        return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    return image

def convert_grid_to_xy(i, j):
    return (j - 9.5) * 1.6, (19.5 - i) * 1.6

def find_peak_box(data):
    # Pad the 20x20 grid so border cells can be compared with four neighbours.
    det_data = np.zeros((22, 22, 7))
    det_data[1:21, 1:21] = data
    detected_objects = []
    for i in range(1, 21):
        for j in range(1, 21):
            if det_data[i, j, 0] > 0.6 and (
                det_data[i, j, 0] > det_data[i, j - 1, 0]
                and det_data[i, j, 0] > det_data[i, j + 1, 0]
                and det_data[i, j, 0] > det_data[i - 1, j, 0]
                and det_data[i, j, 0] > det_data[i + 1, j, 0]
            ):
                length, width, confidence = det_data[i, j, 4], det_data[i, j, 5], det_data[i, j, 0]
                # Crude size-based classification of the detection.
                if length > 4.0:
                    obj_type = 'car'
                elif length / width > 1.5:
                    obj_type = 'bike'
                else:
                    obj_type = 'pedestrian'
                detected_objects.append({
                    'coords': (i - 1, j - 1),
                    'type': obj_type,
                    'confidence': confidence,
                    'raw_data': det_data[i, j],
                })
    return detected_objects

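# Worked example (illustrative): a single synthetic peak.
# grid = np.zeros((20, 20, 7))
# grid[5, 7, 0] = 0.9            # confidence above the 0.6 threshold
# grid[5, 7, 4:6] = [4.5, 2.0]   # length > 4.0 m -> classified as 'car'
# find_peak_box(grid)            # -> one detection with coords (5, 7)
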
def add_rect(img, loc, ori, box, value, color):
    center_x = int(loc[0] * PIXELS_PER_METER + MAX_DISTANCE * PIXELS_PER_METER)
    center_y = int(loc[1] * PIXELS_PER_METER + MAX_DISTANCE * PIXELS_PER_METER)
    size_px = (int(box[0] * PIXELS_PER_METER), int(box[1] * PIXELS_PER_METER))
    angle_deg = -np.degrees(math.atan2(ori[1], ori[0]))
    box_points = cv2.boxPoints(((center_x, center_y), size_px, angle_deg))
    cv2.fillConvexPoly(img, np.int32(box_points), [int(x * value) for x in color])
    return img

def render(det_data, t=0):
    CLASS_COLORS = {'car': (0, 0, 255), 'bike': (0, 255, 0), 'pedestrian': (255, 0, 0), 'unknown': (128, 128, 128)}
    det_weighted = det_data * reweight_array
    detected_objects = find_peak_box(det_weighted)
    counts = {cls: 0 for cls in CLASS_COLORS}
    for obj in detected_objects:
        counts[obj['type']] = counts.get(obj['type'], 0) + 1
    img = np.zeros((IMG_SIZE, IMG_SIZE, 3), np.uint8)
    for obj in detected_objects:
        i, j = obj['coords']
        obj_data = obj['raw_data']
        speed, theta = obj_data[6], obj_data[3] * np.pi
        ori = np.array([math.cos(theta), math.sin(theta)])
        # Extrapolate the object t seconds forward along its heading.
        loc_x = obj_data[1] + t * speed * ori[0] + convert_grid_to_xy(i, j)[0]
        loc_y = obj_data[2] - t * speed * ori[1] + convert_grid_to_xy(i, j)[1]
        box = np.array([obj_data[4], obj_data[5]]) * (1.5 if obj['type'] == 'pedestrian' else 1.0)
        add_rect(img, np.array([loc_x, loc_y]), ori, box, obj['confidence'], CLASS_COLORS[obj['type']])
    return img, counts

def render_self_car(loc, ori, box, pixels_per_meter=PIXELS_PER_METER):
    img = np.zeros((IMG_SIZE, IMG_SIZE, 3), np.uint8)
    center_x = int(loc[0] * pixels_per_meter + MAX_DISTANCE * pixels_per_meter)
    center_y = int(loc[1] * pixels_per_meter + MAX_DISTANCE * pixels_per_meter)
    size_px = (int(box[0] * pixels_per_meter), int(box[1] * pixels_per_meter))
    angle_deg = -np.degrees(math.atan2(ori[1], ori[0]))
    box_points = np.int32(cv2.boxPoints(((center_x, center_y), size_px, angle_deg)))
    cv2.fillConvexPoly(img, box_points, (0, 255, 255))
    return img

def render_waypoints(waypoints, pixels_per_meter=PIXELS_PER_METER):
    global last_valid_waypoints
    img = np.zeros((IMG_SIZE, IMG_SIZE, 3), np.uint8)
    # Fall back to the last valid prediction when the new one is degenerate.
    current_waypoints = waypoints if waypoints is not None and len(waypoints) > 2 else last_valid_waypoints
    if current_waypoints is not None:
        last_valid_waypoints = current_waypoints
        for i, point in enumerate(current_waypoints):
            px = int(EGO_CAR_X + point[1] * pixels_per_meter)
            py = int(EGO_CAR_Y - point[0] * pixels_per_meter)
            # Final waypoint in red, intermediate ones in green.
            color = (0, 0, 255) if i == len(current_waypoints) - 1 else (0, 255, 0)
            cv2.circle(img, (px, py), 4, color, -1)
    return img

def collision_detections(map1, map2, threshold=0.04):
    if len(map2.shape) == 3 and map2.shape[2] == 3:
        map2 = cv2.cvtColor(map2, cv2.COLOR_BGR2GRAY)
    assert map1.shape == map2.shape
    occupied = np.sum(map2 > 0)
    if occupied == 0:
        return True  # nothing to overlap with
    overlap_map = (map1 > 0.01) & (map2 > 0.01)
    return float(np.sum(overlap_map)) / occupied < threshold

def get_max_safe_distance(meta_data, downsampled_waypoints, t, collision_buffer, threshold):
    # Occupancy (confidence) channel of the 20x20x7 detection grid.
    surround_map = meta_data.reshape(20, 20, 7)[..., 0]
    if np.sum(surround_map) < 1:
        return np.linalg.norm(downsampled_waypoints[-3])
    hero_bounding_box = np.array([2.45, 1.0]) + collision_buffer
    safe_distance = 0.0
    # Sweep the ego footprint along the waypoints until a collision is flagged.
    for i in range(len(downsampled_waypoints) - 2):
        aim = (downsampled_waypoints[i + 1] + downsampled_waypoints[i + 2]) / 2.0
        loc, ori = downsampled_waypoints[i], aim - downsampled_waypoints[i]
        self_car_map = render_self_car(loc=loc, ori=ori, box=hero_bounding_box, pixels_per_meter=PIXELS_PER_METER)
        self_car_map_gray = cv2.cvtColor(cv2.resize(self_car_map, (20, 20)), cv2.COLOR_BGR2GRAY)
        if not collision_detections(surround_map, self_car_map_gray, threshold):
            break
        safe_distance = max(safe_distance, np.linalg.norm(loc))
    return safe_distance

def downsample_waypoints(waypoints, precision=0.2):
    # Despite the name, this densifies: it inserts intermediate points so that
    # consecutive waypoints are at most `precision` metres apart.
    downsampled_waypoints = []
    last_waypoint = np.array([0.0, 0.0])
    for i in range(len(waypoints)):
        now_waypoint = waypoints[i]
        dis = np.linalg.norm(now_waypoint - last_waypoint)
        if dis > precision:
            interval = int(dis / precision)
            move_vector = (now_waypoint - last_waypoint) / (interval + 1)
            for j in range(interval):
                downsampled_waypoints.append(last_waypoint + move_vector * (j + 1))
        downsampled_waypoints.append(now_waypoint)
        last_waypoint = now_waypoint
    return downsampled_waypoints
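
# Worked example (illustrative): a waypoint 0.8 m away is densified into
# points 0.16 m apart:
# pts = [np.array([0.0, 0.8])]
# downsample_waypoints(pts)  # -> 5 points: 4 interpolated steps plus the original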