mohammed-aljafry committed on
Commit
cb510da
·
verified ·
1 Parent(s): 87e37f9

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +104 -138
app.py CHANGED
@@ -22,51 +22,32 @@ from logic import (
22
  # 1. إعدادات ومسارات النماذج
23
  # ==============================================================================
24
  WEIGHTS_DIR = "model"
 
25
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
26
 
27
- # قاموس لتحديد الإعدادات الخاصة بكل نموذج.
28
  MODELS_SPECIFIC_CONFIGS = {
29
- "interfuser_baseline": {
30
- "rgb_backbone_name": "r50",
31
- "embed_dim": 256,
32
- "direct_concat": True,
33
- },
34
- "interfuser_lightweight": {
35
- "rgb_backbone_name": "r26",
36
- "embed_dim": 128,
37
- "enc_depth": 4,
38
- "dec_depth": 4,
39
- "direct_concat": True,
40
- }
41
  }
42
 
43
  def find_available_models():
44
- """
45
- تبحث في مجلد الأوزان وتعيد قائمة بأسماء النماذج المتاحة.
46
- """
47
- if not os.path.isdir(WEIGHTS_DIR):
48
- print(f"تحذير: مجلد الأوزان '{WEIGHTS_DIR}' غير موجود.")
49
- return []
50
  return [f.replace(".pth", "") for f in os.listdir(WEIGHTS_DIR) if f.endswith(".pth")]
51
 
52
  # ==============================================================================
53
- # 2. دالة تحميل النموذج (لا تستخدم متغيرات عامة)
54
  # ==============================================================================
 
 
55
  def load_model(model_name: str):
56
- """
57
- تبني وتحمل النموذج المختار وتُرجعه ككائن.
58
- """
59
  if not model_name or "لم يتم" in model_name:
60
  return None, "الرجاء اختيار نموذج صالح."
61
-
62
  weights_path = os.path.join(WEIGHTS_DIR, f"{model_name}.pth")
63
  print(f"Building model: '{model_name}'")
64
-
65
  model_config = MODELS_SPECIFIC_CONFIGS.get(model_name, {})
66
  model = build_interfuser_model(model_config)
67
-
68
  if not os.path.exists(weights_path):
69
- gr.Warning(f"ملف الأوزان '{weights_path}' غير موجود. النموذج سيعمل بأوزان عشوائية.")
70
  else:
71
  try:
72
  state_dic = torch.load(weights_path, map_location=device, weights_only=True)
@@ -74,40 +55,25 @@ def load_model(model_name: str):
74
  print(f"تم تحميل أوزان النموذج '{model_name}' بنجاح.")
75
  except Exception as e:
76
  gr.Warning(f"فشل تحميل الأوزان للنموذج '{model_name}': {e}.")
77
-
78
  model.to(device)
79
  model.eval()
80
-
81
- # إرجاع كائن النموذج نفسه + رسالة للمستخدم
82
  return model, f"تم تحميل نموذج: {model_name}"
83
 
84
- # ==============================================================================
85
- # 3. دالة التشغيل الرئيسية (تستقبل النموذج كمدخل)
86
- # ==============================================================================
87
  def run_single_frame(
88
- model_from_state, # <-- مدخل جديد من gr.State
89
- rgb_image_path,
90
- rgb_left_image_path,
91
- rgb_right_image_path,
92
- rgb_center_image_path,
93
- lidar_image_path,
94
- measurements_path,
95
- target_point_list
96
  ):
97
- # لم نعد نستخدم المتغير العام، بل نستخدم النموذج الذي تم تمريره
98
  if model_from_state is None:
99
- raise gr.Error("الرجاء اختيار وتحميل نموذج صالح أولاً من القائمة المنسدلة.")
100
-
101
  try:
102
- # --- 1. قراءة ومعالجة المدخلات ---
103
  if not (rgb_image_path and measurements_path):
104
  raise gr.Error("الرجاء توفير الصورة الأمامية وملف القياسات على الأقل.")
105
 
106
- rgb_image_pil = Image.open(rgb_image_path.name).convert("RGB")
107
- # بقية معالجة المدخلات
108
- rgb_left_pil = Image.open(rgb_left_image_path.name).convert("RGB") if rgb_left_image_path else rgb_image_pil
109
- rgb_right_pil = Image.open(rgb_right_image_path.name).convert("RGB") if rgb_right_image_path else rgb_image_pil
110
- rgb_center_pil = Image.open(rgb_center_image_path.name).convert("RGB") if rgb_center_image_path else rgb_image_pil
111
 
112
  front_tensor = transform(rgb_image_pil).unsqueeze(0).to(device)
113
  left_tensor = transform(rgb_left_pil).unsqueeze(0).to(device)
@@ -115,20 +81,19 @@ def run_single_frame(
115
  center_tensor = transform(rgb_center_pil).unsqueeze(0).to(device)
116
 
117
  if lidar_image_path:
118
- lidar_array = np.load(lidar_image_path.name)
119
  if lidar_array.max() > 0: lidar_array = (lidar_array / lidar_array.max()) * 255.0
120
  lidar_pil = Image.fromarray(lidar_array.astype(np.uint8)).convert('RGB')
121
  else:
122
  lidar_pil = Image.fromarray(np.zeros((112, 112, 3), dtype=np.uint8))
123
  lidar_tensor = lidar_transform(lidar_pil).unsqueeze(0).to(device)
124
 
125
- with open(measurements_path.name, 'r') as f: m_dict = json.load(f)
126
 
127
  measurements_tensor = torch.tensor([[
128
- m_dict.get('x', 0.0), m_dict.get('y', 0.0), m_dict.get('theta', 0.0),
129
- m_dict.get('speed', 5.0), m_dict.get('steer', 0.0), m_dict.get('throttle', 0.0),
130
- float(m_dict.get('brake', 0.0)), m_dict.get('command', 2.0),
131
- float(m_dict.get('is_junction', 0.0)), float(m_dict.get('should_brake', 0.0))
132
  ]], dtype=torch.float32).to(device)
133
 
134
  target_point_tensor = torch.tensor([target_point_list], dtype=torch.float32).to(device)
@@ -145,118 +110,121 @@ def run_single_frame(
145
  traffic, waypoints, is_junction, traffic_light, stop_sign, _ = outputs
146
 
147
  # --- 3. المعالجة اللاحقة والتصوّر ---
148
- speed = m_dict.get('speed', 5.0)
149
- pos, theta = [m_dict.get('x', 0.0), m_dict.get('y', 0.0)], m_dict.get('theta', 0.0)
 
 
 
150
 
151
- traffic_np = traffic[0].detach().cpu().numpy().reshape(20, 20, -1)
152
- waypoints_np = waypoints[0].detach().cpu().numpy() * WAYPOINT_SCALE_FACTOR
153
-
154
- tracker = Tracker()
155
- updated_traffic = tracker.update_and_predict(traffic_np.copy(), pos, theta, frame_num=0)
156
-
157
- controller = InterfuserController(ControllerConfig())
158
- steer, throttle, brake, metadata = controller.run_step(
159
- speed, waypoints_np, is_junction.sigmoid()[0, 1].item(),
160
- traffic_light.sigmoid()[0, 0].item(), stop_sign.sigmoid()[0, 1].item(), updated_traffic
161
- )
162
-
163
  map_t0, counts_t0 = render(updated_traffic, t=0)
164
  map_t1, counts_t1 = render(updated_traffic, t=T1_FUTURE_TIME)
165
  map_t2, counts_t2 = render(updated_traffic, t=T2_FUTURE_TIME)
166
-
167
  wp_map = render_waypoints(waypoints_np)
168
  self_car_map = render_self_car(np.array([0,0]), [math.cos(0), math.sin(0)], [4.0, 2.0])
169
-
170
  map_t0 = cv2.add(cv2.add(map_t0, wp_map), self_car_map)
171
  map_t0 = cv2.resize(map_t0, (400, 400))
172
  map_t1 = cv2.add(ensure_rgb(map_t1), ensure_rgb(self_car_map)); map_t1 = cv2.resize(map_t1, (200, 200))
173
  map_t2 = cv2.add(ensure_rgb(map_t2), ensure_rgb(self_car_map)); map_t2 = cv2.resize(map_t2, (200, 200))
174
-
175
  display = DisplayInterface()
176
  light_state, stop_sign_state = "Red" if traffic_light.sigmoid()[0,0].item() > 0.5 else "Green", "Yes" if stop_sign.sigmoid()[0,1].item() > 0.5 else "No"
177
-
178
- interface_data = {
179
- 'camera_view': np.array(rgb_image_pil), 'map_t0': map_t0, 'map_t1': map_t1, 'map_t2': map_t2,
180
- 'text_info': { 'Control': f"S:{steer:.2f} T:{throttle:.2f} B:{int(brake)}", 'Light': f"L: {light_state}", 'Stop': f"St: {stop_sign_state}" },
181
- 'object_counts': {'t0': counts_t0, 't1': counts_t1, 't2': counts_t2}
182
- }
183
-
184
  dashboard_image = display.run_interface(interface_data)
185
-
186
  # --- 4. تجهيز المخرجات ---
187
- result_dict = {
188
- "predicted_waypoints": waypoints_np.tolist(),
189
- "control_commands": {"steer": steer, "throttle": throttle, "brake": bool(brake)},
190
- "perception": {"traffic_light_status": light_state, "stop_sign_detected": (stop_sign_state == "Yes"), "is_at_junction_prob": round(is_junction.sigmoid()[0,1].item(), 3)},
191
- "metadata": {"speed_info": metadata[0], "perception_info": metadata[1], "stop_info": metadata[2], "safe_distance": metadata[3]}
192
- }
193
 
194
  return Image.fromarray(dashboard_image), result_dict
195
-
196
  except Exception as e:
197
  print(traceback.format_exc())
198
  raise gr.Error(f"حدث خطأ أثناء معالجة الإطار: {e}")
199
 
200
  # ==============================================================================
201
- # 4. تعريف واجهة Gradio
202
  # ==============================================================================
203
-
204
  available_models = find_available_models()
205
 
206
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
207
- gr.Markdown("# 🚗 محاكاة القيادة الذاتية باستخدام Interfuser")
208
-
209
  # مكون الحالة الخفي لتخزين النموذج الخاص بكل جلسة
210
  model_state = gr.State(value=None)
211
 
212
- with gr.Row():
213
- model_selector = gr.Dropdown(
214
- label="اختر النموذج من مجلد 'model'",
215
- choices=available_models,
216
- value=available_models[0] if available_models else "لم يتم العثور على نماذج"
217
- )
218
- status_textbox = gr.Textbox(label="حالة تحميل النموذج", interactive=False)
219
 
220
- # التحميل الأولي والتحميل عند التغيير
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
  if available_models:
222
  demo.load(fn=load_model, inputs=model_selector, outputs=[model_state, status_textbox])
223
 
224
  model_selector.change(fn=load_model, inputs=model_selector, outputs=[model_state, status_textbox])
225
-
226
- gr.Markdown("---")
227
 
228
- with gr.Tabs():
229
- with gr.TabItem("نقطة نهاية API (إطار واحد)", id=1):
230
- gr.Markdown("### اختبار النموذج بإدخال مباشر")
231
-
232
- with gr.Row():
233
- with gr.Column(scale=1):
234
- gr.Markdown("#### المدخلات")
235
- api_rgb_image_path = gr.File(label="RGB (Front) File (.jpg, .png)")
236
- api_rgb_left_image_path = gr.File(label="RGB (Left) File (Optional)")
237
- api_rgb_right_image_path = gr.File(label="RGB (Right) File (Optional)")
238
- api_rgb_center_image_path = gr.File(label="RGB (Center) File (Optional)")
239
- api_lidar_image_path = gr.File(label="LiDAR File (.npy, Optional)")
240
- api_measurements_path = gr.File(label="Measurements File (.json)")
241
- api_target_point_list = gr.JSON(label="Target Point (List [x, y])", value=[0.0, 100.0])
242
- api_run_button = gr.Button("🚀 تشغيل إطار واحد", variant="primary")
243
-
244
- with gr.Column(scale=2):
245
- gr.Markdown("#### المخرجات")
246
- api_output_image = gr.Image(label="Dashboard Result", type="pil", interactive=False)
247
- api_output_json = gr.JSON(label="نتائج النموذج (JSON)")
248
-
249
- api_run_button.click(
250
- fn=run_single_frame,
251
- inputs=[
252
- model_state, # تمرير الحالة كأول مدخل
253
- api_rgb_image_path, api_rgb_left_image_path, api_rgb_right_image_path,
254
- api_rgb_center_image_path, api_lidar_image_path,
255
- api_measurements_path, api_target_point_list
256
- ],
257
- outputs=[api_output_image, api_output_json],
258
- api_name="run_single_frame"
259
- )
260
 
261
  # ==============================================================================
262
  # 5. تشغيل التطبيق
@@ -264,6 +232,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
264
  if __name__ == "__main__":
265
  if not available_models:
266
  print("تحذير: لم يتم العثور على أي ملفات نماذج (.pth) في مجلد 'model/weights'.")
267
- print("سيتم تشغيل الواجهة ولكن لن تتمكن من تحميل أي نموذج.")
268
- # .queue() ضروري للتعامل مع الجلسات المتعددة بشكل صحيح
269
- demo.queue().launch(debug=True)
 
22
  # 1. إعدادات ومسارات النماذج
23
  # ==============================================================================
24
  WEIGHTS_DIR = "model"
25
+ EXAMPLES_DIR = "examples" # مجلد جديد للأمثلة
26
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
27
 
 
28
  MODELS_SPECIFIC_CONFIGS = {
29
+ "interfuser_baseline": { "rgb_backbone_name": "r50", "embed_dim": 256, "direct_concat": True },
30
+ "interfuser_lightweight": { "rgb_backbone_name": "r26", "embed_dim": 128, "enc_depth": 4, "dec_depth": 4, "direct_concat": True }
 
 
 
 
 
 
 
 
 
 
31
  }
32
 
33
  def find_available_models():
34
+ if not os.path.isdir(WEIGHTS_DIR): return []
 
 
 
 
 
35
  return [f.replace(".pth", "") for f in os.listdir(WEIGHTS_DIR) if f.endswith(".pth")]
36
 
37
  # ==============================================================================
38
+ # 2. الدوال الأساسية (load_model, run_single_frame)
39
  # ==============================================================================
40
+ # (هذه الدوال تبقى كما هي من الإصدار السابق الذي يدعم الجلسات)
41
+
42
  def load_model(model_name: str):
 
 
 
43
  if not model_name or "لم يتم" in model_name:
44
  return None, "الرجاء اختيار نموذج صالح."
 
45
  weights_path = os.path.join(WEIGHTS_DIR, f"{model_name}.pth")
46
  print(f"Building model: '{model_name}'")
 
47
  model_config = MODELS_SPECIFIC_CONFIGS.get(model_name, {})
48
  model = build_interfuser_model(model_config)
 
49
  if not os.path.exists(weights_path):
50
+ gr.Warning(f"ملف الأوزان '{weights_path}' غير موجود.")
51
  else:
52
  try:
53
  state_dic = torch.load(weights_path, map_location=device, weights_only=True)
 
55
  print(f"تم تحميل أوزان النموذج '{model_name}' بنجاح.")
56
  except Exception as e:
57
  gr.Warning(f"فشل تحميل الأوزان للنموذج '{model_name}': {e}.")
 
58
  model.to(device)
59
  model.eval()
 
 
60
  return model, f"تم تحميل نموذج: {model_name}"
61
 
 
 
 
62
  def run_single_frame(
63
+ model_from_state, rgb_image_path, rgb_left_image_path, rgb_right_image_path,
64
+ rgb_center_image_path, lidar_image_path, measurements_path, target_point_list
 
 
 
 
 
 
65
  ):
 
66
  if model_from_state is None:
67
+ raise gr.Error("الرجاء اختيار وتحميل نموذج صالح أولاً من القائمة.")
 
68
  try:
 
69
  if not (rgb_image_path and measurements_path):
70
  raise gr.Error("الرجاء توفير الصورة الأمامية وملف القياسات على الأقل.")
71
 
72
+ # --- 1. معالجة المدخلات ---
73
+ rgb_image_pil = Image.open(rgb_image_path).convert("RGB")
74
+ rgb_left_pil = Image.open(rgb_left_image_path).convert("RGB") if rgb_left_image_path else rgb_image_pil
75
+ rgb_right_pil = Image.open(rgb_right_image_path).convert("RGB") if rgb_right_image_path else rgb_image_pil
76
+ rgb_center_pil = Image.open(rgb_center_image_path).convert("RGB") if rgb_center_image_path else rgb_image_pil
77
 
78
  front_tensor = transform(rgb_image_pil).unsqueeze(0).to(device)
79
  left_tensor = transform(rgb_left_pil).unsqueeze(0).to(device)
 
81
  center_tensor = transform(rgb_center_pil).unsqueeze(0).to(device)
82
 
83
  if lidar_image_path:
84
+ lidar_array = np.load(lidar_image_path)
85
  if lidar_array.max() > 0: lidar_array = (lidar_array / lidar_array.max()) * 255.0
86
  lidar_pil = Image.fromarray(lidar_array.astype(np.uint8)).convert('RGB')
87
  else:
88
  lidar_pil = Image.fromarray(np.zeros((112, 112, 3), dtype=np.uint8))
89
  lidar_tensor = lidar_transform(lidar_pil).unsqueeze(0).to(device)
90
 
91
+ with open(measurements_path, 'r') as f: m_dict = json.load(f)
92
 
93
  measurements_tensor = torch.tensor([[
94
+ m_dict.get('x',0.0), m_dict.get('y',0.0), m_dict.get('theta',0.0), m_dict.get('speed',5.0),
95
+ m_dict.get('steer',0.0), m_dict.get('throttle',0.0), float(m_dict.get('brake',0.0)),
96
+ m_dict.get('command',2.0), float(m_dict.get('is_junction',0.0)), float(m_dict.get('should_brake',0.0))
 
97
  ]], dtype=torch.float32).to(device)
98
 
99
  target_point_tensor = torch.tensor([target_point_list], dtype=torch.float32).to(device)
 
110
  traffic, waypoints, is_junction, traffic_light, stop_sign, _ = outputs
111
 
112
  # --- 3. المعالجة اللاحقة والتصوّر ---
113
+ speed, pos, theta = m_dict.get('speed',5.0), [m_dict.get('x',0.0), m_dict.get('y',0.0)], m_dict.get('theta',0.0)
114
+ traffic_np, waypoints_np = traffic[0].detach().cpu().numpy().reshape(20,20,-1), waypoints[0].detach().cpu().numpy() * WAYPOINT_SCALE_FACTOR
115
+ tracker, controller = Tracker(), InterfuserController(ControllerConfig())
116
+ updated_traffic = tracker.update_and_predict(traffic_np.copy(), pos, theta, 0)
117
+ steer, throttle, brake, metadata = controller.run_step(speed, waypoints_np, is_junction.sigmoid()[0,1].item(), traffic_light.sigmoid()[0,0].item(), stop_sign.sigmoid()[0,1].item(), updated_traffic)
118
 
119
+ # ... (بقية الكود الخاص بالرسم والتصوّر لا يتغير) ...
 
 
 
 
 
 
 
 
 
 
 
120
  map_t0, counts_t0 = render(updated_traffic, t=0)
121
  map_t1, counts_t1 = render(updated_traffic, t=T1_FUTURE_TIME)
122
  map_t2, counts_t2 = render(updated_traffic, t=T2_FUTURE_TIME)
 
123
  wp_map = render_waypoints(waypoints_np)
124
  self_car_map = render_self_car(np.array([0,0]), [math.cos(0), math.sin(0)], [4.0, 2.0])
 
125
  map_t0 = cv2.add(cv2.add(map_t0, wp_map), self_car_map)
126
  map_t0 = cv2.resize(map_t0, (400, 400))
127
  map_t1 = cv2.add(ensure_rgb(map_t1), ensure_rgb(self_car_map)); map_t1 = cv2.resize(map_t1, (200, 200))
128
  map_t2 = cv2.add(ensure_rgb(map_t2), ensure_rgb(self_car_map)); map_t2 = cv2.resize(map_t2, (200, 200))
 
129
  display = DisplayInterface()
130
  light_state, stop_sign_state = "Red" if traffic_light.sigmoid()[0,0].item() > 0.5 else "Green", "Yes" if stop_sign.sigmoid()[0,1].item() > 0.5 else "No"
131
+ interface_data = {'camera_view': np.array(rgb_image_pil),'map_t0': map_t0,'map_t1': map_t1,'map_t2': map_t2,
132
+ 'text_info': {'Control': f"S:{steer:.2f} T:{throttle:.2f} B:{int(brake)}",'Light': f"L: {light_state}",'Stop': f"St: {stop_sign_state}"},
133
+ 'object_counts': {'t0': counts_t0,'t1': counts_t1,'t2': counts_t2}}
 
 
 
 
134
  dashboard_image = display.run_interface(interface_data)
135
+
136
  # --- 4. تجهيز المخرجات ---
137
+ result_dict = {"predicted_waypoints": waypoints_np.tolist(), "control_commands": {"steer": steer,"throttle": throttle,"brake": bool(brake)},
138
+ "perception": {"traffic_light_status": light_state,"stop_sign_detected": (stop_sign_state == "Yes"),"is_at_junction_prob": round(is_junction.sigmoid()[0,1].item(), 3)},
139
+ "metadata": {"speed_info": metadata[0],"perception_info": metadata[1],"stop_info": metadata[2],"safe_distance": metadata[3]}}
 
 
 
140
 
141
  return Image.fromarray(dashboard_image), result_dict
 
142
  except Exception as e:
143
  print(traceback.format_exc())
144
  raise gr.Error(f"حدث خطأ أثناء معالجة الإطار: {e}")
145
 
146
  # ==============================================================================
147
+ # 4. تعريف واجهة Gradio المحسّنة
148
  # ==============================================================================
 
149
  available_models = find_available_models()
150
 
151
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), css=".gradio-container {max-width: 95% !important;}") as demo:
 
 
152
  # مكون الحالة الخفي لتخزين النموذج الخاص بكل جلسة
153
  model_state = gr.State(value=None)
154
 
155
+ gr.Markdown("# 🚗 محاكاة القيادة الذاتية باستخدام Interfuser")
156
+ gr.Markdown("مرحباً بك في واجهة اختبار نموذج Interfuser. اتبع الخطوات أدناه لتشغيل المحاكاة على إطار واحد.")
 
 
 
 
 
157
 
158
+ with gr.Row():
159
+ # -- العمود الأيسر: الإعدادات والمدخلات --
160
+ with gr.Column(scale=1):
161
+ # --- الخطوة 1: اختيار النموذج ---
162
+ with gr.Box():
163
+ gr.Markdown("## ⚙️ الخطوة 1: اختر النموذج")
164
+ with gr.Row():
165
+ model_selector = gr.Dropdown(
166
+ label="النماذج المتاحة",
167
+ choices=available_models,
168
+ value=available_models[0] if available_models else "لم يتم العثور على نماذج"
169
+ )
170
+ status_textbox = gr.Textbox(label="حالة النموذج", interactive=False)
171
+
172
+ # --- الخطوة 2: رفع ملفات السيناريو ---
173
+ with gr.Box():
174
+ gr.Markdown("## 🗂️ الخطوة 2: ارفع ملفات السيناريو")
175
+
176
+ # المدخلات المطلوبة
177
+ with gr.Group():
178
+ gr.Markdown("**(مطلوب)**")
179
+ api_rgb_image_path = gr.File(label="صورة الكاميرا الأمامية (RGB)")
180
+ api_measurements_path = gr.File(label="ملف القياسات (JSON)")
181
+
182
+ # المدخلات الاختيارية
183
+ with gr.Accordion("📷 مدخلات اختيارية (كاميرات ومستشعرات إضافية)", open=False):
184
+ api_rgb_left_image_path = gr.File(label="كاميرا اليسار (RGB)")
185
+ api_rgb_right_image_path = gr.File(label="كاميرا اليمين (RGB)")
186
+ api_rgb_center_image_path = gr.File(label="كاميرا الوسط (RGB)")
187
+ api_lidar_image_path = gr.File(label="بيانات الليدار (NPY)")
188
+
189
+ api_target_point_list = gr.JSON(label="📍 النقطة المستهدفة (x, y)", value=[0.0, 100.0])
190
+
191
+ # زر التشغيل
192
+ api_run_button = gr.Button("🚀 شغل المحاكاة", variant="primary", scale=2)
193
+
194
+ # --- أمثلة جاهزة ---
195
+ with gr.Box():
196
+ gr.Markdown("### ✨ أمثلة جاهزة")
197
+ gr.Markdown("انقر على مثال لتعبئة الحقول تلقائياً (يتطلب وجود مجلد `examples` بنفس بنية البيانات).")
198
+ gr.Examples(
199
+ examples=[
200
+ [os.path.join(EXAMPLES_DIR, "sample1", "rgb.png"), None, None, None, None, os.path.join(EXAMPLES_DIR, "sample1", "measurements.json")],
201
+ [os.path.join(EXAMPLES_DIR, "sample2", "rgb.png"), None, None, None, None, os.path.join(EXAMPLES_DIR, "sample2", "measurements.json")]
202
+ ],
203
+ inputs=[api_rgb_image_path, api_rgb_left_image_path, api_rgb_right_image_path, api_rgb_center_image_path, api_lidar_image_path, api_measurements_path],
204
+ label="اختر سيناريو اختبار"
205
+ )
206
+
207
+ # -- العمود الأيمن: المخرجات --
208
+ with gr.Column(scale=2):
209
+ with gr.Box():
210
+ gr.Markdown("## 📊 الخطوة 3: شاهد النتائج")
211
+ api_output_image = gr.Image(label="لوحة التحكم المرئية (Dashboard)", type="pil", interactive=False)
212
+ with gr.Accordion("عرض نتائج JSON التفصيلية", open=False):
213
+ api_output_json = gr.JSON(label="النتائج المهيكلة (JSON)")
214
+
215
+ # --- ربط منطق الواجهة ---
216
  if available_models:
217
  demo.load(fn=load_model, inputs=model_selector, outputs=[model_state, status_textbox])
218
 
219
  model_selector.change(fn=load_model, inputs=model_selector, outputs=[model_state, status_textbox])
 
 
220
 
221
+ api_run_button.click(
222
+ fn=run_single_frame,
223
+ inputs=[model_state, api_rgb_image_path, api_rgb_left_image_path, api_rgb_right_image_path,
224
+ api_rgb_center_image_path, api_lidar_image_path, api_measurements_path, api_target_point_list],
225
+ outputs=[api_output_image, api_output_json],
226
+ api_name="run_single_frame"
227
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
 
229
  # ==============================================================================
230
  # 5. تشغيل التطبيق
 
232
  if __name__ == "__main__":
233
  if not available_models:
234
  print("تحذير: لم يتم العثور على أي ملفات نماذج (.pth) في مجلد 'model/weights'.")
235
+ demo.queue().launch(debug=True, share=True) # share=True لإنشاء رابط عام مؤقت