altawil committed on
Commit ca76580 · verified · 1 Parent(s): 49410d8

Update simulation_modules.py

Files changed (1):
  1. simulation_modules.py +832 -318

simulation_modules.py CHANGED
@@ -1,336 +1,850 @@
- # simulation_modules.py
-
- import torch
  import numpy as np
  import cv2
  import math
- from collections import deque
- from typing import List, Tuple, Dict, Any, Optional
-
- # ================== Constants ==================
- WAYPOINT_SCALE_FACTOR = 5.0
- T1_FUTURE_TIME = 1.0
- T2_FUTURE_TIME = 2.0
- PIXELS_PER_METER = 8
- MAX_DISTANCE = 32
- IMG_SIZE = MAX_DISTANCE * PIXELS_PER_METER * 2
- EGO_CAR_X = IMG_SIZE // 2
- EGO_CAR_Y = IMG_SIZE - (4.0 * PIXELS_PER_METER)
-
- COLORS = {
-     'vehicle': [255, 0, 0],
-     'pedestrian': [0, 255, 0],
-     'cyclist': [0, 0, 255],
-     'waypoint': [255, 255, 0],
-     'ego_car': [255, 255, 255]
- }
-
- # ================== PID Controller ==================
  class PIDController:
      def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
-         self._K_P = K_P
-         self._K_I = K_I
-         self._K_D = K_D
          self._window = deque([0 for _ in range(n)], maxlen=n)
-
      def step(self, error):
          self._window.append(error)
-         if len(self._window) >= 2:
-             integral = np.mean(self._window)
-             derivative = self._window[-1] - self._window[-2]
-         else:
-             integral = derivative = 0.0
          return self._K_P * error + self._K_I * integral + self._K_D * derivative

- # ================== Helper Functions ==================
- def ensure_rgb(image):
-     if len(image.shape) == 2:
-         return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
-     elif image.shape[2] == 1:
-         return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
-     return image
-
- def add_rect(img, loc, ori, box, value, color):
-     center_x = int(loc[0] * PIXELS_PER_METER + MAX_DISTANCE * PIXELS_PER_METER)
-     center_y = int(loc[1] * PIXELS_PER_METER + MAX_DISTANCE * PIXELS_PER_METER)
-     size_px = (int(box[0] * PIXELS_PER_METER), int(box[1] * PIXELS_PER_METER))
-     angle_deg = -np.degrees(math.atan2(ori[1], ori[0]))
-     box_points = cv2.boxPoints(((center_x, center_y), size_px, angle_deg))
-     box_points = np.int32(box_points)
-     adjusted_color = [int(c * value) for c in color]
-     cv2.fillConvexPoly(img, box_points, adjusted_color)
-     return img
-
- def render(traffic_grid, t=0):
-     img = np.zeros((IMG_SIZE, IMG_SIZE, 3), dtype=np.uint8)
-     counts = {'vehicles': 0, 'pedestrians': 0, 'cyclists': 0}
-
-     if isinstance(traffic_grid, torch.Tensor):
-         traffic_grid = traffic_grid.cpu().numpy()
-
-     h, w, c = traffic_grid.shape
-     for y in range(h):
-         for x in range(w):
-             for ch in range(c):
-                 if traffic_grid[y, x, ch] > 0.1:
-                     world_x = (x / w - 0.5) * MAX_DISTANCE * 2
-                     world_y = (y / h - 0.5) * MAX_DISTANCE * 2
-
-                     if ch < 3:
-                         color = COLORS['vehicle']
-                         counts['vehicles'] += 1
-                         box_size = [2.0, 4.0]
-                     elif ch < 5:
-                         color = COLORS['pedestrian']
-                         counts['pedestrians'] += 1
-                         box_size = [0.8, 0.8]
-                     else:
-                         color = COLORS['cyclist']
-                         counts['cyclists'] += 1
-                         box_size = [1.2, 2.0]
-
-                     img = add_rect(img, [world_x, world_y], [1.0, 0.0],
-                                    box_size, traffic_grid[y, x, ch], color)
-
-     return img, counts
-
- def render_waypoints(waypoints, scale_factor=WAYPOINT_SCALE_FACTOR):
-     img = np.zeros((IMG_SIZE, IMG_SIZE, 3), dtype=np.uint8)
-
-     if isinstance(waypoints, torch.Tensor):
-         waypoints = waypoints.cpu().numpy()
-
-     scaled_waypoints = waypoints * scale_factor
-
-     for i, wp in enumerate(scaled_waypoints):
-         px = int(wp[0] * PIXELS_PER_METER + IMG_SIZE // 2)
-         py = int(wp[1] * PIXELS_PER_METER + IMG_SIZE // 2)
-
-         if 0 <= px < IMG_SIZE and 0 <= py < IMG_SIZE:
-             radius = max(3, 8 - i)
-             cv2.circle(img, (px, py), radius, COLORS['waypoint'], -1)
-
-             if i > 0:
-                 prev_px = int(scaled_waypoints[i-1][0] * PIXELS_PER_METER + IMG_SIZE // 2)
-                 prev_py = int(scaled_waypoints[i-1][1] * PIXELS_PER_METER + IMG_SIZE // 2)
-                 if 0 <= prev_px < IMG_SIZE and 0 <= prev_py < IMG_SIZE:
-                     cv2.line(img, (prev_px, prev_py), (px, py), COLORS['waypoint'], 2)
-
-     return img
-
- def render_self_car(img):
-     car_pos = [0, -4.0]
-     car_ori = [1.0, 0.0]
-     car_size = [2.0, 4.5]
-     return add_rect(img, car_pos, car_ori, car_size, 1.0, COLORS['ego_car'])
-
- # ================== Tracker Classes ==================
- class TrackedObject:
-     def __init__(self, obj_id: int):
-         self.id = obj_id
-         self.last_step = 0
-         self.last_pos = [0.0, 0.0]
-         self.historical_pos = []
-         self.historical_steps = []
-         self.velocity = [0.0, 0.0]
-         self.confidence = 1.0
-
-     def update(self, step: int, obj_info: List[float]):
-         self.last_step = step
-         self.last_pos = obj_info[:2]
-         self.historical_pos.append(obj_info[:2])
-         self.historical_steps.append(step)
-
-         if len(self.historical_pos) >= 2:
-             dt = self.historical_steps[-1] - self.historical_steps[-2]
-             if dt > 0:
-                 dx = self.historical_pos[-1][0] - self.historical_pos[-2][0]
-                 dy = self.historical_pos[-1][1] - self.historical_pos[-2][1]
-                 self.velocity = [dx/dt, dy/dt]
-
-     def predict_position(self, future_time: float) -> List[float]:
-         predicted_x = self.last_pos[0] + self.velocity[0] * future_time
-         predicted_y = self.last_pos[1] + self.velocity[1] * future_time
-         return [predicted_x, predicted_y]
-
-     def is_alive(self, current_step: int, max_age: int = 5) -> bool:
-         return (current_step - self.last_step) <= max_age

- class Tracker:
-     def __init__(self, frequency: int = 10):
-         self.tracks: List[TrackedObject] = []
-         self.frequency = frequency
-         self.next_id = 0
-         self.current_step = 0
-
-     def update_and_predict(self, detections: List[Dict], step: int) -> np.ndarray:
-         self.current_step = step
-
-         for detection in detections:
-             pos = detection.get('position', [0, 0])
-             feature = detection.get('feature', 0.5)
-
-             best_match = None
-             min_distance = float('inf')
-
-             for track in self.tracks:
-                 if track.is_alive(step):
-                     distance = np.linalg.norm(np.array(pos) - np.array(track.last_pos))
-                     if distance < min_distance and distance < 2.0:
-                         min_distance = distance
-                         best_match = track
-
-             if best_match:
-                 best_match.update(step, pos + [feature])
-             else:
-                 new_track = TrackedObject(self.next_id)
-                 new_track.update(step, pos + [feature])
-                 self.tracks.append(new_track)
-                 self.next_id += 1
-
-         self.tracks = [t for t in self.tracks if t.is_alive(step)]
-         return self._generate_prediction_grid()
-
-     def _generate_prediction_grid(self) -> np.ndarray:
-         grid = np.zeros((20, 20, 7), dtype=np.float32)
-
-         for track in self.tracks:
-             if track.is_alive(self.current_step):
-                 current_pos = track.last_pos
-                 future_pos_t1 = track.predict_position(T1_FUTURE_TIME)
-                 future_pos_t2 = track.predict_position(T2_FUTURE_TIME)
-
-                 for pos in [current_pos, future_pos_t1, future_pos_t2]:
-                     grid_x = int((pos[0] / (MAX_DISTANCE * 2) + 0.5) * 20)
-                     grid_y = int((pos[1] / (MAX_DISTANCE * 2) + 0.5) * 20)
-
-                     if 0 <= grid_x < 20 and 0 <= grid_y < 20:
-                         channel = 0
-                         grid[grid_y, grid_x, channel] = max(grid[grid_y, grid_x, channel], track.confidence)
-
-         return grid
-
- # ================== Controller Classes ==================
- class ControllerConfig:
-     def __init__(self):
-         self.turn_KP = 1.0
-         self.turn_KI = 0.1
-         self.turn_KD = 0.1
-         self.turn_n = 20
-
-         self.speed_KP = 0.5
-         self.speed_KI = 0.05
-         self.speed_KD = 0.1
-         self.speed_n = 20
-
-         self.max_speed = 6.0
-         self.max_throttle = 0.75
-         self.clip_delta = 0.25
-
-         self.brake_speed = 0.4
-         self.brake_ratio = 1.1

  class InterfuserController:
-     def __init__(self, config: ControllerConfig):
-         self.config = config
-         self.turn_controller = PIDController(config.turn_KP, config.turn_KI, config.turn_KD, config.turn_n)
-         self.speed_controller = PIDController(config.speed_KP, config.speed_KI, config.speed_KD, config.speed_n)
-         self.last_steer = 0.0
-         self.last_throttle = 0.0
-         self.target_speed = 3.0
-
-     def run_step(self, current_speed: float, waypoints: np.ndarray,
-                  junction: float, traffic_light_state: float,
-                  stop_sign: float, meta_data: Dict) -> Tuple[float, float, bool, str]:
-
-         if isinstance(waypoints, torch.Tensor):
-             waypoints = waypoints.cpu().numpy()
-
-         if len(waypoints) > 1:
-             dx = waypoints[1][0] - waypoints[0][0]
-             dy = waypoints[1][1] - waypoints[0][1]
-             target_yaw = math.atan2(dy, dx)
-             steer = self.turn_controller.step(target_yaw)
          else:
-             steer = 0.0
-
-         steer = np.clip(steer, -1.0, 1.0)
-
-         target_speed = self.target_speed
-         if junction > 0.5:
-             target_speed *= 0.7
-         if abs(steer) > 0.3:
-             target_speed *= 0.8
-
          speed_error = target_speed - current_speed
-         throttle = self.speed_controller.step(speed_error)
-         throttle = np.clip(throttle, 0.0, self.config.max_throttle)
-
-         brake = False
-         if traffic_light_state > 0.5 or stop_sign > 0.5 or current_speed > self.config.max_speed:
-             brake = True
-             throttle = 0.0
-
          self.last_steer = steer
-         self.last_throttle = throttle
-
-         metadata = f"Speed:{current_speed:.1f} Target:{target_speed:.1f} Junction:{junction:.2f}"
-
-         return steer, throttle, brake, metadata

- # ================== Display Interface ==================
  class DisplayInterface:
-     def __init__(self, width: int = 1200, height: int = 600):
-         self._width = width
-         self._height = height
-         self.camera_width = width // 2
-         self.camera_height = height
-         self.map_width = width // 2
-         self.map_height = height // 3
-
-     def run_interface(self, data: Dict[str, Any]) -> np.ndarray:
-         dashboard = np.zeros((self._height, self._width, 3), dtype=np.uint8)
-
-         # Camera view
-         camera_view = data.get('camera_view')
-         if camera_view is not None:
-             camera_resized = cv2.resize(camera_view, (self.camera_width, self.camera_height))
-             dashboard[:, :self.camera_width] = camera_resized
-
-         # Maps
-         map_start_x = self.camera_width
-
-         map_t0 = data.get('map_t0')
-         if map_t0 is not None:
-             map_resized = cv2.resize(map_t0, (self.map_width, self.map_height))
-             dashboard[:self.map_height, map_start_x:] = map_resized
-             cv2.putText(dashboard, "Current (t=0)", (map_start_x + 10, 30),
-                         cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
-
-         map_t1 = data.get('map_t1')
-         if map_t1 is not None:
-             map_resized = cv2.resize(map_t1, (self.map_width, self.map_height))
-             y_start = self.map_height
-             dashboard[y_start:y_start + self.map_height, map_start_x:] = map_resized
-             cv2.putText(dashboard, f"Future (t={T1_FUTURE_TIME}s)",
-                         (map_start_x + 10, y_start + 30), cv2.FONT_HERSHEY_SIMPLEX,
-                         0.7, (255, 255, 255), 2)
-
-         map_t2 = data.get('map_t2')
-         if map_t2 is not None:
-             map_resized = cv2.resize(map_t2, (self.map_width, self.map_height))
-             y_start = self.map_height * 2
-             dashboard[y_start:, map_start_x:] = map_resized
-             cv2.putText(dashboard, f"Future (t={T2_FUTURE_TIME}s)",
-                         (map_start_x + 10, y_start + 30), cv2.FONT_HERSHEY_SIMPLEX,
-                         0.7, (255, 255, 255), 2)
-
-         # Text info
-         text_info = data.get('text_info', {})
-         y_offset = 50
-         for key, value in text_info.items():
-             cv2.putText(dashboard, value, (10, y_offset), cv2.FONT_HERSHEY_SIMPLEX,
-                         0.6, (0, 255, 0), 2)
-             y_offset += 30
-
-         return dashboard
+ # ===================================================================
+ # File: utils.py (v2.0 - compatible with dynamic maps)
+ # ===================================================================
  import numpy as np
+ from collections import deque
  import cv2
  import math
+ from typing import Dict, List, Tuple, Optional
+ import torch
+ from torchvision import transforms
+ import os
+ import json
+ import gzip
+ from torch.utils.data import Dataset
+ from dataclasses import dataclass
+ import time
+
+
+ def find_peak_box_and_classify(data):
+     """
+     [v2.0] Finds peaks in the detection grid and extracts object information.
+     This version is flexible and works with any map size.
+     """
+     # Fix: read the map dimensions dynamically
+     H, W, _ = data.shape
+
+     # Fix: the safety border is padded based on the actual map size
+     det_data = np.zeros((H + 2, W + 2, 7))
+     det_data[1:H+1, 1:W+1] = data
+
+     detected_objects, object_counts = [], {"car": 0, "bike": 0, "pedestrian": 0, "unknown": 0}
+
+     # Fix: the loop iterates over the actual map size
+     for i in range(1, H + 1):
+         for j in range(1, W + 1):
+             confidence = det_data[i, j, 0]
+             # The confidence threshold was raised slightly to reduce false detections
+             if confidence > 0.2:
+                 # Local peak search
+                 if (confidence >= det_data[i, j-1, 0] and confidence >= det_data[i, j+1, 0] and
+                         confidence >= det_data[i-1, j, 0] and confidence >= det_data[i+1, j, 0]):
+
+                     # Extract the box data
+                     length, width = det_data[i, j, 4], det_data[i, j, 5]
+
+                     # Simple classification based on dimensions
+                     if length > 3.5: obj_type = 'car'
+                     elif length > 1.5 and width > 0.5: obj_type = 'bike'
+                     else: obj_type = 'pedestrian'
+
+                     object_counts[obj_type] += 1
+
+                     # Fix: grid coordinates are now relative to the original map (i-1, j-1)
+                     detected_objects.append({
+                         'grid_coords': (i - 1, j - 1),
+                         'raw_data': det_data[i, j],
+                         'type': obj_type
+                     })
+     return detected_objects, object_counts
+
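For illustration only (not part of the commit), a minimal sketch of exercising `find_peak_box_and_classify` on a synthetic detection grid; the (H, W, 7) layout with confidence in channel 0 and box length/width in channels 4 and 5 is assumed from the code above:

```python
import numpy as np

# Hypothetical 20x20x7 detection grid: channel 0 = confidence,
# channels 4/5 = box length/width in meters (layout assumed from the function above).
grid = np.zeros((20, 20, 7), dtype=np.float32)
grid[10, 12, 0] = 0.9            # a confident peak
grid[10, 12, 4:6] = [4.2, 1.8]   # roughly car-sized

objects, counts = find_peak_box_and_classify(grid)
print(counts)                     # {'car': 1, 'bike': 0, 'pedestrian': 0, 'unknown': 0}
print(objects[0]['grid_coords'])  # (10, 12) in original-map coordinates
```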
+ def check_for_nearby_obstacle(meta_data, grid_conf, max_dist=15.0, threshold=0.4):
+     """
+     [v2.0] Checks whether any obstacle is close to the ego vehicle.
+     Note: returns (True, distance) when a nearby obstacle is found, otherwise False.
+     """
+     detected_objects, _ = find_peak_box_and_classify(meta_data)
+     if not detected_objects:
+         return False
+
+     # Use the grid settings from the configuration dictionary
+     x_res = grid_conf['x_res']
+     y_res = grid_conf['y_res']
+     x_min = grid_conf['x_min']
+     y_min = grid_conf['y_min']
+
+     for obj_dict in detected_objects:
+         if obj_dict['raw_data'][0] > threshold:
+             raw_data = obj_dict['raw_data']
+             grid_i, grid_j = obj_dict['grid_coords']
+             offset_x, offset_y = raw_data[1], raw_data[2]
+
+             # Fix: use the correct coordinate system (Y forward, X left);
+             # note that grid_j maps to x_rel and grid_i maps to y_rel
+             x_rel = (grid_j * x_res) + x_min + (x_res / 2) + offset_x
+             y_rel = (grid_i * y_res) + y_min + (y_res / 2) + offset_y
+
+             distance = np.linalg.norm([x_rel, y_rel])
+
+             if distance < max_dist:
+                 print(f"🛑 Nearby obstacle detected at {distance:.2f} m.")
+                 return True, distance
+
+     # print("✅ No nearby obstacle.")
+
+     return False
+
+
+ # ===================================================================
+ # Helpers - pure, decoupled functions
+ # ===================================================================
+
+ def convert_grid_to_relative_xy(grid_i, grid_j, grid_conf):
+     """
+     Converts grid coordinates (i, j) into vehicle-relative metric coordinates
+     (y forward, x lateral).
+
+     Args:
+         grid_i (int): row index in the grid (forward axis).
+         grid_j (int): column index in the grid (lateral axis).
+         grid_conf (dict): dictionary holding the grid extents and resolution.
+
+     Returns:
+         tuple[float, float]: relative coordinates (relative_y, relative_x) of the cell center.
+     """
+     x_res = grid_conf['x_res']
+     y_res = grid_conf['y_res']
+     x_min = grid_conf['x_min']
+     y_min = grid_conf['y_min']
+
+     # Cell-center position
+     relative_x = (grid_j * x_res) + x_min + (x_res / 2.0)
+     relative_y = (grid_i * y_res) + y_min + (y_res / 2.0)
+
+     return relative_y, relative_x
+
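As a worked check (illustrative values, not from the commit): with a hypothetical grid_conf of 1 m resolution and a -10 m origin on both axes, cell (12, 4) maps to the center of that cell in vehicle coordinates:

```python
grid_conf = {'x_res': 1.0, 'y_res': 1.0, 'x_min': -10.0, 'y_min': -10.0}  # assumed values

# relative_x = 4 * 1.0 + (-10.0) + 0.5 = -5.5 m (lateral)
# relative_y = 12 * 1.0 + (-10.0) + 0.5 = 2.5 m (forward)
rel_y, rel_x = convert_grid_to_relative_xy(12, 4, grid_conf)
print(rel_y, rel_x)  # 2.5 -5.5
```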
+ # ===================================================================
+ # Core Classes
+ # ===================================================================
+
+ class TrackedObject:
+     """
+     Represents a single object tracked over time.
+     Keeps a unique ID and a history of its positions.
+     """
+     def __init__(self, obj_id, initial_detection, frame_num):
+         self.id = obj_id
+         self.type = initial_detection['type']
+         self.history = deque(maxlen=20)  # store the last 20 positions
+         self.last_frame_seen = frame_num
+         self.last_confidence = 0.0
+
+         self.update(initial_detection, frame_num)
+
+     def update(self, detection, frame_num):
+         """Update the object state with new detection data."""
+         self.history.append(detection['global_pos'])
+         self.last_frame_seen = frame_num
+         self.last_confidence = detection['raw_data'][0]
+
+     @property
+     def last_pos(self):
+         """Return the most recently recorded position of the object."""
+         return self.history[-1] if self.history else None
+
+     def __repr__(self):
+         """Readable representation for debugging."""
+         return f"Track(ID={self.id}, Type='{self.type}', LastSeen={self.last_frame_seen})"
+
+
+ class Tracker:
+     """
+     Tracks objects detected in BEV maps across frames.
+     Uses a global coordinate frame for matching so that object identities are preserved.
+     """
+     def __init__(self, grid_conf, match_threshold=2.5, prune_age=5):
+         """
+         Args:
+             grid_conf (dict): grid settings used for coordinate conversion.
+             match_threshold (float): maximum distance (in meters) for a detection to match a track.
+             prune_age (int): number of frames to wait before deleting an inactive track.
+         """
+         self.grid_conf = grid_conf
+         self.match_threshold = match_threshold
+         self.prune_age = prune_age
+
+         self.tracks = {}  # dictionary of tracks keyed by ID
+         self.next_track_id = 0
+
+     def process_frame(self, bev_map, ego_pos, ego_theta, frame_num):
+         """
+         Main entry point: processes one frame, detects objects, and updates the tracks.
+         """
+         # 1. Detect objects in the BEV map
+         #    (assumes find_peak_box_and_classify, defined above)
+         detections, _ = find_peak_box_and_classify(bev_map)
+
+         # 2. Convert object positions to global coordinates
+         self._add_global_positions(detections, ego_pos, ego_theta)
+
+         # 3. Match detections to existing tracks
+         matches, unmatched_detections = self._match_detections_to_tracks(detections)
+
+         # 4. Update matched tracks
+         for track_id, detection_idx in matches.items():
+             self.tracks[track_id].update(detections[detection_idx], frame_num)
+
+         # 5. Create new tracks for unmatched detections
+         for detection_idx in unmatched_detections:
+             self._create_new_track(detections[detection_idx], frame_num)
+
+         # 6. Prune stale tracks (not seen for a while)
+         self._prune_tracks(frame_num)
+
+         return list(self.tracks.values())
+
+     def _add_global_positions(self, detections, ego_pos, ego_theta):
+         """Adds a 'global_pos' key to every detection in the list."""
+         R = np.array([[np.cos(ego_theta), -np.sin(ego_theta)],
+                       [np.sin(ego_theta), np.cos(ego_theta)]])
+
+         for det in detections:
+             grid_i, grid_j = det['grid_coords']
+             raw_data = det['raw_data']
+
+             # Relative position (y forward, x lateral)
+             relative_y, relative_x = convert_grid_to_relative_xy(grid_i, grid_j, self.grid_conf)
+             relative_x += raw_data[1]  # lateral offset in meters
+             relative_y += raw_data[2]  # forward offset in meters
+
+             # Transform into global coordinates
+             global_offset = R.dot(np.array([relative_y, relative_x]))
+             det['global_pos'] = ego_pos + global_offset
+
+     def _match_detections_to_tracks(self, detections):
+         """Simple greedy matching between detections and existing tracks."""
+         matches = {}
+         unmatched_detections = set(range(len(detections)))
+
+         if not self.tracks or not detections:
+             return matches, unmatched_detections
+
+         active_track_ids = list(self.tracks.keys())
+
+         for track_id in active_track_ids:
+             track = self.tracks[track_id]
+             min_dist = self.match_threshold
+             best_det_idx = -1
+
+             for i in range(len(detections)):
+                 if i not in unmatched_detections: continue
+
+                 dist = np.linalg.norm(track.last_pos - detections[i]['global_pos'])
+                 if dist < min_dist:
+                     min_dist = dist
+                     best_det_idx = i
+
+             if best_det_idx != -1:
+                 matches[track_id] = best_det_idx
+                 unmatched_detections.remove(best_det_idx)
+
+         return matches, unmatched_detections
+
+     def _create_new_track(self, detection, frame_num):
+         """Create a new track and add it to the dictionary."""
+         new_id = self.next_track_id
+         new_track = TrackedObject(new_id, detection, frame_num)
+         self.tracks[new_id] = new_track
+         self.next_track_id += 1
+
+     def _prune_tracks(self, current_frame_num):
+         """Delete tracks that have not been updated for too long."""
+         ids_to_delete = []
+         for track_id, track in self.tracks.items():
+             if current_frame_num - track.last_frame_seen > self.prune_age:
+                 ids_to_delete.append(track_id)
+
+         for track_id in ids_to_delete:
+             del self.tracks[track_id]
+
+
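For illustration only (not part of the commit), a minimal sketch of driving the Tracker over two frames with a synthetic BEV grid; the grid_conf keys (x_res, y_res, x_min, y_min) are assumed from the helpers above:

```python
import numpy as np

grid_conf = {'x_res': 1.0, 'y_res': 1.0, 'x_min': -10.0, 'y_min': -10.0}  # assumed config
tracker = Tracker(grid_conf, match_threshold=2.5, prune_age=5)

bev = np.zeros((20, 20, 7), dtype=np.float32)
bev[10, 12, 0] = 0.9            # one confident detection
bev[10, 12, 4:6] = [4.2, 1.8]

ego_pos = np.array([100.0, 50.0])  # global ego position in meters
for frame in range(2):
    tracks = tracker.process_frame(bev, ego_pos, ego_theta=0.0, frame_num=frame)
    print(frame, tracks)  # the same track ID should persist across both frames
```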
  class PIDController:
+
      def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
+         self._K_P, self._K_I, self._K_D = K_P, K_I, K_D
          self._window = deque([0 for _ in range(n)], maxlen=n)
      def step(self, error):
          self._window.append(error)
+         integral = np.mean(self._window) if len(self._window) > 1 else 0.0
+         derivative = (self._window[-1] - self._window[-2]) if len(self._window) > 1 else 0.0
          return self._K_P * error + self._K_I * integral + self._K_D * derivative
+     def reset(self):
+         self._window.clear()
+         self._window.extend([0 for _ in range(self._window.maxlen)])

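As a quick illustration (not part of the commit), stepping the controller on a constant error shows the proportional and integral terms at work, and reset() restoring the window to zeros:

```python
pid = PIDController(K_P=1.0, K_I=0.1, K_D=0.05, n=20)
for _ in range(3):
    out = pid.step(1.0)   # constant error of 1.0
    print(round(out, 3))  # P term stays 1.0; the I term grows as the window mean rises
pid.reset()               # clears the window back to zeros
```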
  class InterfuserController:
+     """
+     [v34.0] Memory-enhanced controller.
+     It addresses the "fatal hesitation" problem by:
+     1. Not trusting the initial speed estimates of newly detected objects.
+     2. Adding a "grace period" (short-term memory) that keeps the controller cautious
+        after losing the track of the vehicle being followed, preventing unsafe acceleration.
+     """
+     def __init__(self, config_dict):
+         self.config = config_dict
+         c = self.config['controller_params']
+         self.freq = self.config.get('frequency', 10.0)  # system frequency (Hz)
+
+         # 1. Core PID controllers
+         self.turn_controller = PIDController(c['turn_KP'], c['turn_KI'], c['turn_KD'], c['turn_n'])
+         self.speed_controller = PIDController(c['speed_KP'], c['speed_KI'], c['speed_KD'], c['speed_n'])
+
+         # 2. Integrated tracker
+         self.tracker = Tracker(grid_conf=config_dict.get('grid_conf', {}),
+                                match_threshold=c.get('tracker_match_thresh', 2.5),
+                                prune_age=c.get('tracker_prune_age', 5))
+
+         # 3. State variables
+         self.current_target_speed = 0.0
+         self.last_steer = 0
+
+         # [Change] memory and cautious-behaviour variables
+         self.last_followed_track_id = -1
+         self.follow_grace_period_timer = 0
+
+         # Other state variables
+         self.stop_sign_timer, self.red_light_block_timer = 0, 0
+         self.stop_steps_counter, self.forced_move_timer = 0, 0
+
+     def run_step(self, speed, waypoints, junction, traffic_light, stop_sign, bev_map, ego_pos, ego_theta, frame_num):
+         active_tracks = self.tracker.process_frame(bev_map, ego_pos, ego_theta, frame_num)
+         self._update_system_states(speed, traffic_light)
+         steer = self._get_steering_command(waypoints, speed)
+
+         final_goal_speed, reason = self._get_goal_speed(
+             speed, waypoints, traffic_light, stop_sign, junction, active_tracks, ego_pos
+         )
+
+         self._apply_speed_smoothing(final_goal_speed)
+         throttle, brake = self._get_longitudinal_control(speed, self.current_target_speed)
+
+         final_reason = reason if brake else "Cruising"
+         if self.forced_move_timer > 0:
+             throttle, brake, final_reason = self.config['controller_params']['forced_throttle'], False, "Forced Move"
+
+         return steer, throttle, brake, {'target_speed': self.current_target_speed, 'brake_reason': final_reason, 'active_tracks': len(active_tracks)}
+
+     def _get_goal_speed(self, current_speed, waypoints, traffic_light, stop_sign, junction, active_tracks, ego_pos):
+         c = self.config['controller_params']
+         max_speed = c['max_speed']
+
+         # Rule 1: hard safety rules
+         if (traffic_light > c['light_threshold']): return 0.0, "Red Light"
+         if self._is_stop_sign_active(stop_sign, junction): return 0.0, "Stop Sign"
+
+         # Rule 2: dynamic obstacle-avoidance logic with memory
+         obstacle_speed_limit, obstacle_reason, is_following = self._obstacle_avoidance_logic(active_tracks, ego_pos, current_speed)
+         if is_following:
+             return obstacle_speed_limit, obstacle_reason
+
+         # Rule 3: navigation logic (waypoints)
+         return self._navigation_logic(waypoints, max_speed)
+
+     def _obstacle_avoidance_logic(self, active_tracks, ego_pos, current_speed):
+         """[New] All obstacle-handling logic lives here."""
+         c = self.config['controller_params']
+         obstacle_speed_limit = c['max_speed']
+         is_following_a_track = False
+
+         if self.follow_grace_period_timer > 0:
+             self.follow_grace_period_timer -= 1
+
+         for track in active_tracks:
+             distance = np.linalg.norm(track.last_pos - ego_pos)
+
+             if distance < c.get('critical_distance', 4.0):
+                 self.last_followed_track_id = -1
+                 return 0.0, f"Critical Obstacle (ID: {track.id})", True
+
+             if distance < c.get('follow_distance', 12.0):
+                 is_following_a_track = True
+                 self.last_followed_track_id = track.id
+                 self.follow_grace_period_timer = c.get('follow_grace_period', int(2 * self.freq))  # two seconds of memory
+
+                 track_speed = self._estimate_track_speed(track)
+
+                 # [New logic] ignore unreliable initial estimates
+                 if track_speed < 0.1 and len(track.history) < 3:
+                     # Do not trust the estimate; hold the current speed for now to avoid unnecessary braking
+                     target_speed = current_speed
+                 else:
+                     target_speed = track_speed * c.get('speed_match_factor', 0.9)
+
+                 if target_speed < obstacle_speed_limit:
+                     obstacle_speed_limit = target_speed
+
+         if is_following_a_track:
+             return obstacle_speed_limit, f"Following ID {self.last_followed_track_id}", True
+
+         # [New logic] the followed vehicle is no longer visible but the memory is still active
+         if self.follow_grace_period_timer > 0:
+             cautious_speed = c.get('cautious_speed', 5.0)  # low, cautious speed
+             return min(current_speed, cautious_speed), f"Cautious (Lost Track {self.last_followed_track_id})", True
+
+         return -1, "No Obstacle", False  # signals that there is no obstacle
+
+     def _navigation_logic(self, waypoints, max_speed):
+         """[New] Final-goal approach logic."""
+         if not waypoints.any(): return max_speed, "Cruising (No Waypoints)"
+
+         APPROACH_ZONE, STOPPING_ZONE, MIN_APPROACH_SPEED = 15.0, 3.0, 2.5
+         distance_to_target = np.linalg.norm(waypoints[-1].numpy())  # .numpy() for safety with torch tensors
+
+         if distance_to_target > APPROACH_ZONE:
+             return max_speed, "Cruising"
+         elif distance_to_target > STOPPING_ZONE:
+             ratio = (distance_to_target - STOPPING_ZONE) / (APPROACH_ZONE - STOPPING_ZONE)
+             return MIN_APPROACH_SPEED + ratio * (max_speed - MIN_APPROACH_SPEED), "Approaching Target"
          else:
+             return 0.0, "Stopping at Target"
+
+     def _apply_speed_smoothing(self, final_goal_speed):
+         """[New] Speed-smoothing (ramp) logic."""
+         c = self.config['controller_params']
+         accel_rate = c.get('accel_rate', 0.1)
+         decel_rate = c.get('decel_rate', 0.2)
+
+         if final_goal_speed > self.current_target_speed:
+             self.current_target_speed = min(self.current_target_speed + accel_rate, final_goal_speed)
+         elif final_goal_speed < self.current_target_speed:
+             self.current_target_speed = max(self.current_target_speed - decel_rate, final_goal_speed)
+
+     def _estimate_track_speed(self, track):
+         if len(track.history) < 2: return 0.0
+         dist_moved = np.linalg.norm(track.history[-1] - track.history[-2])
+         return dist_moved * self.freq  # speed = distance per frame * frequency
+
+     # --- the remaining helper methods are unchanged ---
+     def _is_stop_sign_active(self, stop_sign, junction):
+         c = self.config['controller_params']
+         is_active = self.stop_sign_timer > 0
+         if self.stop_sign_timer > 0: self.stop_sign_timer -= 1
+         elif stop_sign > c['stop_threshold']: self.stop_sign_timer = c['stop_sign_duration']
+         if junction < 0.1: self.stop_sign_timer = 0
+         return is_active
+
+     def _get_longitudinal_control(self, current_speed, target_speed):
+         c = self.config['controller_params']
          speed_error = target_speed - current_speed
+         control_signal = self.speed_controller.step(speed_error)
+         if control_signal > 0:
+             throttle, brake = np.clip(control_signal, 0.0, c['max_throttle']), False
+         else:
+             throttle, brake = 0.0, abs(control_signal) > c['brake_sensitivity']
+         if target_speed < c['min_speed']: throttle, brake = 0.0, True
+         return throttle, brake
+
+     def _update_system_states(self, speed, traffic_light):
+         c = self.config['controller_params']
+         if speed < 0.1: self.stop_steps_counter += 1
+         else: self.stop_steps_counter = 0
+         if self.stop_steps_counter > c['max_stop_time']:
+             self.forced_move_timer, self.stop_steps_counter = c['forced_move_duration'], 0
+         if self.forced_move_timer > 0: self.forced_move_timer -= 1
+         if self.red_light_block_timer > 0: self.red_light_block_timer -= 1
+         elif speed < 0.1 and traffic_light > c['light_threshold']:
+             self.red_light_block_timer = c['max_red_light_time']
+         else:
+             self.red_light_block_timer = 0
+
+     def _get_steering_command(self, waypoints, speed):
+         if not waypoints.any() or speed < 0.2: return 0.0
+         # .numpy() for safety when mixing torch tensors with NumPy
+         aim_point = (waypoints[1].numpy() + waypoints[0].numpy()) / 2.0
+         angle_rad = np.arctan2(aim_point[0], aim_point[1])
+         steer = self.turn_controller.step(np.degrees(angle_rad) / -90.0)
+         steer = self.last_steer * 0.4 + steer * 0.6
          self.last_steer = steer
+         return np.clip(steer, -1.0, 1.0)
+
+
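For illustration only (not part of the commit), a minimal sketch of wiring the controller into a loop. Every configuration key below is inferred from the dictionary accesses inside InterfuserController and may differ from the project's real configuration files:

```python
import numpy as np
import torch

config = {  # hypothetical values
    'frequency': 10.0,
    'grid_conf': {'x_res': 1.0, 'y_res': 1.0, 'x_min': -10.0, 'y_min': -10.0},
    'controller_params': {
        'turn_KP': 1.0, 'turn_KI': 0.1, 'turn_KD': 0.1, 'turn_n': 20,
        'speed_KP': 0.5, 'speed_KI': 0.05, 'speed_KD': 0.1, 'speed_n': 20,
        'max_speed': 6.0, 'max_throttle': 0.75, 'brake_sensitivity': 0.5,
        'min_speed': 0.5, 'light_threshold': 0.5, 'stop_threshold': 0.5,
        'stop_sign_duration': 20, 'max_stop_time': 100, 'forced_move_duration': 20,
        'forced_throttle': 0.3, 'max_red_light_time': 300,
    },
}

controller = InterfuserController(config)
steer, throttle, brake, info = controller.run_step(
    speed=2.0,
    waypoints=torch.tensor([[0.0, 2.0], [0.0, 6.0]]),  # (x, y) points ahead of the car
    junction=0.0, traffic_light=0.0, stop_sign=0.0,
    bev_map=np.zeros((20, 20, 7), dtype=np.float32),
    ego_pos=np.array([0.0, 0.0]), ego_theta=0.0, frame_num=0,
)
print(steer, throttle, brake, info['brake_reason'])
```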
+ def unnormalize_image(tensor: torch.Tensor) -> np.ndarray:
+     """
+     Reverses the PyTorch image normalization and converts the tensor into a BGR
+     image ready for display with OpenCV.
+
+     Args:
+         tensor: the normalized image tensor, shaped (C, H, W).
+
+     Returns:
+         A NumPy array in BGR format, shaped (H, W, C).
+     """
+     # 1. Mean and std used by the original normalization
+     #    (the standard ImageNet statistics)
+     mean = torch.tensor([0.485, 0.456, 0.406], device=tensor.device)
+     std = torch.tensor([0.229, 0.224, 0.225], device=tensor.device)
+
+     # 2. Invert the normalization: (tensor * std) + mean
+     #    mean and std are reshaped to broadcast over the (C, H, W) tensor
+     tensor = tensor * std[:, None, None] + mean[:, None, None]
+
+     # 3. Clamp the values to the [0, 1] range
+     #    (rounding errors can push them slightly outside it)
+     tensor = torch.clamp(tensor, 0, 1)
+
+     # 4. Convert the tensor to a NumPy array
+     img_np = tensor.cpu().numpy()
+
+     # 5. Reorder the dimensions from (C, H, W) to (H, W, C)
+     #    PyTorch: (channels, height, width); NumPy/OpenCV: (height, width, channels)
+     img_np = np.transpose(img_np, (1, 2, 0))
+
+     # 6. Rescale from [0, 1] to [0, 255] and change the dtype
+     img_np = (img_np * 255).astype(np.uint8)
+
+     # 7. Convert the color channels from RGB to BGR
+     #    Most deep-learning pipelines (including PyTorch) use RGB, while OpenCV expects BGR.
+     img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
+
+     return img_bgr
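A hypothetical round trip (not part of the commit) shows how the function undoes torchvision's ImageNet normalization and yields a displayable uint8 BGR image:

```python
import torch
from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
rgb = torch.rand(3, 224, 224)          # values in [0, 1], shaped (C, H, W)
bgr = unnormalize_image(normalize(rgb))
print(bgr.shape, bgr.dtype)            # (224, 224, 3) uint8
```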
+ # =========================================================
+ # Step 1: world_to_pixel (unchanged)
+ # =========================================================
+ def world_to_pixel(world_points: np.ndarray, grid_size_pixels: tuple, grid_size_meters: tuple) -> np.ndarray:
+     pixel_per_meter_x = grid_size_pixels[0] / grid_size_meters[0]
+     pixel_per_meter_y = grid_size_pixels[1] / grid_size_meters[1]
+     pixel_x = (grid_size_pixels[0] / 2) + (world_points[:, 1] * pixel_per_meter_x)
+     pixel_y = grid_size_pixels[1] - (world_points[:, 0] * pixel_per_meter_y)
+     return np.vstack((pixel_x, pixel_y)).T
+
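A quick arithmetic check (illustrative values only): on a 400x400-pixel grid covering 20x20 m, a point 5 m forward and 2 m to the side (x forward, y lateral, as the function indexes world_points) lands at pixel (240, 300):

```python
import numpy as np

pts = np.array([[5.0, 2.0]])
px = world_to_pixel(pts, (400, 400), (20.0, 20.0))
# pixel_x = 400/2 + 2.0 * 20 = 240, pixel_y = 400 - 5.0 * 20 = 300
print(px)  # [[240. 300.]]
```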
+ # ===================================================================
+ # Step 2: render_bev (updated for phase two: future maps)
+ # ===================================================================
+ def render_bev(
+     active_tracks: List,
+     predicted_waypoints: np.ndarray,
+     ego_pos_global: np.ndarray,
+     ego_theta_global: float,
+     pixels_per_meter: int = 10,
+     grid_size_meters: tuple = (40, 40),
+     future_time_steps: tuple = (1.0, 2.0)  # future prediction horizons
+ ) -> Dict[str, np.ndarray]:
+     """
+     [Phase two]
+     Builds a map for the current instant (t0) and future prediction maps (t1, t2).
+     """
+     side_m, fwd_m = grid_size_meters
+     width_px, height_px = int(side_m * pixels_per_meter), int(fwd_m * pixels_per_meter)
+
+     # Rotation matrix from world coordinates to ego-vehicle coordinates
+     R_world_to_ego = np.array([[math.cos(ego_theta_global), math.sin(ego_theta_global)],
+                                [-math.sin(ego_theta_global), math.cos(ego_theta_global)]])
+
+     # Empty maps, one per time step
+     bev_maps = {
+         't0': np.zeros((height_px, width_px, 3), dtype=np.uint8),
+         't1': np.zeros((height_px, width_px, 3), dtype=np.uint8),
+         't2': np.zeros((height_px, width_px, 3), dtype=np.uint8)
+     }
+
+     # --- 1. Draw the tracked objects in the present and the future ---
+     for track in active_tracks:
+         # --- a. Current state of the object ---
+         pos_global = track.last_pos
+         yaw_rad_global = getattr(track, 'last_yaw', 0)
+         speed = getattr(track, 'speed', 0)
+         extent = getattr(track, 'last_extent', (1.0, 2.0))  # (length, width)
+
+         # --- b. Relative properties used for drawing ---
+         relative_yaw_rad = yaw_rad_global - ego_theta_global
+         angle_deg = -90 - math.degrees(relative_yaw_rad)
+         width_px_obj = extent[1] * 2 * pixels_per_meter
+         length_px_obj = extent[0] * 2 * pixels_per_meter
+         box_size_px = (float(width_px_obj), float(length_px_obj))
+
+         # --- c. Draw the object at the current time (t0) ---
+         relative_pos_t0 = R_world_to_ego.dot(pos_global - ego_pos_global)
+         center_pixel_t0 = world_to_pixel(np.array([relative_pos_t0]), (width_px, height_px), grid_size_meters)[0]
+         box_points_t0 = cv2.boxPoints(((float(center_pixel_t0[0]), float(center_pixel_t0[1])), box_size_px, angle_deg))
+         cv2.drawContours(bev_maps['t0'], [box_points_t0.astype(np.int32)], 0, (0, 0, 255), 2)  # red
+
+         # --- d. Predict the future positions and draw them ---
+         for i, t in enumerate(future_time_steps):
+             # Displacement from speed, heading and time
+             # (these operations are done in the global reference frame)
+             offset = np.array([math.cos(yaw_rad_global), math.sin(yaw_rad_global)]) * speed * t
+             future_pos_global = pos_global + offset
+
+             # Convert the future position to relative coordinates, then to pixels
+             relative_pos_future = R_world_to_ego.dot(future_pos_global - ego_pos_global)
+             center_pixel_future = world_to_pixel(np.array([relative_pos_future]), (width_px, height_px), grid_size_meters)[0]
+
+             # Draw the future box (same size and angle, different position and color)
+             key = f't{i+1}'  # 't1', 't2', etc.
+             box_points_future = cv2.boxPoints(((float(center_pixel_future[0]), float(center_pixel_future[1])), box_size_px, angle_deg))
+             cv2.drawContours(bev_maps[key], [box_points_future.astype(np.int32)], 0, (255, 0, 128), 2)  # purple
+
+     # --- 2. Draw the ego vehicle (on every map) ---
+     ego_center_pixel = (width_px / 2, height_px - 5)
+     ego_size_px = (1.8 * pixels_per_meter, 4.0 * pixels_per_meter)
+     ego_box = cv2.boxPoints((ego_center_pixel, ego_size_px, -90))
+     for key in bev_maps:
+         cv2.drawContours(bev_maps[key], [ego_box.astype(np.int32)], 0, (0, 255, 255), -1)  # yellow
+
+     # --- 3. Draw the predicted waypoints (only on the current map, t0) ---
+     if predicted_waypoints.size > 0:
+         waypoints_pixels = world_to_pixel(predicted_waypoints, (width_px, height_px), grid_size_meters)
+         cv2.polylines(bev_maps['t0'], [waypoints_pixels.astype(np.int32)], isClosed=False, color=(0, 255, 0), thickness=3)
+
+     return bev_maps
+
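For illustration only (not part of the commit), a sketch that feeds render_bev a fake track carrying just the attributes it reads; last_pos is required, while last_yaw, speed and last_extent fall back to defaults via getattr:

```python
import numpy as np
from types import SimpleNamespace

track = SimpleNamespace(last_pos=np.array([10.0, 2.0]), last_yaw=0.0,
                        speed=3.0, last_extent=(2.2, 1.0))

maps = render_bev(
    active_tracks=[track],
    predicted_waypoints=np.array([[2.0, 0.0], [6.0, 0.0], [10.0, 0.0]]),
    ego_pos_global=np.array([0.0, 0.0]),
    ego_theta_global=0.0,
)
print(sorted(maps.keys()), maps['t0'].shape)  # ['t0', 't1', 't2'] (400, 400, 3)
```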
+ # =========================================================
+ # (world_to_pixel kept as in the previous revision; note that this
+ #  re-definition shadows the annotated version above)
+ def world_to_pixel(world_points, grid_size_pixels, grid_size_meters):
+     pixel_per_meter_x = grid_size_pixels[0] / grid_size_meters[0]
+     pixel_per_meter_y = grid_size_pixels[1] / grid_size_meters[1]
+
+     pixel_x = (world_points[:, 1] * pixel_per_meter_x) + (grid_size_pixels[0] / 2)
+     pixel_y = (grid_size_pixels[1]) - (world_points[:, 0] * pixel_per_meter_y)
+     return np.vstack((pixel_x, pixel_y)).T
+
+ # ==========================================================
+ # Function 2: generate_bev_image (final version)
+ # ==========================================================
+ def generate_bev_image(sample, grid_size_pixels=(400, 400), grid_size_meters=(20, 20)):
+     """
+     Takes one data sample and produces a raw BEV image with OpenCV.
+     :return: image (numpy array) in BGR format.
+     """
+     side_meters, forward_meters = grid_size_meters
+     width_px, height_px = grid_size_pixels
+
+     bev_image = np.zeros((height_px, width_px, 3), dtype=np.uint8)
+
+     route_data = sample['measurements'].get('route', [])
+     if route_data:
+         route_world = np.array([[point[0], point[1]] for point in route_data])
+         route_pixels = world_to_pixel(route_world, (width_px, height_px), (side_meters, forward_meters))
+         cv2.polylines(bev_image, [route_pixels.astype(np.int_)], isClosed=False, color=(0, 255, 0), thickness=2)
+
+     objects_data = sample['objects']
+     for obj in objects_data:
+         obj_class = obj.get('class', 'unknown')
+         if obj_class in ['weather', 'ego_info', 'static']: continue
+
+         pos = obj.get('position', [0, 0, 0])
+         ext = obj.get('extent', [0, 0, 0])
+         yaw = obj.get('yaw', 0)
+
+         center_world = np.array([[pos[0], pos[1]]])
+         center_pixel = world_to_pixel(center_world, (width_px, height_px), (side_meters, forward_meters))[0]
+
+         length_m, width_m = ext[0]*2, ext[1]*2
+
+         width_px_obj = width_m * (width_px / side_meters)
+         length_px_obj = length_m * (height_px / forward_meters)
+
+         color = (255, 255, 0) if obj_class == 'ego_car' else (0, 0, 255)
+
+         center_tuple = (float(center_pixel[0]), float(center_pixel[1]))
+         # Note: OpenCV expects (width, height) in size_tuple
+         size_tuple = (float(width_px_obj), float(length_px_obj))
+         angle_deg = np.degrees(yaw)  # convert the yaw to degrees for OpenCV
+
+         box = cv2.boxPoints((center_tuple, size_tuple, angle_deg))
+         box = box.astype(np.int_)
+         cv2.drawContours(bev_image, [box], 0, color, 2)
+
+     return bev_image
+
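For illustration only (not part of the commit), a hypothetical sample laid out the way generate_bev_image reads it: 'measurements' carries a route polyline and 'objects' a list of labeled boxes with position, extent and yaw:

```python
import numpy as np

sample = {
    'measurements': {'route': [(2.0, 0.0), (6.0, 0.5), (9.0, 1.0)]},
    'objects': [
        {'class': 'vehicle', 'position': [8.0, -1.5, 0.0],
         'extent': [2.3, 1.0, 0.8], 'yaw': 0.1},
        {'class': 'weather'},  # skipped by the class filter
    ],
}
bev = generate_bev_image(sample)
print(bev.shape)  # (400, 400, 3)
```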
+
+ # # ==========================================================
+ # # Class 3: the professional display interface
+ # # ==========================================================
+
+
+ # ==============================================================================
+ # Class 1: display settings
+ # ==============================================================================
+ @dataclass
+ class DisplayConfig:
+     width: int = 1920
+     height: int = 1080
+     camera_ratio: float = 0.65
+     panel_margin: int = 30
+     section_spacing: int = 25
+     min_section_height: int = 180
+
+ # ==============================================================================
+ # Class 2: the professional display interface (improved version)
+ # ==============================================================================
  class DisplayInterface:
+     def __init__(self, config: Optional[DisplayConfig] = None):
+         self.config = config if config else DisplayConfig()
+         self._init_colors_and_fonts()
+         self._error_log = []
+
+     def _init_colors_and_fonts(self):
+         self.colors = {'panel_bg': (28, 30, 34), 'text': (240, 240, 240), 'text_header': (0, 165, 255),
+                        'separator': (75, 75, 75), 'throttle': (100, 200, 100), 'brake': (30, 30, 240),
+                        'steer_bar': (0, 190, 255), 'steer_neutral': (100, 100, 100),
+                        'light_red': (30, 30, 240), 'light_green': (100, 200, 100),
+                        'stop_sign': (200, 100, 255), 'gauge_bg': (50, 50, 50),
+                        'gauge_needle': (30, 30, 240), 'error': (30, 30, 240)}
+         self.fonts = {'header': cv2.FONT_HERSHEY_DUPLEX, 'normal': cv2.FONT_HERSHEY_SIMPLEX}
+
+     def run_interface(self, data: Dict) -> np.ndarray:
+         """Main entry point: builds and returns the final dashboard image."""
+         try:
+             dashboard = np.zeros((self.config.height, self.config.width, 3), dtype=np.uint8)
+             cam_w = int(self.config.width * self.config.camera_ratio)
+
+             # --- 1. Draw the main components ---
+             self._draw_camera_view(dashboard, data.get('camera_view'), cam_w)
+             self._draw_info_overlay(dashboard, data, cam_w)
+
+             # --- 2. Draw the side information panel ---
+             dashboard[:, cam_w:] = self.colors['panel_bg']
+             current_y = self.config.panel_margin
+             current_y = self._draw_bev_maps_section(dashboard, data, cam_w, current_y)
+             self._draw_controls_section(dashboard, data, cam_w, current_y + self.config.section_spacing)
+
+             return dashboard
+         except Exception as e:
+             self._log_error(str(e))
+             return self._create_error_display(str(e))
+
+     # --------------------------------------------------------------------------
+     # Drawing helpers
+     # --------------------------------------------------------------------------
+
+     def _draw_camera_view(self, db: np.ndarray, view: np.ndarray, cam_w: int):
+         if view is not None:
+             db[:, :cam_w] = cv2.resize(view, (cam_w, self.config.height))
+
+     def _draw_info_overlay(self, db: np.ndarray, data: Dict, cam_w: int):
+         """Draws the translucent information strip over the camera view."""
+         overlay = np.zeros_like(db); panel_h = 90; alpha = 0.6
+         cv2.rectangle(overlay, (0, self.config.height - panel_h), (cam_w, self.config.height), self.colors['panel_bg'], -1)
+         db[:, :cam_w] = cv2.addWeighted(db[:, :cam_w], 1, overlay[:, :cam_w], alpha, 0)
+
+         base_y = self.config.height - panel_h + 35
+         self._draw_text(db, f"Frame: {data.get('frame_num', 'N/A')}", (20, base_y), 1.2)
+         counts = data.get('object_counts', {})
+         self._draw_text(db, f"Detections: C:{counts.get('car', 0)}", (20, base_y + 35), 1.0, font_type='normal')
+         # self._draw_text(db, f"Detections: C:{counts.get('car',0)} B:{counts.get('bike',0)} P:{counts.get('pedestrian',0)}",
+         #                 (20, base_y + 35), self.fonts['normal'], 1.0)
+
+         right_x = cam_w - 320
+         light_prob = data.get('light_prob', 0.0)
+         light_color = self.colors['light_red'] if light_prob > 0.5 else self.colors['light_green']
+         cv2.circle(db, (right_x, base_y), 12, light_color, -1)
+         self._draw_text(db, f"Light: {light_prob:.2f}", (right_x + 25, base_y + 8), 1.0, font_type='normal')
+
+         stop_prob = data.get('stop_prob', 0.0)
+         stop_color = self.colors['stop_sign'] if stop_prob > 0.5 else self.colors['steer_neutral']
+         cv2.circle(db, (right_x, base_y + 35), 12, stop_color, -1)
+         self._draw_text(db, f"Stop: {stop_prob:.2f}", (right_x + 25, base_y + 43), 1.0, font_type='normal')
+
+     def _draw_bev_maps_section(self, db: np.ndarray, data: Dict, info_x: int, start_y: int) -> int:
+         """Draws the BEV maps section (current and future)."""
+         x = info_x + self.config.panel_margin
+         panel_w = (self.config.width - info_x) - 2 * self.config.panel_margin
+
+         main_bev_h = 350; future_bev_h = 150
+
+         # Main map
+         bev_t0 = cv2.resize(data.get('map_t0', np.zeros((1, 1, 3))), (panel_w, main_bev_h))
+         self._draw_text(bev_t0, "BEV t+0.0s", (10, 30), 1.2, color=self.colors['text_header'])
+         db[start_y:start_y + main_bev_h, x:x + panel_w] = bev_t0
+
+         # Future maps
+         future_y = start_y + main_bev_h + 10
+         future_bev_w = panel_w // 2
+
+         bev_t1 = cv2.resize(data.get('map_t1', np.zeros((1, 1, 3))), (future_bev_w, future_bev_h))
+         self._draw_text(bev_t1, "t+1.0s", (10, 20), 0.7, font_type='normal')
+
+         bev_t2 = cv2.resize(data.get('map_t2', np.zeros((1, 1, 3))), (future_bev_w, future_bev_h))
+         self._draw_text(bev_t2, "t+2.0s", (10, 20), 0.7, font_type='normal')
+
+         db[future_y:future_y + future_bev_h, x:x + future_bev_w] = bev_t1
+         db[future_y:future_y + future_bev_h, x + future_bev_w:x + panel_w] = bev_t2
+
+         separator_y = future_y + future_bev_h + self.config.section_spacing
+         cv2.line(db, (info_x, separator_y), (self.config.width, separator_y), self.colors['separator'], 2)
+         return separator_y
+
+     def _draw_controls_section(self, db: np.ndarray, data: Dict, info_x: int, start_y: int):
+         """Draws the vehicle-control section."""
+         x = info_x + self.config.panel_margin
+         self._draw_text(db, "VEHICLE CONTROL", (x, start_y + 30), 1.2, color=self.colors['text_header'])
+
+         self._draw_gauge_display(db, data, x, start_y + 80)
+         self._draw_control_bars(db, data, x + 180, start_y + 80)
+
+     def _draw_gauge_display(self, db: np.ndarray, data: Dict, x: int, y: int):
+         """Draws the speed gauge."""
+         radius = 65
+         self._draw_speed_gauge(db, x + radius, y + radius, radius, data.get('speed', 0), data.get('target_speed', 0), 60.0)
+
+     def _draw_control_bars(self, db: np.ndarray, data: Dict, x: int, y: int):
+         """Draws the steering, throttle and brake indicators."""
+         bar_w = self.config.width - x - self.config.panel_margin
+
+         self._draw_text(db, f"Steer: {data.get('steer', 0.0):.2f}", (x, y), 0.8, font_type='normal')
+         self._draw_steer_indicator(db, x, y + 15, bar_w, 20, data.get('steer', 0.0))
+
+         self._draw_text(db, f"Throttle: {data.get('throttle', 0.0):.2f}", (x, y + 50), 0.8, font_type='normal')
+         self._draw_bar(db, x, y + 65, bar_w, 20, data.get('throttle', 0.0), 1.0, self.colors['throttle'])
+
+         brake_on = data.get('brake', False)
+         brake_color = self.colors['brake'] if brake_on else self.colors['text']
+         self._draw_text(db, f"Brake: {'ON' if brake_on else 'OFF'}", (x, y + 100), 0.8, brake_color, font_type='normal')
+         self._draw_bar(db, x, y + 115, bar_w, 20, float(brake_on), 1.0, self.colors['brake'])
+
+     # --------------------------------------------------------------------------
+     # Drawing primitives
+     # --------------------------------------------------------------------------
+
+     def _draw_text(self, img, text, pos, size, color=None, font_type='header', thickness=1):
+         color = color if color is not None else self.colors['text']
+         font = self.fonts[font_type]
+         cv2.putText(img, text, (pos[0] + 1, pos[1] + 1), font, size, (0, 0, 0), thickness + 1, cv2.LINE_AA)
+         cv2.putText(img, text, pos, font, size, color, thickness, cv2.LINE_AA)
+
+     def _draw_bar(self, img, x, y, w, h, val, max_val, color):
+         val = np.clip(val, 0, max_val); ratio = val / max_val
+         cv2.rectangle(img, (x, y), (x + w, y + h), self.colors['steer_neutral'], -1)
+         cv2.rectangle(img, (x, y), (x + int(w * ratio), y + h), color, -1)
+         cv2.rectangle(img, (x, y), (x + w, y + h), self.colors['text'], 1)
+
+     def _draw_steer_indicator(self, img, x, y, w, h, val):
+         center_x = x + w // 2; val = np.clip(val, -1.0, 1.0)
+         cv2.rectangle(img, (x, y), (x + w, y + h), self.colors['steer_neutral'], -1)
+         cv2.line(img, (center_x, y), (center_x, y + h), self.colors['text'], 1)
+         indicator_x = center_x + int(val * (w // 2 * 0.95))
+         cv2.line(img, (indicator_x, y), (indicator_x, y + h), self.colors['steer_bar'], 6)
+
+     def _draw_speed_gauge(self, img, cx, cy, r, spd, t_spd, m_spd):
+         cv2.circle(img, (cx, cy), r, self.colors['gauge_bg'], -1); cv2.circle(img, (cx, cy), r, self.colors['text'], 2)
+         ratio = np.clip(spd / m_spd, 0, 1); angle = math.radians(135 + ratio * 270)
+         ex = cx + int(r * 0.85 * math.cos(angle)); ey = cy + int(r * 0.85 * math.sin(angle))
+         cv2.line(img, (cx, cy), (ex, ey), self.colors['gauge_needle'], 3); cv2.circle(img, (cx, cy), 5, self.colors['gauge_needle'], -1)
+         (w, h), _ = cv2.getTextSize(f"{spd:.1f}", self.fonts['header'], 1.5, 3)
+         self._draw_text(img, f"{spd:.1f}", (cx - w // 2, cy + 10), 1.5); self._draw_text(img, "km/h", (cx - 25, cy + 35), 0.5, font_type='normal')
+         self._draw_text(img, f"Target: {t_spd:.1f}", (cx - 50, cy - r - 10), 0.7)
+
+     def _create_error_display(self, error_msg: str):
+         error_display = np.zeros((self.config.height, self.config.width, 3), dtype=np.uint8)
+         error_display[:] = self.colors['panel_bg']
+         self._draw_text(error_display, "SYSTEM ERROR", (self.config.width // 2 - 200, self.config.height // 2 - 50), 1.5, self.colors['error'])
+         self._draw_text(error_display, error_msg, (50, self.config.height // 2 + 20), 0.8, self.colors['steer_bar'], font_type='normal')
+         return error_display
+
+     def _log_error(self, error_msg: str):
+         self._error_log.append(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {error_msg}")
+         if len(self._error_log) > 50: self._error_log.pop(0)