altawil committed on
Commit 57b9764 · verified · 1 Parent(s): ca76580

Update app.py

Files changed (1)
  1. app.py +537 -535
app.py CHANGED
@@ -1,594 +1,596 @@
  # app.py - InterFuser Self-Driving API Server
  import uuid
  import base64
  import cv2
  import torch
  import numpy as np
  from fastapi import FastAPI, HTTPException
  from fastapi.responses import HTMLResponse
- from pydantic import BaseModel
- from torchvision import transforms
- from typing import List, Dict, Any, Optional
- import logging

- # Import from our local modules
  from model_definition import InterfuserModel, load_and_prepare_model, create_model_config
- from simulation_modules import (
-     InterfuserController, ControllerConfig, Tracker, DisplayInterface,
-     render, render_waypoints, render_self_car, WAYPOINT_SCALE_FACTOR,
-     T1_FUTURE_TIME, T2_FUTURE_TIME
- )

- # Logging setup
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)

- # ================== General settings and model loading ==================
  app = FastAPI(
      title="Baseer Self-Driving API",
-     description="Self-driving API using the InterFuser model",
-     version="1.0.0"
  )

- device = torch.device("cpu")
- logger.info(f"Using device: {device}")
-
- # Load the model using the improved helper function
- try:
-     # Build the model config with the correct settings from training
-     model_config = create_model_config(
-         model_path="model/best_model.pth"
-         # The correct training settings are applied automatically:
-         # embed_dim=256, rgb_backbone_name='r50', waypoints_pred_head='gru'
-         # with_lidar=False, with_right_left_sensors=False, with_center_sensor=False
-     )
-
-     # Load the model together with its weights
-     model = load_and_prepare_model(model_config, device)
-     logger.info("✅ Model loaded successfully")
-
- except Exception as e:
-     logger.error(f"❌ Failed to load the model: {e}")
-     logger.info("🔄 Trying to create a model with random weights...")
-     try:
-         model = InterfuserModel()
-         model.to(device)
-         model.eval()
-         logger.warning("⚠️ Model created with random weights")
-     except Exception as e2:
-         logger.error(f"❌ Failed to create the model: {e2}")
-         model = None
-
- # Initialize the display interface
- display = DisplayInterface()
-
- # Dictionary that stores user sessions
- SESSIONS: Dict[str, Dict] = {}

- # ================== Pydantic data models ==================
  class Measurements(BaseModel):
-     pos: List[float] = [0.0, 0.0]  # [x, y] position
-     theta: float = 0.0  # orientation angle
-     speed: float = 0.0  # current speed
-     steer: float = 0.0  # current steering
-     throttle: float = 0.0  # current throttle
-     brake: bool = False  # brake status
-     command: int = 4  # driving command (4 = FollowLane)
-     target_point: List[float] = [0.0, 0.0]  # target point [x, y]
-
- class ModelOutputs(BaseModel):
-     traffic: List[List[List[float]]]  # 20x20x7 grid
-     waypoints: List[List[float]]  # Nx2 waypoints
-     is_junction: float
-     traffic_light_state: float
-     stop_sign: float

  class ControlCommands(BaseModel):
      steer: float
      throttle: float
      brake: bool

- class RunStepInput(BaseModel):
-     session_id: str
-     image_b64: str
-     measurements: Measurements

- class RunStepOutput(BaseModel):
-     model_outputs: ModelOutputs
      control_commands: ControlCommands
-     dashboard_image_b64: str

- class SessionResponse(BaseModel):
-     session_id: str
-     message: str

- # ================== Helper functions ==================
- def get_image_transform():
-     """Create the image transforms as in PDMDataset."""
-     return transforms.Compose([
          transforms.ToTensor(),
          transforms.Resize((224, 224), antialias=True),
          transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
      ])

- # Create the transform object once
- image_transform = get_image_transform()
-
- def preprocess_input(frame_rgb: np.ndarray, measurements: Measurements, device: torch.device) -> Dict[str, torch.Tensor]:
-     """
-     Mimics what PDMDataset.__getitem__ does to build a single batch.
-     """
-     # 1. Process the main image
-     from PIL import Image
-     if isinstance(frame_rgb, np.ndarray):
-         frame_rgb = Image.fromarray(frame_rgb)

-     image_tensor = image_transform(frame_rgb).unsqueeze(0).to(device)  # add the batch dimension

-     # 2. Create the other camera inputs by cloning
-     batch = {
          'rgb': image_tensor,
-         'rgb_left': image_tensor.clone(),
-         'rgb_right': image_tensor.clone(),
-         'rgb_center': image_tensor.clone(),
      }

-     # 3. Create a dummy lidar input (zeros)
-     batch['lidar'] = torch.zeros(1, 3, 224, 224, dtype=torch.float32).to(device)
-
-     # 4. Assemble the measurements in the same order as PDMDataset
-     m = measurements
-     measurements_tensor = torch.tensor([[
-         m.pos[0], m.pos[1], m.theta,
-         m.steer, m.throttle, float(m.brake),
-         m.speed, float(m.command)
-     ]], dtype=torch.float32).to(device)
-     batch['measurements'] = measurements_tensor
-
-     # 5. Create the target point
-     batch['target_point'] = torch.tensor([m.target_point], dtype=torch.float32).to(device)
-
-     # Ground-truth values (gt_*) are not needed at inference time
-     return batch
-
- def decode_base64_image(image_b64: str) -> np.ndarray:
-     """
-     Decode a Base64-encoded image.
-     """
-     try:
-         image_bytes = base64.b64decode(image_b64)
-         nparr = np.frombuffer(image_bytes, np.uint8)
-         image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-         return image
-     except Exception as e:
-         raise HTTPException(status_code=400, detail=f"Invalid image format: {str(e)}")
-
- def encode_image_to_base64(image: np.ndarray) -> str:
-     """
-     Encode an image to Base64.
-     """
-     _, buffer = cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85])
-     return base64.b64encode(buffer).decode('utf-8')

- # ================== API endpoints ==================
- @app.get("/", response_class=HTMLResponse)
  async def root():
-     """
-     The API home page.
-     """
180
- html_content = f"""
181
- <!DOCTYPE html>
182
- <html dir="rtl" lang="ar">
183
- <head>
184
- <meta charset="UTF-8">
185
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
186
- <title>🚗 Baseer Self-Driving API</title>
187
- <style>
188
- * {{
189
- margin: 0;
190
- padding: 0;
191
- box-sizing: border-box;
192
- }}
193
- body {{
194
- font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
195
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
196
- min-height: 100vh;
197
- display: flex;
198
- align-items: center;
199
- justify-content: center;
200
- padding: 20px;
201
- }}
202
- .container {{
203
- background: rgba(255, 255, 255, 0.95);
204
- backdrop-filter: blur(10px);
205
- border-radius: 20px;
206
- padding: 40px;
207
- box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
208
- text-align: center;
209
- max-width: 600px;
210
- width: 100%;
211
- }}
212
- .logo {{
213
- font-size: 4rem;
214
- margin-bottom: 20px;
215
- animation: bounce 2s infinite;
216
- }}
217
- @keyframes bounce {{
218
- 0%, 20%, 50%, 80%, 100% {{ transform: translateY(0); }}
219
- 40% {{ transform: translateY(-10px); }}
220
- 60% {{ transform: translateY(-5px); }}
221
- }}
222
- h1 {{
223
- color: #333;
224
- margin-bottom: 10px;
225
- font-size: 2.5rem;
226
- }}
227
- .subtitle {{
228
- color: #666;
229
- margin-bottom: 30px;
230
- font-size: 1.2rem;
231
- }}
232
- .status {{
233
- display: inline-block;
234
- background: #4CAF50;
235
- color: white;
236
- padding: 8px 16px;
237
- border-radius: 20px;
238
- margin: 10px 0;
239
- font-weight: bold;
240
- }}
241
- .stats {{
242
- display: grid;
243
- grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
244
- gap: 20px;
245
- margin: 30px 0;
246
- }}
247
- .stat-card {{
248
- background: #f8f9fa;
249
- padding: 20px;
250
- border-radius: 15px;
251
- border-left: 4px solid #667eea;
252
- }}
253
- .stat-number {{
254
- font-size: 2rem;
255
- font-weight: bold;
256
- color: #667eea;
257
- }}
258
- .stat-label {{
259
- color: #666;
260
- margin-top: 5px;
261
- }}
262
- .buttons {{
263
- display: flex;
264
- gap: 15px;
265
- justify-content: center;
266
- flex-wrap: wrap;
267
- margin-top: 30px;
268
- }}
269
- .btn {{
270
- display: inline-block;
271
- padding: 12px 24px;
272
- border-radius: 25px;
273
- text-decoration: none;
274
- font-weight: bold;
275
- transition: all 0.3s ease;
276
- border: none;
277
- cursor: pointer;
278
- }}
279
- .btn-primary {{
280
- background: #667eea;
281
- color: white;
282
- }}
283
- .btn-secondary {{
284
- background: #6c757d;
285
- color: white;
286
- }}
287
- .btn:hover {{
288
- transform: translateY(-2px);
289
- box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
290
- }}
291
- .features {{
292
- text-align: right;
293
- margin-top: 30px;
294
- padding: 20px;
295
- background: #f8f9fa;
296
- border-radius: 15px;
297
- }}
298
- .features h3 {{
299
- color: #333;
300
- margin-bottom: 15px;
301
- }}
302
- .features ul {{
303
- list-style: none;
304
- padding: 0;
305
- }}
306
- .features li {{
307
- padding: 5px 0;
308
- color: #666;
309
- }}
310
- .features li:before {{
311
- content: "✅ ";
312
- margin-left: 10px;
313
- }}
314
- </style>
315
- </head>
316
- <body>
317
- <div class="container">
318
- <div class="logo">🚗</div>
319
- <h1>Baseer Self-Driving API</h1>
320
- <p class="subtitle">نظام القيادة الذاتية المتقدم</p>
321
-
322
- <div class="status">🟢 يعمل بنجاح</div>
323
-
324
- <div class="stats">
325
- <div class="stat-card">
326
- <div class="stat-number">{len(SESSIONS)}</div>
327
- <div class="stat-label">الجلسات النشطة</div>
328
- </div>
329
- <div class="stat-card">
330
- <div class="stat-number">v1.0</div>
331
- <div class="stat-label">الإصدار</div>
332
- </div>
333
- <div class="stat-card">
334
- <div class="stat-number">FastAPI</div>
335
- <div class="stat-label">التقنية</div>
336
- </div>
337
- </div>
338
-
339
- <div class="buttons">
340
- <a href="/docs" class="btn btn-primary">📚 توثيق API</a>
341
- <a href="/sessions" class="btn btn-secondary">📊 الجلسات</a>
342
- </div>
343
-
344
- <div class="features">
345
- <h3>🌟 الميزات الرئيسية</h3>
346
- <ul>
347
- <li>نموذج InterFuser للقيادة الذاتية</li>
348
- <li>معالجة الصور في الوقت الفعلي</li>
349
- <li>اكتشاف الكائنات المرورية</li>
350
- <li>تحديد المسارات الذكية</li>
351
- <li>واجهة RESTful سهلة الاستخدام</li>
352
- <li>إدارة جلسات متعددة</li>
353
- </ul>
354
- </div>
355
- </div>
356
- </body>
357
  </html>
358
  """
-     return html_content

- @app.post("/start_session", response_model=SessionResponse)
- async def start_session():
-     """
-     Start a new simulation session.
-     """
      session_id = str(uuid.uuid4())

-     # Create a new session
      SESSIONS[session_id] = {
-         'tracker': Tracker(frequency=10),
-         'controller': InterfuserController(ControllerConfig()),
-         'frame_num': 0,
-         'created_at': np.datetime64('now'),
-         'last_activity': np.datetime64('now')
      }
-
-     logger.info(f"New session created: {session_id}")
-
-     return SessionResponse(
-         session_id=session_id,
-         message="Session started successfully"
-     )

- @app.post("/run_step", response_model=RunStepOutput)
- async def run_step(data: RunStepInput):
-     """
-     Execute one full simulation step.
-     """
-     # Check that the session exists
-     if data.session_id not in SESSIONS:
-         raise HTTPException(status_code=404, detail="Session not found")
-
-     session = SESSIONS[data.session_id]
-     tracker = session['tracker']
-     controller = session['controller']
-
-     # Update the activity timestamp
-     session['last_activity'] = np.datetime64('now')

-     try:
-         # 1. Decode the image
-         frame_bgr = decode_base64_image(data.image_b64)
-         frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
-
-         # 2. Preprocess the inputs
-         inputs = preprocess_input(frame_rgb, data.measurements, device)
-
-         # 3. Run the model
-         if model is None:
-             raise HTTPException(status_code=500, detail="Model not loaded")
-
-         with torch.no_grad():
-             traffic, waypoints, is_junction, traffic_light, stop_sign, _ = model(inputs)
-
-         # 4. Post-process the model outputs
-         traffic_np = traffic.cpu().numpy()[0]  # take the first element of the batch
-         waypoints_np = waypoints.cpu().numpy()[0]
-         is_junction_prob = torch.sigmoid(is_junction)[0, 1].item()
-         traffic_light_prob = torch.sigmoid(traffic_light)[0, 0].item()
-         stop_sign_prob = torch.sigmoid(stop_sign)[0, 1].item()
-
-         # 5. Update the tracker
-         # Convert the traffic grid into detections for tracking
-         detections = []
-         h, w, c = traffic_np.shape
-         for y in range(h):
-             for x in range(w):
-                 for ch in range(c):
-                     if traffic_np[y, x, ch] > 0.2:  # detection threshold
-                         world_x = (x / w - 0.5) * 64  # convert to world coordinates
-                         world_y = (y / h - 0.5) * 64
-                         detections.append({
-                             'position': [world_x, world_y],
-                             'feature': traffic_np[y, x, ch]
-                         })
-
-         updated_traffic = tracker.update_and_predict(detections, session['frame_num'])
-
-         # 6. Run the controller
-         steer, throttle, brake, metadata = controller.run_step(
-             current_speed=data.measurements.speed,
-             waypoints=waypoints_np,
-             junction=is_junction_prob,
-             traffic_light_state=traffic_light_prob,
-             stop_sign=stop_sign_prob,
-             meta_data={'frame': session['frame_num']}
-         )
-
-         # 7. Build the display maps
-         surround_t0, counts_t0 = render(updated_traffic, t=0)
-         surround_t1, counts_t1 = render(updated_traffic, t=T1_FUTURE_TIME)
-         surround_t2, counts_t2 = render(updated_traffic, t=T2_FUTURE_TIME)
-
-         # Overlay the proposed path
-         wp_map = render_waypoints(waypoints_np)
-         map_t0 = cv2.add(surround_t0, wp_map)
-
-         # Overlay the ego vehicle
-         map_t0 = render_self_car(map_t0)
-         map_t1 = render_self_car(surround_t1)
-         map_t2 = render_self_car(surround_t2)
-
-         # 8. Build the final dashboard
-         interface_data = {
-             'camera_view': frame_bgr,
-             'map_t0': map_t0,
-             'map_t1': map_t1,
-             'map_t2': map_t2,
-             'text_info': {
-                 'Frame': f"Frame: {session['frame_num']}",
-                 'Control': f"Steer: {steer:.2f}, Throttle: {throttle:.2f}, Brake: {brake}",
-                 'Speed': f"Speed: {data.measurements.speed:.1f} km/h",
-                 'Junction': f"Junction: {is_junction_prob:.2f}",
-                 'Traffic Light': f"Red Light: {traffic_light_prob:.2f}",
-                 'Stop Sign': f"Stop Sign: {stop_sign_prob:.2f}",
-                 'Metadata': metadata
-             },
-             'object_counts': {
-                 't0': counts_t0,
-                 't1': counts_t1,
-                 't2': counts_t2
-             }
-         }
-
-         dashboard_image = display.run_interface(interface_data)
-         dashboard_b64 = encode_image_to_base64(dashboard_image)
-
-         # 9. Assemble the final response
-         response = RunStepOutput(
-             model_outputs=ModelOutputs(
-                 traffic=traffic_np.tolist(),
-                 waypoints=waypoints_np.tolist(),
-                 is_junction=is_junction_prob,
-                 traffic_light_state=traffic_light_prob,
-                 stop_sign=stop_sign_prob
-             ),
-             control_commands=ControlCommands(
-                 steer=float(steer),
-                 throttle=float(throttle),
-                 brake=bool(brake)
-             ),
-             dashboard_image_b64=dashboard_b64
-         )
-
-         # Advance the frame counter
-         session['frame_num'] += 1
-
-         logger.info(f"Step completed for session {data.session_id}, frame {session['frame_num']}")
-
-         return response
-
-     except Exception as e:
-         logger.error(f"Error in run_step: {str(e)}")
-         raise HTTPException(status_code=500, detail=f"Processing error: {str(e)}")
-
- @app.post("/end_session", response_model=SessionResponse)
- async def end_session(session_id: str):
-     """
-     End a simulation session.
-     """
-     if session_id not in SESSIONS:
-         raise HTTPException(status_code=404, detail="Session not found")
-
-     # Delete the session
-     del SESSIONS[session_id]

-     logger.info(f"Session ended: {session_id}")

-     return SessionResponse(
-         session_id=session_id,
-         message="Session ended successfully"
      )

- @app.get("/sessions")
- async def list_sessions():
-     """
-     List the active sessions.
-     """
-     active_sessions = []
-     current_time = np.datetime64('now')
-
-     for session_id, session_data in SESSIONS.items():
-         time_diff = current_time - session_data['last_activity']
-         active_sessions.append({
-             'session_id': session_id,
-             'frame_count': session_data['frame_num'],
-             'created_at': str(session_data['created_at']),
-             'last_activity': str(session_data['last_activity']),
-             'inactive_minutes': float(time_diff / np.timedelta64(1, 'm'))
-         })
-
-     return {
-         'total_sessions': len(active_sessions),
-         'sessions': active_sessions
      }

- @app.delete("/sessions/cleanup")
- async def cleanup_inactive_sessions(max_inactive_minutes: int = 30):
-     """
-     Clean up inactive sessions.
-     """
-     current_time = np.datetime64('now')
-     cleaned_sessions = []
-
-     for session_id in list(SESSIONS.keys()):
-         session = SESSIONS[session_id]
-         time_diff = current_time - session['last_activity']
-         inactive_minutes = float(time_diff / np.timedelta64(1, 'm'))
-
-         if inactive_minutes > max_inactive_minutes:
-             del SESSIONS[session_id]
-             cleaned_sessions.append(session_id)

-     logger.info(f"Cleaned up {len(cleaned_sessions)} inactive sessions")
-
-     return {
-         'message': f"Cleaned up {len(cleaned_sessions)} inactive sessions",
-         'cleaned_sessions': cleaned_sessions,
-         'remaining_sessions': len(SESSIONS)
-     }
-
- # ================== Error handler ==================
- @app.exception_handler(Exception)
- async def global_exception_handler(request, exc):
-     logger.error(f"Global exception: {str(exc)}")
-     return {
-         "error": "Internal server error",
-         "detail": str(exc)
-     }

  # ================== Run the server ==================
- if __name__ == "__main__":
-     import uvicorn
-     uvicorn.run(app, host="0.0.0.0", port=7860)

1
  # app.py - InterFuser Self-Driving API Server
2
 
3
+ import uuid
4
+ import base64
5
+ import cv2
6
+ # import torch
7
+ # import numpy as np
8
+ # from fastapi import FastAPI, HTTPException
9
+ # from fastapi.responses import HTMLResponse
10
+ # from pydantic import BaseModel
11
+ # from torchvision import transforms
12
+ # from typing import List, Dict, Any, Optional
13
+ # import logging
14
+
15
+ # # استيراد من ملفاتنا المحلية
16
+ # from model_definition import InterfuserModel, load_and_prepare_model, create_model_config
17
+ # from simulation_modules import (
18
+ # InterfuserController, ControllerConfig, Tracker, DisplayInterface,
19
+ # render, render_waypoints, render_self_car, WAYPOINT_SCALE_FACTOR,
20
+ # T1_FUTURE_TIME, T2_FUTURE_TIME
21
+ # )
22
+
23
+ # # إعداد التسجيل
24
+ # logging.basicConfig(level=logging.INFO)
25
+ # logger = logging.getLogger(__name__)
26
+
27
+ # # ================== إعدادات عامة وتحميل النموذج ==================
28
+ # app = FastAPI(
29
+ # title="Baseer Self-Driving API",
30
+ # description="API للقيادة الذاتية باستخدام نموذج InterFuser",
31
+ # version="1.0.0"
32
+ # )
33
+
34
+ # device = torch.device("cpu")
35
+ # logger.info(f"Using device: {device}")
36
+
37
+ # # تحميل النموذج باستخدام الدالة المحسنة
38
+ # try:
39
+ # # إنشاء إعدادات النموذج باستخدام الإعدادات الصحيحة من التدريب
40
+ # model_config = create_model_config(
41
+ # model_path="model/best_model.pth"
42
+ # # الإعدادات الصحيحة من التدريب ستطبق تلقائياً:
43
+ # # embed_dim=256, rgb_backbone_name='r50', waypoints_pred_head='gru'
44
+ # # with_lidar=False, with_right_left_sensors=False, with_center_sensor=False
45
+ # )
46
+
47
+ # # تحميل النموذج مع الأوزان
48
+ # model = load_and_prepare_model(model_config, device)
49
+ # logger.info("✅ تم تحميل النموذج بنجاح")
50
+
51
+ # except Exception as e:
52
+ # logger.error(f"❌ خطأ في تحميل النموذج: {e}")
53
+ # logger.info("🔄 محاولة إنشاء نموذج بأوزان عشوائية...")
54
+ # try:
55
+ # model = InterfuserModel()
56
+ # model.to(device)
57
+ # model.eval()
58
+ # logger.warning("⚠️ تم إنشاء النموذج بأوزان عشوائية")
59
+ # except Exception as e2:
60
+ # logger.error(f"❌ فشل في إنشاء النموذج: {e2}")
61
+ # model = None
62
+
63
+ # # تهيئة واجهة العرض
64
+ # display = DisplayInterface()
65
+
66
+ # # قاموس لتخزين جلسات المستخدمين
67
+ # SESSIONS: Dict[str, Dict] = {}
68
+
69
+ # # ================== هياكل بيانات Pydantic ==================
70
+ # class Measurements(BaseModel):
71
+ # pos: List[float] = [0.0, 0.0] # [x, y] position
72
+ # theta: float = 0.0 # orientation angle
73
+ # speed: float = 0.0 # current speed
74
+ # steer: float = 0.0 # current steering
75
+ # throttle: float = 0.0 # current throttle
76
+ # brake: bool = False # brake status
77
+ # command: int = 4 # driving command (4 = FollowLane)
78
+ # target_point: List[float] = [0.0, 0.0] # target point [x, y]
79
+
80
+ # class ModelOutputs(BaseModel):
81
+ # traffic: List[List[List[float]]] # 20x20x7 grid
82
+ # waypoints: List[List[float]] # Nx2 waypoints
83
+ # is_junction: float
84
+ # traffic_light_state: float
85
+ # stop_sign: float
86
+
87
+ # class ControlCommands(BaseModel):
88
+ # steer: float
89
+ # throttle: float
90
+ # brake: bool
91
+
92
+ # class RunStepInput(BaseModel):
93
+ # session_id: str
94
+ # image_b64: str
95
+ # measurements: Measurements
96
+
97
+ # class RunStepOutput(BaseModel):
98
+ # model_outputs: ModelOutputs
99
+ # control_commands: ControlCommands
100
+ # dashboard_image_b64: str
101
+
102
+ # class SessionResponse(BaseModel):
103
+ # session_id: str
104
+ # message: str
105
+
106
+ # # ================== دوال المساعدة ==================
107
+ # def get_image_transform():
108
+ # """إنشاء تحويلات الصورة كما في PDMDataset"""
109
+ # return transforms.Compose([
110
+ # transforms.ToTensor(),
111
+ # transforms.Resize((224, 224), antialias=True),
112
+ # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
113
+ # ])
114
+
115
+ # # إنشاء كائن التحويل مرة واحدة
116
+ # image_transform = get_image_transform()
117
+
118
+ # def preprocess_input(frame_rgb: np.ndarray, measurements: Measurements, device: torch.device) -> Dict[str, torch.Tensor]:
119
+ # """
120
+ # تحاكي ما يفعله PDMDataset.__getitem__ لإنشاء دفعة (batch) واحدة.
121
+ # """
122
+ # # 1. معالجة الصورة الرئيسية
123
+ # from PIL import Image
124
+ # if isinstance(frame_rgb, np.ndarray):
125
+ # frame_rgb = Image.fromarray(frame_rgb)
126
+
127
+ # image_tensor = image_transform(frame_rgb).unsqueeze(0).to(device) # إضافة بُعد الدفعة
128
+
129
+ # # 2. إنشاء مدخلات الكاميرات الأخرى عن طريق الاستنساخ
130
+ # batch = {
131
+ # 'rgb': image_tensor,
132
+ # 'rgb_left': image_tensor.clone(),
133
+ # 'rgb_right': image_tensor.clone(),
134
+ # 'rgb_center': image_tensor.clone(),
135
+ # }
136
+
137
+ # # 3. إنشاء مدخل ليدار وهمي (أصفار)
138
+ # batch['lidar'] = torch.zeros(1, 3, 224, 224, dtype=torch.float32).to(device)
139
+
140
+ # # 4. تجميع القياسات بنفس ترتيب PDMDataset
141
+ # m = measurements
142
+ # measurements_tensor = torch.tensor([[
143
+ # m.pos[0], m.pos[1], m.theta,
144
+ # m.steer, m.throttle, float(m.brake),
145
+ # m.speed, float(m.command)
146
+ # ]], dtype=torch.float32).to(device)
147
+ # batch['measurements'] = measurements_tensor
148
+
149
+ # # 5. إنشاء نقطة هدف
150
+ # batch['target_point'] = torch.tensor([m.target_point], dtype=torch.float32).to(device)
151
+
152
+ # # لا نحتاج إلى قيم ground truth (gt_*) أثناء التنبؤ
153
+ # return batch
154
+
155
+ # def decode_base64_image(image_b64: str) -> np.ndarray:
156
+ # """
157
+ # فك تشفير صورة Base64
158
+ # """
159
+ # try:
160
+ # image_bytes = base64.b64decode(image_b64)
161
+ # nparr = np.frombuffer(image_bytes, np.uint8)
162
+ # image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
163
+ # return image
164
+ # except Exception as e:
165
+ # raise HTTPException(status_code=400, detail=f"Invalid image format: {str(e)}")
166
+
167
+ # def encode_image_to_base64(image: np.ndarray) -> str:
168
+ # """
169
+ # تشفير صورة إلى Base64
170
+ # """
171
+ # _, buffer = cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85])
172
+ # return base64.b64encode(buffer).decode('utf-8')
173
+
174
+ # # ================== نقاط نهاية الـ API ==================
175
+ # @app.get("/", response_class=HTMLResponse)
176
+ # async def root():
177
+ # """
178
+ # الصفحة الرئيسية للـ API
179
+ # """
180
+ # html_content = f"""
181
+ # <!DOCTYPE html>
182
+ # <html dir="rtl" lang="ar">
183
+ # <head>
184
+ # <meta charset="UTF-8">
185
+ # <meta name="viewport" content="width=device-width, initial-scale=1.0">
186
+ # <title>🚗 Baseer Self-Driving API</title>
187
+ # <style>
188
+ # * {{
189
+ # margin: 0;
190
+ # padding: 0;
191
+ # box-sizing: border-box;
192
+ # }}
193
+ # body {{
194
+ # font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
195
+ # background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
196
+ # min-height: 100vh;
197
+ # display: flex;
198
+ # align-items: center;
199
+ # justify-content: center;
200
+ # padding: 20px;
201
+ # }}
202
+ # .container {{
203
+ # background: rgba(255, 255, 255, 0.95);
204
+ # backdrop-filter: blur(10px);
205
+ # border-radius: 20px;
206
+ # padding: 40px;
207
+ # box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
208
+ # text-align: center;
209
+ # max-width: 600px;
210
+ # width: 100%;
211
+ # }}
212
+ # .logo {{
213
+ # font-size: 4rem;
214
+ # margin-bottom: 20px;
215
+ # animation: bounce 2s infinite;
216
+ # }}
217
+ # @keyframes bounce {{
218
+ # 0%, 20%, 50%, 80%, 100% {{ transform: translateY(0); }}
219
+ # 40% {{ transform: translateY(-10px); }}
220
+ # 60% {{ transform: translateY(-5px); }}
221
+ # }}
222
+ # h1 {{
223
+ # color: #333;
224
+ # margin-bottom: 10px;
225
+ # font-size: 2.5rem;
226
+ # }}
227
+ # .subtitle {{
228
+ # color: #666;
229
+ # margin-bottom: 30px;
230
+ # font-size: 1.2rem;
231
+ # }}
232
+ # .status {{
233
+ # display: inline-block;
234
+ # background: #4CAF50;
235
+ # color: white;
236
+ # padding: 8px 16px;
237
+ # border-radius: 20px;
238
+ # margin: 10px 0;
239
+ # font-weight: bold;
240
+ # }}
241
+ # .stats {{
242
+ # display: grid;
243
+ # grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
244
+ # gap: 20px;
245
+ # margin: 30px 0;
246
+ # }}
247
+ # .stat-card {{
248
+ # background: #f8f9fa;
249
+ # padding: 20px;
250
+ # border-radius: 15px;
251
+ # border-left: 4px solid #667eea;
252
+ # }}
253
+ # .stat-number {{
254
+ # font-size: 2rem;
255
+ # font-weight: bold;
256
+ # color: #667eea;
257
+ # }}
258
+ # .stat-label {{
259
+ # color: #666;
260
+ # margin-top: 5px;
261
+ # }}
262
+ # .buttons {{
263
+ # display: flex;
264
+ # gap: 15px;
265
+ # justify-content: center;
266
+ # flex-wrap: wrap;
267
+ # margin-top: 30px;
268
+ # }}
269
+ # .btn {{
270
+ # display: inline-block;
271
+ # padding: 12px 24px;
272
+ # border-radius: 25px;
273
+ # text-decoration: none;
274
+ # font-weight: bold;
275
+ # transition: all 0.3s ease;
276
+ # border: none;
277
+ # cursor: pointer;
278
+ # }}
279
+ # .btn-primary {{
280
+ # background: #667eea;
281
+ # color: white;
282
+ # }}
283
+ # .btn-secondary {{
284
+ # background: #6c757d;
285
+ # color: white;
286
+ # }}
287
+ # .btn:hover {{
288
+ # transform: translateY(-2px);
289
+ # box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
290
+ # }}
291
+ # .features {{
292
+ # text-align: right;
293
+ # margin-top: 30px;
294
+ # padding: 20px;
295
+ # background: #f8f9fa;
296
+ # border-radius: 15px;
297
+ # }}
298
+ # .features h3 {{
299
+ # color: #333;
300
+ # margin-bottom: 15px;
301
+ # }}
302
+ # .features ul {{
303
+ # list-style: none;
304
+ # padding: 0;
305
+ # }}
306
+ # .features li {{
307
+ # padding: 5px 0;
308
+ # color: #666;
309
+ # }}
310
+ # .features li:before {{
311
+ # content: "✅ ";
312
+ # margin-left: 10px;
313
+ # }}
314
+ # </style>
315
+ # </head>
316
+ # <body>
317
+ # <div class="container">
318
+ # <div class="logo">🚗</div>
319
+ # <h1>Baseer Self-Driving API</h1>
320
+ # <p class="subtitle">نظام القيادة الذاتية المتقدم</p>
321
+
322
+ # <div class="status">🟢 يعمل بنجاح</div>
323
+
324
+ # <div class="stats">
325
+ # <div class="stat-card">
326
+ # <div class="stat-number">{len(SESSIONS)}</div>
327
+ # <div class="stat-label">الجلسات النشطة</div>
328
+ # </div>
329
+ # <div class="stat-card">
330
+ # <div class="stat-number">v1.0</div>
331
+ # <div class="stat-label">الإصدار</div>
332
+ # </div>
333
+ # <div class="stat-card">
334
+ # <div class="stat-number">FastAPI</div>
335
+ # <div class="stat-label">التقنية</div>
336
+ # </div>
337
+ # </div>
338
+
339
+ # <div class="buttons">
340
+ # <a href="/docs" class="btn btn-primary">📚 توثيق API</a>
341
+ # <a href="/sessions" class="btn btn-secondary">📊 الجلسات</a>
342
+ # </div>
343
+
344
+ # <div class="features">
345
+ # <h3>🌟 الميزات الرئيسية</h3>
346
+ # <ul>
347
+ # <li>نموذج InterFuser للقيادة الذاتية</li>
348
+ # <li>معالجة الصور في الوقت الفعلي</li>
349
+ # <li>اكتشاف الكائنات المرورية</li>
350
+ # <li>تحديد المسارات الذكية</li>
351
+ # <li>واجهة RESTful سهلة الاستخدام</li>
352
+ # <li>إدارة جلسات متعددة</li>
353
+ # </ul>
354
+ # </div>
355
+ # </div>
356
+ # </body>
357
+ # </html>
358
+ # """
359
+ # return html_content
360
+
  import uuid
  import base64
  import cv2
  import torch
  import numpy as np
+ import logging
  from fastapi import FastAPI, HTTPException
  from fastapi.responses import HTMLResponse
+ from pydantic import BaseModel, Field
+ from typing import List, Dict, Tuple
+ from torchvision import transforms  # required by prepare_model_input

+ # ==============================================================================
+ # 1. Import all the project components we developed
+ #    (make sure these files are in the same folder)
+ # ==============================================================================
+ # From the model file (contains the Interfuser class and helper functions)
  from model_definition import InterfuserModel, load_and_prepare_model, create_model_config

+ # From the control and display modules
+ from simulation_modules import InterfuserController, Tracker
+ from simulation_modules import DisplayInterface, render_bev, unnormalize_image, DisplayConfig
+ # # Import from our local modules
+ # from model_definition import InterfuserModel, load_and_prepare_model, create_model_config
+ # from simulation_modules import (
+ #     InterfuserController, ControllerConfig, Tracker, DisplayInterface,
+ #     render, render_waypoints, render_self_car, WAYPOINT_SCALE_FACTOR,
+ #     T1_FUTURE_TIME, T2_FUTURE_TIME
+ # )
+ # ==============================================================================
+ # 2. General settings and the FastAPI app
+ # ==============================================================================
+ # Logging setup
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

+ # Initialize the FastAPI app
  app = FastAPI(
      title="Baseer Self-Driving API",
+     description="An advanced API for the InterFuser self-driving model, providing real-time control commands and scene analysis.",
+     version="1.1.0"
  )

+ # Global variables initialized at startup
+ MODEL: InterfuserModel = None
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ SESSIONS: Dict[str, Dict] = {}  # Dictionary storing the state of active sessions

+ # ==============================================================================
+ # 3. API data models (Pydantic)
+ # ==============================================================================
  class Measurements(BaseModel):
+     pos_global: Tuple[float, float] = Field(..., example=(0.0, 0.0), description="Global [X, Y] position of the vehicle.")
+     theta: float = Field(..., example=0.0, description="Global orientation angle of the vehicle in radians.")
+     speed: float = Field(..., example=0.0, description="Current speed in m/s.")
+     target_point: Tuple[float, float] = Field(..., example=(10.0, 0.0), description="Target point relative to the vehicle.")
+
+ class RunStepRequest(BaseModel):
+     session_id: str
+     image_b64: str = Field(..., description="Base64 encoded string of the vehicle's front camera view (BGR format).")
+     measurements: Measurements

  class ControlCommands(BaseModel):
      steer: float
      throttle: float
      brake: bool

+ class SceneAnalysis(BaseModel):
+     is_junction: float
+     traffic_light_state: float
+     stop_sign: float

+ class RunStepResponse(BaseModel):
      control_commands: ControlCommands
+     scene_analysis: SceneAnalysis
+     predicted_waypoints: List[Tuple[float, float]]
+     dashboard_b64: str = Field(..., description="Base64 encoded string of the comprehensive dashboard view.")
+     reason: str = Field(..., description="The reason for the current control action (e.g., 'Following ID 12', 'Red Light').")

+ # ==============================================================================
+ # 4. Helper functions
+ # ==============================================================================
+ def b64_to_cv2(b64_string: str) -> np.ndarray:
+     try:
+         img_bytes = base64.b64decode(b64_string)
+         img_array = np.frombuffer(img_bytes, dtype=np.uint8)
+         return cv2.imdecode(img_array, cv2.IMREAD_COLOR)
+     except Exception:
+         raise HTTPException(status_code=400, detail="Invalid Base64 image string.")
+
+ def cv2_to_b64(img: np.ndarray) -> str:
+     _, buffer = cv2.imencode('.jpg', img)
+     return base64.b64encode(buffer).decode('utf-8')

+ def prepare_model_input(image: np.ndarray, measurements: Measurements) -> Dict[str, torch.Tensor]:
+     """
+     Prepare a batch of one sample to feed to the model.
+     """
+     transform = transforms.Compose([
          transforms.ToTensor(),
          transforms.Resize((224, 224), antialias=True),
          transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
      ])
+     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+     image_tensor = transform(image_rgb).unsqueeze(0).to(DEVICE)

+     measurements_tensor = torch.tensor([[
+         measurements.pos_global[0], measurements.pos_global[1], measurements.theta,
+         0.0, 0.0, 0.0,  # Steer, throttle, brake (not used by the model)
+         measurements.speed, 4.0  # Command (default to FollowLane)
+     ]], dtype=torch.float32).to(DEVICE)

+     target_point_tensor = torch.tensor([measurements.target_point], dtype=torch.float32).to(DEVICE)

+     return {
          'rgb': image_tensor,
+         'rgb_left': image_tensor.clone(), 'rgb_right': image_tensor.clone(), 'rgb_center': image_tensor.clone(),
+         'measurements': measurements_tensor,
+         'target_point': target_point_tensor,
+         'lidar': torch.zeros_like(image_tensor)
      }

+ # ==============================================================================
+ # 5. Application lifecycle events (startup/shutdown)
+ # ==============================================================================
+ @app.on_event("startup")
+ async def startup_event():
+     global MODEL
+     logging.info("🚗 Server starting up...")
+     logging.info(f"Using device: {DEVICE}")
+     MODEL = load_and_prepare_model(DEVICE)
+     if MODEL:
+         logging.info("✅ Model loaded successfully. Server is ready!")
+     else:
+         logging.error("❌ CRITICAL: Model could not be loaded. The API will not function correctly.")

+ # ==============================================================================
+ # 6. Main API endpoints
+ # ==============================================================================
+ @app.get("/", response_class=HTMLResponse, include_in_schema=False)
  async def root():
+     # This renders a simple, friendly landing page for users
+     return """
+     <html>
+     <head><title>Baseer API</title></head>
+     <body style='font-family: sans-serif; text-align: center; padding-top: 50px;'>
+         <h1>🚗 Baseer Self-Driving API</h1>
+         <p>Welcome! The API is running.</p>
+         <p>Navigate to <a href="/docs">/docs</a> for the interactive API documentation.</p>
+     </body>
      </html>
      """

+ @app.post("/start_session", summary="Start a new driving session", tags=["Session Management"])
+ def start_session():
      session_id = str(uuid.uuid4())
+     config = create_model_config()
+     controller_params = config.get('controller_params', {})
+     controller_params.update({'frequency': 10.0})  # Set default frequency

      SESSIONS[session_id] = {
+         'tracker': Tracker(grid_conf=config['grid_conf']),
+         'controller': InterfuserController({'controller_params': controller_params, 'grid_conf': config['grid_conf']}),
+         'frame_num': 0
      }
+     logging.info(f"New session started: {session_id}")
+     return {"session_id": session_id}

+ @app.post("/run_step", response_model=RunStepResponse, summary="Process a single simulation step", tags=["Core"])
528
+ @torch.no_grad()
529
+ def run_step(request: RunStepRequest):
530
+ if MODEL is None:
531
+ raise HTTPException(status_code=503, detail="Model is not available.")
 
 
 
 
 
 
 
 
 
 
532
 
533
+ session = SESSIONS.get(request.session_id)
534
+ if not session:
535
+ raise HTTPException(status_code=404, detail="Session ID not found.")
536
+
537
+ # --- 1. الإدراك (Perception) ---
538
+ image = b64_to_cv2(request.image_b64)
539
+ model_input = prepare_model_input(image, request.measurements)
540
+ traffic, waypoints, junc, light, stop, _ = MODEL(model_input)
541
+
542
+ # --- 2. معالجة مخرجات النموذج ---
543
+ traffic_processed = torch.cat([torch.sigmoid(traffic[0][:, 0:1]), traffic[0][:, 1:]], dim=1)
544
+ traffic_np = traffic_processed.cpu().numpy().reshape(20, 20, -1)
545
+ waypoints_np = waypoints[0].cpu().numpy()
546
+ junction_prob = torch.softmax(junc, dim=1)[0, 1].item()
547
+ light_prob = torch.softmax(light, dim=1)[0, 1].item()
548
+ stop_prob = torch.softmax(stop, dim=1)[0, 1].item()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
549
 
550
+ # --- 3. التتبع والتحكم ---
551
+ ego_pos = np.array(request.measurements.pos_global)
552
+ ego_theta = request.measurements.theta
553
+ frame_num = session['frame_num']
554
 
555
+ active_tracks = session['tracker'].process_frame(traffic_np, ego_pos, ego_theta, frame_num)
556
+ steer, throttle, brake, ctrl_info = session['controller'].run_step(
557
+ speed=request.measurements.speed, waypoints=torch.from_numpy(waypoints_np),
558
+ junction=junction_prob, traffic_light=light_prob, stop_sign=stop_prob,
559
+ bev_map=traffic_np, ego_pos=ego_pos, ego_theta=ego_theta, frame_num=frame_num
560
  )
561
 
562
+ # --- 4. إنشاء الواجهة المرئية ---
563
+ display_iface = DisplayInterface(DisplayConfig(width=1280, height=720))
564
+ bev_maps = render_bev(active_tracks, waypoints_np, ego_pos, ego_theta)
565
+ display_data = {
566
+ 'camera_view': image, 'map_t0': bev_maps['t0'], 'map_t1': bev_maps['t1'], 'map_t2': bev_maps['t2'],
567
+ 'frame_num': frame_num, 'speed': request.measurements.speed * 3.6,
568
+ 'target_speed': ctrl_info.get('target_speed', 0) * 3.6,
569
+ 'steer': steer, 'throttle': throttle, 'brake': brake,
570
+ 'light_prob': light_prob, 'stop_prob': stop_prob,
571
+ 'object_counts': {'car': len(active_tracks)}
 
 
 
 
 
 
 
 
 
 
 
572
  }
573
+ dashboard = display_iface.run_interface(display_data)
574
 
575
+ # --- 5. تحديث الجلسة وإرجاع الرد ---
576
+ session['frame_num'] += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
577
 
578
+ return RunStepResponse(
579
+ control_commands=ControlCommands(steer=steer, throttle=throttle, brake=brake),
580
+ scene_analysis=SceneAnalysis(is_junction=junction_prob, traffic_light_state=light_prob, stop_sign=stop_prob),
581
+ predicted_waypoints=[tuple(wp) for wp in waypoints_np.tolist()],
582
+ dashboard_b64=cv2_to_b64(dashboard),
583
+ reason=ctrl_info.get('brake_reason', "Cruising")
584
+ )
 
 
 
 
 
 
 
 
 
585
 
586
+ @app.post("/end_session", summary="End and clean up a session", tags=["Session Management"])
587
+ def end_session(session_id: str):
588
+ if session_id in SESSIONS:
589
+ del SESSIONS[session_id]
590
+ logging.info(f"Session ended: {session_id}")
591
+ return {"message": f"Session {session_id} ended."}
592
+ raise HTTPException(status_code=404, detail="Session not found.")
593
  # ================== تشغيل الخادم ==================
594
+ # if __name__ == "__main__":
595
+ # import uvicorn
596
+ # uvicorn.run(app, host="0.0.0.0", port=7860)
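
For reference, exercising the new endpoints could look roughly like the sketch below. It is not part of the commit; it assumes the server is running locally on port 7860 (as in the commented-out __main__ block), that the `requests` package is installed, and that a test image named `frame.jpg` exists.

# client_example.py - minimal sketch of a client for the Baseer API (assumptions noted above)
import base64
import requests

BASE_URL = "http://localhost:7860"

# 1. Open a session and keep the returned session_id
session_id = requests.post(f"{BASE_URL}/start_session").json()["session_id"]

# 2. Encode one camera frame and send it with the vehicle measurements
with open("frame.jpg", "rb") as f:  # hypothetical test image
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "session_id": session_id,
    "image_b64": image_b64,
    "measurements": {
        "pos_global": [0.0, 0.0],   # global [X, Y] position
        "theta": 0.0,               # heading in radians
        "speed": 5.0,               # m/s
        "target_point": [10.0, 0.0] # target point relative to the vehicle
    },
}
step = requests.post(f"{BASE_URL}/run_step", json=payload).json()
print(step["control_commands"], step["reason"])

# Optionally persist the returned dashboard visualisation
with open("dashboard.jpg", "wb") as f:
    f.write(base64.b64decode(step["dashboard_b64"]))

# 3. Close the session (end_session takes session_id as a query parameter)
requests.post(f"{BASE_URL}/end_session", params={"session_id": session_id})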