بصيـر API
مستقبل القيادة الذاتية بين يديك
# app.py - InterFuser Self-Driving API Server
#
# FastAPI server wrapping the InterFuser self-driving model: loads the model
# once at startup and serves real-time control commands plus scene analysis.

import base64
import logging
import uuid
from typing import Any, Dict, List, Optional, Tuple

import cv2
import numpy as np
import torch
from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from pydantic import BaseModel, Field
from torchvision import transforms

from model_definition import InterfuserHDPE, load_and_prepare_model, get_master_config
from simulation_modules import (
    DisplayConfig,
    DisplayInterface,
    InterfuserController,
    Tracker,
    render_bev,
    unnormalize_image,
)

# ==============================================================================
# 2. Global configuration and the FastAPI application
# ==============================================================================

# Logging setup.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# FastAPI application instance.
app = FastAPI(
    title="Baseer Self-Driving API",
    description="An advanced API for the InterFuser self-driving model, providing real-time control commands and scene analysis.",
    version="1.1.0"
)

# Globals initialised in the startup event.
MODEL: Optional[InterfuserHDPE] = None  # loaded model; stays None if loading fails
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SESSIONS: Dict[str, Dict] = {}  # per-session state, keyed by session_id
# ==============================================================================
# 3. Pydantic data models for the API
# ==============================================================================

class Measurements(BaseModel):
    """Vehicle state measurements sent with every simulation step."""

    # Global [X, Y] position of the vehicle.
    pos_global: Tuple[float, float] = Field(..., example=(0.0, 0.0), description="Global [X, Y] position of the vehicle.")
    # Global heading angle in radians.
    theta: float = Field(..., example=0.0, description="Global orientation angle of the vehicle in radians.")
    # Current speed in m/s.
    speed: float = Field(..., example=0.0, description="Current speed in m/s.")
    # Navigation target point in the vehicle's local frame.
    target_point: Tuple[float, float] = Field(..., example=(10.0, 0.0), description="Target point relative to the vehicle.")


class RunStepRequest(BaseModel):
    """One simulation-step request: session id, camera frame and measurements."""

    session_id: str
    image_b64: str = Field(..., description="Base64 encoded string of the vehicle's front camera view (BGR format).")
    measurements: Measurements


class ControlCommands(BaseModel):
    """Low-level vehicle control output for the current step."""

    steer: float
    throttle: float
    brake: bool


class SceneAnalysis(BaseModel):
    """Scene-understanding scores predicted by the model."""

    is_junction: float
    traffic_light_state: float
    stop_sign: float


class RunStepResponse(BaseModel):
    """Full response for a simulation step: controls, analysis and dashboard."""

    control_commands: ControlCommands
    scene_analysis: SceneAnalysis
    # Predicted future waypoints in the vehicle's local frame.
    predicted_waypoints: List[Tuple[float, float]]
    dashboard_b64: str = Field(..., description="Base64 encoded string of the comprehensive dashboard view.")
    reason: str = Field(..., description="The reason for the current control action (e.g., 'Following ID 12', 'Red Light').")
# ==============================================================================
# 4. Helper functions
# ==============================================================================

# Image preprocessing pipeline. Built once at import time instead of being
# re-created on every request (it is stateless and reusable).
_IMAGE_TRANSFORM = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((224, 224), antialias=True),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


def b64_to_cv2(b64_string: str) -> np.ndarray:
    """Decode a Base64 string into a BGR image array.

    Raises:
        HTTPException: 400 when the string is not valid Base64 or the
            decoded bytes are not a decodable image.
    """
    try:
        img_bytes = base64.b64decode(b64_string)
        img_array = np.frombuffer(img_bytes, dtype=np.uint8)
        image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    except Exception as exc:
        raise HTTPException(status_code=400, detail="Invalid Base64 image string.") from exc
    # cv2.imdecode does NOT raise on bad data -- it returns None. Without this
    # check the original code silently returned None to callers.
    if image is None:
        raise HTTPException(status_code=400, detail="Invalid Base64 image string.")
    return image


def cv2_to_b64(img: np.ndarray) -> str:
    """Encode a BGR image array as a Base64 JPEG string."""
    _, buffer = cv2.imencode('.jpg', img)
    return base64.b64encode(buffer).decode('utf-8')


def prepare_model_input(image: np.ndarray, measurements: Measurements) -> Dict[str, torch.Tensor]:
    """Build a batch of size 1 to feed to the model.

    Args:
        image: BGR camera frame.
        measurements: Current vehicle state.

    Returns:
        Dict of input tensors on DEVICE. The single camera frame is reused
        for all four camera slots, and the lidar slot is zero-filled
        (no lidar data is available through this API).
    """
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_tensor = _IMAGE_TRANSFORM(image_rgb).unsqueeze(0).to(DEVICE)

    measurements_tensor = torch.tensor([[
        measurements.pos_global[0],
        measurements.pos_global[1],
        measurements.theta,
        0.0, 0.0, 0.0,  # Steer, throttle, brake (not used by model)
        measurements.speed,
        4.0  # Command (default to FollowLane)
    ]], dtype=torch.float32).to(DEVICE)

    target_point_tensor = torch.tensor([measurements.target_point], dtype=torch.float32).to(DEVICE)

    return {
        'rgb': image_tensor,
        'rgb_left': image_tensor.clone(),
        'rgb_right': image_tensor.clone(),
        'rgb_center': image_tensor.clone(),
        'measurements': measurements_tensor,
        'target_point': target_point_tensor,
        'lidar': torch.zeros_like(image_tensor)
    }
أحداث دورة حياة التطبيق (Startup/Shutdown) # ============================================================================== @app.on_event("startup") async def startup_event(): global MODEL logging.info("🚗 Server starting up...") logging.info(f"Using device: {DEVICE}") MODEL = load_and_prepare_model(DEVICE) if MODEL: logging.info("✅ Model loaded successfully. Server is ready!") else: logging.error("❌ CRITICAL: Model could not be loaded. The API will not function correctly.") # ============================================================================== # 6. نقاط النهاية الرئيسية (API Endpoints) # ============================================================================== @app.get("/", response_class=HTMLResponse, include_in_schema=False, tags=["General"]) async def root(): """ [النسخة النهائية مع التمرير] يعرض صفحة رئيسية احترافية وجذابة بصريًا مع تمكين التمرير العمودي. """ active_sessions_count = len(SESSIONS) status_color = "#00ff7f" # SpringGreen status_text = "متصل ويعمل" if MODEL is None: status_color = "#ff4757" # Red status_text = "خطأ: النموذج غير متاح" html_content = f"""
مستقبل القيادة الذاتية بين يديك