from fastapi import APIRouter
from datetime import datetime
from datasets import load_dataset
import numpy as np
from sklearn.metrics import accuracy_score
import os
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTForSemanticSegmentation
import cv2
from dotenv import load_dotenv

# Request schema and emissions-tracking helpers used below
# (assumed to live in the submission template's utils package)
from .utils.evaluation import ImageEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info

load_dotenv()

router = APIRouter()

DESCRIPTION = "Mobile-ViT Smoke Detection"
ROUTE = "/image"

# Load the fine-tuned weights on top of the pretrained MobileViT segmentation model
model_path = "mobilevit_segmentation_full_data.pth"
feature_extractor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
model.load_state_dict(torch.load(model_path))
model.eval()


def preprocess(image):
    """Resize to 512x512, convert RGB to BGR, and scale pixel values to [0, 1]."""
    image = image.resize((512, 512))
    # Convert RGB to BGR
    image = np.array(image)[:, :, ::-1]
    image = Image.fromarray(image)
    # Normalize pixel values to [0, 1]
    image = np.array(image, dtype=np.float32) / 255.0
    return image


def get_bounding_boxes_from_mask(mask):
    """Extract bounding boxes from a binary mask."""
    pred_boxes = []
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if len(contour) > 5:  # Ignore small/noisy contours
            x, y, w, h = cv2.boundingRect(contour)
            pred_boxes.append((x, y, x + w, y + h))
    return pred_boxes


def parse_boxes(annotation_string):
    """Parse multiple boxes from a single annotation string.

    Each box has 5 values: class_id, x_center, y_center, width, height.
    """
    values = [float(x) for x in annotation_string.strip().split()]
    boxes = []
    # Each box has 5 values
    for i in range(0, len(values), 5):
        if i + 5 <= len(values):
            # Skip class_id (first value) and take the next 4 values
            box = values[i + 1:i + 5]
            boxes.append(box)
    return boxes


def compute_iou(box1, box2):
    """Compute Intersection over Union (IoU) between two YOLO format boxes."""
    # Convert YOLO format (x_center, y_center, width, height) to corners
    def yolo_to_corners(box):
        x_center, y_center, width, height = box
        x1 = x_center - width / 2
        y1 = y_center - height / 2
        x2 = x_center + width / 2
        y2 = y_center + height / 2
        return np.array([x1, y1, x2, y2])

    box1_corners = yolo_to_corners(box1)
    box2_corners = yolo_to_corners(box2)

    # Calculate intersection
    x1 = max(box1_corners[0], box2_corners[0])
    y1 = max(box1_corners[1], box2_corners[1])
    x2 = min(box1_corners[2], box2_corners[2])
    y2 = min(box1_corners[3], box2_corners[3])
    intersection = max(0, x2 - x1) * max(0, y2 - y1)

    # Calculate union
    box1_area = (box1_corners[2] - box1_corners[0]) * (box1_corners[3] - box1_corners[1])
    box2_area = (box2_corners[2] - box2_corners[0]) * (box2_corners[3] - box2_corners[1])
    union = box1_area + box2_area - intersection

    return intersection / (union + 1e-6)


def compute_max_iou(true_boxes, pred_box):
    """Compute maximum IoU between a predicted box and all true boxes."""
    max_iou = 0
    for true_box in true_boxes:
        iou = compute_iou(true_box, pred_box)
        max_iou = max(max_iou, iou)
    return max_iou

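# Illustrative sanity check for the helpers above (made-up numbers, not executed by
# the API). parse_boxes expects YOLO-style annotations with normalized coordinates:
# class_id, x_center, y_center, width, height per box.
#
#   parse_boxes("0 0.5 0.5 0.2 0.2 0 0.8 0.8 0.1 0.1")
#   # -> [[0.5, 0.5, 0.2, 0.2], [0.8, 0.8, 0.1, 0.1]]
#   compute_iou([0.5, 0.5, 0.2, 0.2], [0.5, 0.5, 0.2, 0.2])  # ~1.0 (identical boxes)
#   compute_iou([0.5, 0.5, 0.2, 0.2], [0.8, 0.8, 0.1, 0.1])  # 0.0 (disjoint boxes)
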
@router.post(ROUTE, tags=["Image Task"], description=DESCRIPTION)
async def evaluate_image(request: ImageEvaluationRequest):
    """
    Evaluate image classification and object detection for forest fire smoke.

    Current Model: MobileViT semantic segmentation
    - A fine-tuned deeplabv3-mobilevit-xx-small predicts a smoke mask
    - Bounding boxes are derived from the contours of the predicted mask

    Metrics:
    - Classification accuracy: Whether an image contains smoke or not
    - Object Detection accuracy: IoU (Intersection over Union) for smoke bounding boxes
    """
    # Get space info
    username, space_url = get_space_info()

    # Load and prepare the dataset
    dataset = load_dataset(request.dataset_name, token=os.getenv("HF_TOKEN"))

    # Split dataset
    train_test = dataset["train"].train_test_split(test_size=request.test_size, seed=request.test_seed)
    test_dataset = dataset["val"]  # train_test["test"]

    # Start tracking emissions
    tracker.start()
    tracker.start_task("inference")

    #--------------------------------------------------------------------------------------------
    # YOUR MODEL INFERENCE CODE HERE
    # MobileViT segmentation inference below replaces the template's random baseline
    #--------------------------------------------------------------------------------------------

    predictions = []
    true_labels = []
    pred_boxes = []
    true_boxes_list = []

    for example in test_dataset:
        # Extract image and annotations
        image = example["image"]
        original_size = image.size  # (width, height), as expected by cv2.resize
        image = preprocess(image)

        annotation = example.get("annotations", "").strip()
        has_smoke = len(annotation) > 0
        true_labels.append(1 if has_smoke else 0)

        if has_smoke:
            image_true_boxes = parse_boxes(annotation)
            if image_true_boxes:
                true_boxes_list.append(image_true_boxes)
            else:
                true_boxes_list.append([])
        else:
            true_boxes_list.append([])

        # Model Inference
        image_input = feature_extractor(images=image, return_tensors="pt").pixel_values
        with torch.no_grad():
            outputs = model(pixel_values=image_input)
            logits = outputs.logits
        probabilities = torch.sigmoid(logits)
        predicted_mask = (probabilities[0, 1] > 0.30).cpu().numpy().astype(np.uint8)
        predicted_mask_resized = cv2.resize(predicted_mask, original_size, interpolation=cv2.INTER_NEAREST)

        # Extract predicted bounding boxes (pixel corners) and convert them to
        # normalized YOLO format so they are comparable with the ground-truth boxes
        predicted_boxes = get_bounding_boxes_from_mask(predicted_mask_resized)
        width, height = original_size
        predicted_boxes_yolo = [
            ((x1 + x2) / 2 / width, (y1 + y2) / 2 / height, (x2 - x1) / width, (y2 - y1) / height)
            for (x1, y1, x2, y2) in predicted_boxes
        ]
        pred_boxes.append(predicted_boxes_yolo)

        # Binary prediction for smoke detection
        predictions.append(1 if len(predicted_boxes) > 0 else 0)

    # Filter only valid box pairs (images with both true and predicted boxes)
    filtered_true_boxes_list = []
    filtered_pred_boxes = []
    for true_boxes, pred_boxes_entry in zip(true_boxes_list, pred_boxes):
        if true_boxes and pred_boxes_entry:
            filtered_true_boxes_list.append(true_boxes)
            filtered_pred_boxes.append(pred_boxes_entry)

    true_boxes_list = filtered_true_boxes_list
    pred_boxes = filtered_pred_boxes

    #--------------------------------------------------------------------------------------------
    # YOUR MODEL INFERENCE STOPS HERE
    #--------------------------------------------------------------------------------------------

    # Stop tracking emissions
    emissions_data = tracker.stop_task()

    # Calculate classification accuracy
    classification_accuracy = accuracy_score(true_labels, predictions)

    # Calculate mean IoU for object detection (only for images with smoke).
    # For each image, keep the best IoU over all (true box, predicted box) pairs.
    ious = []
    for true_boxes, image_pred_boxes in zip(true_boxes_list, pred_boxes):
        best_iou = max(compute_max_iou(true_boxes, pred_box) for pred_box in image_pred_boxes)
        ious.append(best_iou)

    mean_iou = float(np.mean(ious)) if ious else 0.0
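    # Worked example (illustrative numbers only): if two images survive the
    # filtering above and their best box matches score IoU 0.6 and 0.2, then
    # mean_iou = (0.6 + 0.2) / 2 = 0.4. Images without both true and predicted
    # boxes were dropped earlier and do not contribute to this average.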
"model_description": DESCRIPTION, "classification_accuracy": float(classification_accuracy), "mean_iou": mean_iou, "energy_consumed_wh": emissions_data.energy_consumed * 1000, "emissions_gco2eq": emissions_data.emissions * 1000, "emissions_data": clean_emissions_data(emissions_data), "api_route": ROUTE, "dataset_config": { "dataset_name": request.dataset_name, "test_size": request.test_size, "test_seed": request.test_seed } } return results