import streamlit as st
import cv2
import supervision as sv
from ultralytics import YOLO
import numpy as np
from PIL import Image
import io
import torch

# Set page config
st.set_page_config(page_title="Building Detection App", page_icon="🏢", layout="wide")

# Custom CSS with theme compatibility
st.markdown("""
""", unsafe_allow_html=True)

# Load the YOLO model
@st.cache_resource
def load_model():
    model = YOLO("mosaic_medium_100_tiny_object.pt")  # Update this to the filename of your model
    model.to('cpu')  # Ensure the model is on CPU
    return model

model = load_model()

def process_image(image):
    # Convert PIL Image to numpy array
    image_np = np.array(image)
    # Convert RGB to BGR (OpenCV uses BGR)
    image_cv2 = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)

    # Run the model on each tile produced by the slicer
    def callback(image_slice: np.ndarray) -> sv.Detections:
        result = model(image_slice)[0]
        return sv.Detections.from_ultralytics(result)

    # Slice the image into 256x256 tiles so small buildings are detected more reliably
    slicer = sv.InferenceSlicer(callback=callback, slice_wh=(256, 256), iou_threshold=0.8)
    detections = slicer(image_cv2)

    # Filter detections for building class (assuming class_id 2 is for buildings)
    building_detections = detections[detections.class_id == 2]

    # Draw bounding boxes and labels for the filtered detections
    label_annotator = sv.LabelAnnotator()
    box_annotator = sv.BoxAnnotator()
    annotated_image = box_annotator.annotate(scene=image_cv2.copy(), detections=building_detections)
    annotated_image = label_annotator.annotate(scene=annotated_image, detections=building_detections)

    # Convert BGR back to RGB for displaying in Streamlit
    return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

def main():
    st.title("Building Detection App")
    st.markdown(
        'Upload an image to detect buildings using our advanced AI model.',
        unsafe_allow_html=True,
    )

    col1, col2 = st.columns(2)

    with col1:
        st.markdown('Supported formats: JPG, JPEG, PNG')
        st.markdown(
            """
            1. Upload an image using the file uploader on the left.
            2. Click the Detect Buildings button to process the image.
            3. View the results with bounding boxes around detected buildings.

            Our AI model is trained to identify various types of buildings in different environments.
            """
        )