import streamlit as st
import cv2
import PIL.Image  # import the Image submodule explicitly so PIL.Image.open is always available
from ultralytics import YOLO
import tempfile
import time
# ----------------------------------------------------------------
# Set page configuration (st.set_page_config must be the first Streamlit
# command in the script, so it has to run before any st.error call below)
st.set_page_config(
    page_title="WildfireWatch",
    page_icon="🔥",
    layout="wide",
    initial_sidebar_state="expanded"
)

# ----------------------------------------------------------------
# Load the model (using a URL to your weight file)
model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/blob/main/best.pt'
try:
    model = YOLO(model_path)
except Exception as ex:
    st.error(f"Unable to load model. Check the specified path: {model_path}")
    st.error(ex)
    st.stop()  # halt the script so later model.predict calls don't raise NameError
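# Note: the '/blob/' URL above points to a Hugging Face HTML page rather than
# the raw weight file. If YOLO cannot fetch it, one option (an assumption, not
# part of the original Space) is to download the weights explicitly first:
#
#     from huggingface_hub import hf_hub_download
#     model_path = hf_hub_download(
#         repo_id="tstone87/ccr-colorado",  # Space that hosts best.pt
#         filename="best.pt",
#         repo_type="space",
#     )
#     model = YOLO(model_path)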
# ----------------------------------------------------------------
# App Title and Description
st.title("WildfireWatch: Detecting Wildfire using AI")
st.markdown(
    """
    **Wildfires are a critical threat to ecosystems and communities.**
    Early detection can save lives and reduce environmental damage.
    Use this app to analyze images, videos, or live webcam streams for signs of fire.
    """
)
# ----------------------------------------------------------------
# Create two tabs: one for file uploads and one for live webcam stream detection
tab_upload, tab_live = st.tabs(["Upload Image/Video", "Live Webcam Stream"])
# =========================
# Tab 1: File Upload for Image/Video Detection
with tab_upload:
    st.header("Upload an Image or Video")
    uploaded_file = st.file_uploader(
        "Choose an image or video...",
        type=["jpg", "jpeg", "png", "bmp", "webp", "mp4"]
    )
    # Slider returns 25-100; divide by 100 for the 0.25-1.0 range YOLO expects
    confidence = st.slider("Select Model Confidence", 25, 100, 40) / 100
    if uploaded_file is not None:
        if uploaded_file.type.split('/')[0] == 'image':
            # Process uploaded image
            image = PIL.Image.open(uploaded_file)
            st.image(image, caption="Uploaded Image", use_column_width=True)
            if st.button("Detect Wildfire in Image"):
                results = model.predict(image, conf=confidence)
                # plot() returns a BGR array; reverse the channel order for display
                annotated_image = results[0].plot()[:, :, ::-1]
                st.image(annotated_image, caption="Detection Result", use_column_width=True)
                with st.expander("Detection Details"):
                    for box in results[0].boxes:
                        st.write("Box coordinates (xywh):", box.xywh)
        elif uploaded_file.type.split('/')[0] == 'video':
            # Process uploaded video: write it to a temporary file so OpenCV can read it by name
            tfile = tempfile.NamedTemporaryFile(delete=False)
            tfile.write(uploaded_file.read())
            tfile.flush()  # make sure the full video is on disk before opening it
            cap = cv2.VideoCapture(tfile.name)
            if st.button("Detect Wildfire in Video"):
                frame_placeholder = st.empty()
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break
                    results = model.predict(frame, conf=confidence)
                    # plot() returns BGR; reverse to RGB before display
                    annotated_frame = results[0].plot()[:, :, ::-1]
                    frame_placeholder.image(annotated_frame, channels="RGB", use_column_width=True)
                    time.sleep(0.05)  # Adjust delay for processing speed
                cap.release()
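                # Running the model on every frame can be slow on CPU hardware.
                # A common workaround (an assumption, not part of the original app)
                # is to skip frames, e.g. only run inference on every Nth frame:
                #
                #     FRAME_SKIP = 5                       # hypothetical stride
                #     frame_idx = 0
                #     while cap.isOpened():
                #         if not cap.grab():               # cheaply advance one frame
                #             break
                #         if frame_idx % FRAME_SKIP == 0:
                #             ret, frame = cap.retrieve()  # decode only the kept frames
                #             if ret:
                #                 results = model.predict(frame, conf=confidence)
                #                 ...
                #         frame_idx += 1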
# =========================
# Tab 2: Live Webcam Stream Detection
with tab_live:
    st.header("Live Webcam Stream Detection")
    st.markdown("Enter the URL of your hosted webcam stream (e.g., an IP camera stream).")
    webcam_url = st.text_input("Webcam Stream URL", value="http://<your_webcam_stream_url>")
    live_confidence = st.slider("Select Live Detection Confidence", 25, 100, 40) / 100

    # Initialize a session state flag for stopping the live loop
    if "stop_live" not in st.session_state:
        st.session_state.stop_live = False

    def stop_live_detection():
        # Button callbacks run before the script reruns, so the flag is already
        # set when the loop below checks it again.
        st.session_state.stop_live = True

    if st.button("Start Live Detection"):
        cap = cv2.VideoCapture(webcam_url)
        if not cap.isOpened():
            st.error("Unable to open webcam stream. Please check the URL.")
        else:
            live_frame_placeholder = st.empty()
            st.button("Stop Live Detection", on_click=stop_live_detection)
            while cap.isOpened() and not st.session_state.stop_live:
                ret, frame = cap.read()
                if not ret:
                    st.error("Failed to retrieve frame from stream.")
                    break
                results = model.predict(frame, conf=live_confidence)
                # plot() returns BGR; reverse to RGB before display
                annotated_frame = results[0].plot()[:, :, ::-1]
                live_frame_placeholder.image(annotated_frame, channels="RGB", use_column_width=True)
                time.sleep(0.05)
            cap.release()
            # Reset the flag so the next "Start Live Detection" click works
            st.session_state.stop_live = False
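# ----------------------------------------------------------------
# Usage note (assumptions, not part of the original Space): cv2.VideoCapture
# expects a direct video endpoint (e.g. an RTSP or MJPEG URL), not a browser
# page. Assuming this script is saved as app.py with streamlit, ultralytics,
# opencv-python and pillow installed, it can be launched locally with:
#
#     streamlit run app.py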