import os
import tempfile
import base64
import cv2
import streamlit as st
import PIL
from ultralytics import YOLO
import requests

###############################################################################
# Helper function to embed an HTML5 video that autoplays (muted) with controls.
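# NOTE: the file is embedded as a base64 data URI, so very large videos inflate
# the page payload by roughly one third and can make the page slow to load.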
###############################################################################
def show_autoplay_video(video_bytes: bytes, title: str = "Video"):
    if not video_bytes:
        st.warning(f"No {title} video available.")
        return
    video_base64 = base64.b64encode(video_bytes).decode()
    video_html = f"""
    <h4>{title}</h4>
    <video width="100%" controls autoplay muted>
      <source src="data:video/mp4;base64,{video_base64}" type="video/mp4">
      Your browser does not support the video tag.
    </video>
    """
    st.markdown(video_html, unsafe_allow_html=True)

###############################################################################
# Session state initialization (for uploaded processing results)
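# Streamlit reruns this script on every widget interaction, so processing
# results are kept in st.session_state to survive reruns (e.g. so clicking
# the download button does not discard the processed video).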
###############################################################################
if "processed_frames" not in st.session_state:
    st.session_state["processed_frames"] = []
if "shortened_video_data" not in st.session_state:
    st.session_state["shortened_video_data"] = None
if "shortened_video_ready" not in st.session_state:
    st.session_state["shortened_video_ready"] = False

###############################################################################
# Configure YOLO model path and page layout
###############################################################################
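# The weights live on the Hugging Face Hub; passing the URL to YOLO() below is
# expected to download the .pt file on first use (an assumption about the
# ultralytics loader - a local path to best.pt works the same way if preferred).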
model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'
st.set_page_config(
    page_title="Fire Detection: Original vs. Processed Video",
    page_icon="🔥",
    layout="wide",
    initial_sidebar_state="expanded"
)

st.title("Fire Watch: Detecting fire using AI vision models")
col1, col2 = st.columns(2)
with col1:
    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
with col2:
    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)

st.markdown("""
Fires in Colorado present a serious challenge, threatening urban communities, highways, and even remote areas. 
Early detection is critical. Fire Watch uses a YOLOv8 model for real-time fire and smoke detection 
in images and videos.
""")
st.markdown("---")
st.header("Fire Detection:")


###############################################################################
# SIDEBAR: Video input options, confidence, sampling options, and example selection
###############################################################################
with st.sidebar:
    st.header("Video Input Options")
    # Option to select an example pair; "None" means use an uploaded file.
    example_option = st.selectbox(
        "Select Example Pair (optional)",
        ["None", "T Example", "LA Example"]
    )
    source_file = st.file_uploader(
        "Or upload your own file...",
        type=("mp4", "jpg", "jpeg", "png", "bmp", "webp")
    )
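    # The slider works in percent (25-100); dividing by 100 gives the 0-1
    # confidence threshold that model.predict() expects.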
    confidence = float(st.slider("Select Model Confidence", 25, 100, 40)) / 100
    video_option = st.selectbox(
        "Select Video Shortening Option",
        ["Original FPS", "1 fps", "1 frame per 5 seconds", "1 frame per 10 seconds", "1 frame per 15 seconds"]
    )
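    # Sidebar placeholders that the processing loop further down updates in place.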
    progress_text = st.empty()
    progress_bar = st.progress(0)

###############################################################################
# MAIN TITLE
###############################################################################
st.title("Fire Detection: Original vs. Processed Video")

###############################################################################
# Load YOLO model
###############################################################################
try:
    model = YOLO(model_path)
except Exception as ex:
    st.error(f"Unable to load model. Check model path: {model_path}")
    st.error(ex)
    st.stop()

###############################################################################
# Determine source video(s): Example pair or uploaded file.
###############################################################################
original_video_data = None
processed_video_data = None   # For example pairs
original_is_image = False     # True when the upload is a still image rather than a video

if example_option != "None":
    # Use example videos from remote URLs.
    if example_option == "T Example":
        # For T Example: set your URLs for original and processed videos.
        orig_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/T1.mp4"
        proc_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/T2.mpg"
    elif example_option == "LA Example":
        orig_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/LA1.mp4"
        proc_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/LA2.mp4"
    try:
        original_video_data = requests.get(orig_url, timeout=30).content
        processed_video_data = requests.get(proc_url, timeout=30).content
    except Exception as ex:
        st.error(f"Error loading example videos. Check your URLs. ({ex})")
else:
    # No example selected. If a file is uploaded, use it.
    if source_file:
        file_type = source_file.type.split('/')[0]
        if file_type == 'image':
            # Still images are displayed with st.image below; detection currently
            # runs only on video uploads.
            original_is_image = True
            original_image = PIL.Image.open(source_file)
            buf = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
            original_image.save(buf.name, format="PNG")
            with open(buf.name, "rb") as f:
                original_video_data = f.read()
        else:
            # For video uploads, save to a temp file.
            tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
            tfile.write(source_file.read())
            tfile.flush()
            with open(tfile.name, "rb") as vf:
                original_video_data = vf.read()
            # Open with OpenCV for processing.
            vidcap = cv2.VideoCapture(tfile.name)
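            # The capture handle stays in scope so the detection block further
            # down can read frames from it once the button is pressed.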
    else:
        st.info("Please select an example pair or upload a video file.")

###############################################################################
# Layout: Two columns for Original and Processed videos
###############################################################################
col1, col2 = st.columns(2)

with col1:
    st.subheader("Original File")
    if original_video_data:
        if original_is_image:
            st.image(original_video_data, caption="Original Image", use_column_width=True)
        else:
            show_autoplay_video(original_video_data, title="Original Video")
    else:
        st.info("No original file available.")

with col2:
    st.subheader("Result File")
    if example_option != "None":
        # For example pairs, the processed video is already available.
        if processed_video_data:
            show_autoplay_video(processed_video_data, title="Processed Video")
        else:
            st.info("No processed video available in example.")
    else:
        # For uploaded files, if a processed video is ready, show it.
        if st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
            show_autoplay_video(st.session_state["shortened_video_data"], title="Processed Video")
        else:
            st.info("Processed video will appear here once detection is run.")

###############################################################################
# DETECTION: Process the uploaded video if no example is selected.
###############################################################################
if example_option == "None" and source_file and source_file.type.split('/')[0] != 'image':
    if st.sidebar.button("Let's Detect Wildfire"):
        # Reset any previous processed results.
        st.session_state["processed_frames"] = []
        st.session_state["shortened_video_data"] = None
        st.session_state["shortened_video_ready"] = False

        processed_frames = st.session_state["processed_frames"]

        frame_count = 0
        orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        width  = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
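        # CAP_PROP_FPS can report 0 for some containers, so each branch below
        # falls back to a frame-count-based default.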

        # Determine sampling interval based on option.
        if video_option == "Original FPS":
            sample_interval = 1
            output_fps = orig_fps
        elif video_option == "1 fps":
            sample_interval = int(orig_fps) if orig_fps > 0 else 1
            output_fps = 1
        elif video_option == "1 frame per 5 seconds":
            sample_interval = int(orig_fps * 5) if orig_fps > 0 else 5
            output_fps = 1
        elif video_option == "1 frame per 10 seconds":
            sample_interval = int(orig_fps * 10) if orig_fps > 0 else 10
            output_fps = 1
        elif video_option == "1 frame per 15 seconds":
            sample_interval = int(orig_fps * 15) if orig_fps > 0 else 15
            output_fps = 1
        else:
            sample_interval = 1
            output_fps = orig_fps

        success, image = vidcap.read()
        while success:
            if frame_count % sample_interval == 0:
                res = model.predict(image, conf=confidence)
                # results[0].plot() returns the annotated frame in BGR order,
                # which is what cv2.VideoWriter expects, so it is written as-is
                # (reversing the channels here would swap colors in the output).
                annotated_frame = res[0].plot()
                processed_frames.append(annotated_frame)
                # Update progress
                if total_frames > 0:
                    progress_pct = int((frame_count / total_frames) * 100)
                    progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
                    progress_bar.progress(min(100, progress_pct))
                else:
                    progress_text.text(f"Processing frame {frame_count}")
            frame_count += 1
            success, image = vidcap.read()

        progress_text.text("Video processing complete!")
        progress_bar.progress(100)

        # Create shortened video from processed frames.
        if processed_frames:
            temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
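            # NOTE: OpenCV's 'mp4v' fourcc produces MPEG-4 Part 2 video, which many
            # browsers cannot decode in an HTML5 <video> tag; if the embedded preview
            # stays blank, re-encoding to H.264 (e.g. with ffmpeg) is a likely fix.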
            out = cv2.VideoWriter(temp_video_file.name, fourcc, output_fps, (width, height))
            for frame in processed_frames:
                out.write(frame)
            out.release()

            with open(temp_video_file.name, 'rb') as video_file:
                st.session_state["shortened_video_data"] = video_file.read()
                st.session_state["shortened_video_ready"] = True

            st.success("Processed video created successfully!")
        else:
            st.error("No frames were processed from the video.")

###############################################################################
# Always show the download button if a processed video is ready.
###############################################################################
if st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
    st.download_button(
        label="Download Processed Video",
        data=st.session_state["shortened_video_data"],
        file_name="processed_video.mp4",
        mime="video/mp4"
    )