Update app.py
app.py CHANGED
@@ -4,8 +4,10 @@ import base64
 import cv2
 import streamlit as st
 import PIL
-from ultralytics import YOLO
 import requests
+import imageio
+from ultralytics import YOLO
+from huggingface_hub import hf_hub_download

 ###############################################################################
 # Helper: Embed an HTML5 video that autoplays (muted) with controls.
@@ -35,9 +37,21 @@ if "shortened_video_ready" not in st.session_state:
 st.session_state["shortened_video_ready"] = False

 ###############################################################################
-#
+# Download YOLO Model from Hugging Face
+###############################################################################
+repo_id = "tstone87/ccr-colorado"
+model_filename = "best.pt"
+
+try:
+    local_model_path = hf_hub_download(repo_id=repo_id, filename=model_filename)
+    model = YOLO(local_model_path)
+except Exception as ex:
+    st.error(f"Unable to load model. Check model path: {model_filename}")
+    st.error(ex)
+
+###############################################################################
+# Configure Streamlit Page Layout
 ###############################################################################
-model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'
 st.set_page_config(
     page_title="Fire Detection: Original vs. Processed Video",
     page_icon="🔥",
@@ -46,7 +60,7 @@ st.set_page_config(
 )

 ###############################################################################
-# SIDEBAR: Video input options
+# SIDEBAR: Video input options
 ###############################################################################
 with st.sidebar:
     st.header("Video Input Options")
@@ -65,7 +79,7 @@ with st.sidebar:
 )
 progress_text = st.empty()
 progress_bar = st.progress(0)
-download_placeholder = st.empty()  #
+download_placeholder = st.empty()  # Placeholder for download button

 ###############################################################################
 # MAIN TITLE
@@ -73,33 +87,27 @@ with st.sidebar:
 st.title("Fire Detection: Original vs. Processed Video")

 ###############################################################################
-# Load
-###############################################################################
-try:
-    model = YOLO(model_path)
-except Exception as ex:
-    st.error(f"Unable to load model. Check model path: {model_path}")
-    st.error(ex)
-
-###############################################################################
-# Determine source video(s): Example pair or uploaded file.
+# Load Example Video Data
 ###############################################################################
 original_video_data = None
-processed_video_data = None
+processed_video_data = None

 if example_option != "None":
     # Use example videos from remote URLs.
-[11 removed lines not rendered in this view]
+    example_videos = {
+        "T Example": ("T1.mp4", "T2.mpg"),
+        "LA Example": ("LA1.mp4", "LA2.mp4")
+    }
+    orig_filename, proc_filename = example_videos.get(example_option, (None, None))
+
+    if orig_filename and proc_filename:
+        try:
+            orig_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{orig_filename}"
+            proc_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{proc_filename}"
+            original_video_data = requests.get(orig_url).content
+            processed_video_data = requests.get(proc_url).content
+        except Exception as ex:
+            st.error("Error loading example videos. Check your URLs.")
 else:
     # No example selected. If a file is uploaded, use it.
     if source_file:
@@ -111,18 +119,12 @@ else:
         with open(buf.name, "rb") as f:
             original_video_data = f.read()
     else:
-[3 removed lines not rendered in this view]
-        with open(tfile.name, "rb") as vf:
-            original_video_data = vf.read()
-        # Open with OpenCV for processing.
-        vidcap = cv2.VideoCapture(tfile.name)
-else:
-    st.info("Please select an example pair or upload a video file.")
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tfile:
+            tfile.write(source_file.read())
+            original_video_data = tfile.name

 ###############################################################################
-# Layout: Two
+# Layout: Two Columns for Original and Processed Videos
 ###############################################################################
 col1, col2 = st.columns(2)

@@ -135,7 +137,6 @@ with col1:

 with col2:
     st.subheader("Result File")
-    # Create a dedicated placeholder for the processed video.
     viewer_slot = st.empty()
     if example_option != "None":
         if processed_video_data:
@@ -146,73 +147,40 @@ with col2:
         viewer_slot.info("Processed video will appear here once detection is run.")

 ###############################################################################
-#
+# Process Video if No Example is Selected
 ###############################################################################
 if example_option == "None" and source_file and source_file.type.split('/')[0] != 'image':
     if st.sidebar.button("Let's Detect Wildfire"):
-        # Reset previous processed results.
         st.session_state["processed_frames"] = []
-        st.session_state["shortened_video_data"] = None
-        st.session_state["shortened_video_ready"] = False
-
         processed_frames = st.session_state["processed_frames"]

+        vid_reader = imageio.get_reader(original_video_data)
+        fps = vid_reader.get_meta_data()['fps']
+        width, height = vid_reader.get_meta_data()['size']
+
         frame_count = 0
-[10 removed lines not rendered in this view]
-            sample_interval = int(orig_fps) if orig_fps > 0 else 1
-            output_fps = 1
-        elif video_option == "1 frame per 5 seconds":
-            sample_interval = int(orig_fps * 5) if orig_fps > 0 else 5
-            output_fps = 1
-        elif video_option == "1 frame per 10 seconds":
-            sample_interval = int(orig_fps * 10) if orig_fps > 0 else 10
-            output_fps = 1
-        elif video_option == "1 frame per 15 seconds":
-            sample_interval = int(orig_fps * 15) if orig_fps > 0 else 15
-            output_fps = 1
-        else:
-            sample_interval = 1
-            output_fps = orig_fps
-
-        success, image = vidcap.read()
-        while success:
-            if frame_count % sample_interval == 0:
-                res = model.predict(image, conf=confidence)
-                res_plotted = res[0].plot()[:, :, ::-1]
-                processed_frames.append(res_plotted)
-                # Update progress.
-                if total_frames > 0:
-                    progress_pct = int((frame_count / total_frames) * 100)
-                    progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
-                    progress_bar.progress(min(100, progress_pct))
-                else:
-                    progress_text.text(f"Processing frame {frame_count}")
-                # Update the viewer with the most recent processed frame.
-                viewer_slot.image(res_plotted, caption=f"Frame {frame_count}", use_column_width=True)
+        total_frames = vid_reader.get_length()
+
+        for frame in vid_reader:
+            res = model.predict(frame, conf=confidence)
+            res_plotted = res[0].plot()[:, :, ::-1]
+            processed_frames.append(res_plotted)
+
+            progress_pct = int((frame_count / total_frames) * 100)
+            progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
+            progress_bar.progress(min(100, progress_pct))
             frame_count += 1
-            success, image = vidcap.read()

         progress_text.text("Video processing complete!")
         progress_bar.progress(100)

-        # Create shortened video from processed frames.
         if processed_frames:
             temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
-[3 removed lines not rendered in this view]
+            fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Use widely supported codec
+            out = cv2.VideoWriter(temp_video_file.name, fourcc, fps, (width, height))
+
             for frame in processed_frames:
-[1 removed line not rendered in this view]
-                out.write(frame_out)
+                out.write(frame)
             out.release()

             with open(temp_video_file.name, 'rb') as video_file:
@@ -220,16 +188,12 @@ if example_option == "None" and source_file and source_file.type.split('/')[0] !
             st.session_state["shortened_video_ready"] = True

             st.success("Processed video created successfully!")
-            # Update the viewer with the final processed video.
-            viewer_slot.empty()
             show_autoplay_video(st.session_state["shortened_video_data"], title="Processed Video")
-        else:
-            st.error("No frames were processed from the video.")

 ###############################################################################
-#
+# Show Download Button if Ready
 ###############################################################################
-if st.session_state["shortened_video_ready"]
+if st.session_state["shortened_video_ready"]:
     download_placeholder.download_button(
         label="Download Processed Video",
         data=st.session_state["shortened_video_data"],
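For reference, the pattern this commit moves to (hf_hub_download for the weights, imageio for frame reading, OpenCV only for writing the annotated output) can be exercised outside Streamlit. The sketch below is a minimal standalone illustration, not the Space's code: the repo_type="space" argument, the local input.mp4 path, and the fixed 0.5 confidence value are assumptions made for the example.

# Minimal standalone sketch of the pattern adopted in this commit (assumptions:
# the weights are hosted in the tstone87/ccr-colorado Space repo, a local
# input.mp4 exists, and imageio's ffmpeg plugin is installed).
import cv2
import imageio
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

weights = hf_hub_download(repo_id="tstone87/ccr-colorado",
                          filename="best.pt",
                          repo_type="space")   # weights live in a Space, not a model repo
model = YOLO(weights)

reader = imageio.get_reader("input.mp4")       # hypothetical local test file
meta = reader.get_meta_data()
fps = meta["fps"]
width, height = meta["size"]

fourcc = cv2.VideoWriter_fourcc(*"mp4v")
out = cv2.VideoWriter("annotated.mp4", fourcc, fps, (width, height))

for frame in reader:                                   # imageio yields RGB uint8 frames
    bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)       # Ultralytics expects BGR numpy input
    result = model.predict(bgr, conf=0.5)[0]           # 0.5 stands in for the app's slider value
    out.write(result.plot())                           # plot() returns a BGR array, ready for VideoWriter

out.release()
reader.close()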