Update app.py
app.py CHANGED
@@ -8,10 +8,11 @@ from ultralytics import YOLO
 import time
 import numpy as np
 import imageio_ffmpeg as ffmpeg
+import base64
 
 # Page config first
 st.set_page_config(
-    page_title="Fire Watch:
+    page_title="Fire Watch: Fire and Smoke Detection with an AI Vision Model",
     page_icon="🔥",
     layout="wide",
     initial_sidebar_state="expanded"
@@ -40,40 +41,79 @@ with st.sidebar:
         "1 frame/30s": 0.0333
     }
     video_option = st.selectbox("Output Frame Rate", list(fps_options.keys()))
-    process_button = st.button("Detect fire
+    process_button = st.button("Detect fire")
     progress_bar = st.progress(0)
     progress_text = st.empty()
     download_slot = st.empty()
 
 # Main page
-st.title("Fire Watch: AI-Powered Fire Detection")
+st.title("Fire Watch: AI-Powered Fire and Smoke Detection")
+
+# Display result images directly
 col1, col2 = st.columns(2)
 with col1:
-
+    fire_4a_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_4a.jpg"
+    st.image(fire_4a_url, use_column_width=True)
+
 with col2:
-
+    fire_3a_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3a.jpg"
+    st.image(fire_3a_url, use_column_width=True)
 
 st.markdown("""
-    Early wildfire detection using YOLOv8 AI vision model. See examples below or upload your own content!
+    Early wildfire detection using YOLOv8 AI vision model. See detected results above and video examples below, or upload your own content!
+    Click on video frames to load and play examples.
 """)
 
-#
-
-
-
-
-
-
-
-
-
-
-
+# Function to create simple video pair HTML
+def create_video_pair(orig_url, proc_url):
+    try:
+        orig_bytes = requests.get(orig_url).content
+        proc_bytes = requests.get(proc_url).content
+        orig_b64 = base64.b64encode(orig_bytes).decode('utf-8')
+        proc_b64 = base64.b64encode(proc_bytes).decode('utf-8')
+
+        html = f"""
+        <div style="display: flex; gap: 10px; margin-bottom: 20px;">
+            <div style="flex: 1;">
+                <h4>Original</h4>
+                <video width="100%" controls>
+                    <source src="data:video/mp4;base64,{orig_b64}" type="video/mp4">
+                    Your browser does not support the video tag.
+                </video>
+            </div>
+            <div style="flex: 1;">
+                <h4>Processed</h4>
+                <video width="100%" controls>
+                    <source src="data:video/mp4;base64,{proc_b64}" type="video/mp4">
+                    Your browser does not support the video tag.
+                </video>
+            </div>
+        </div>
+        """
+        return html
+    except Exception as e:
+        return f"<p>Error loading videos: {str(e)}</p>"
+
+if not source_file:
+    st.info("Please upload a file to begin.")
 
 st.header("Your Results")
 result_cols = st.columns(2)
 viewer_slot = st.empty()
 
+# Example videos (LA before T)
+st.header("Example Results")
+examples = [
+    ("LA Example", "LA1.mp4", "LA2.mp4"),
+    ("T Example", "T1.mp4", "T2.mp4")
+]
+for title, orig_file, proc_file in examples:
+    st.subheader(title)
+    orig_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{orig_file}"
+    proc_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{proc_file}"
+    video_html = create_video_pair(orig_url, proc_url)
+    st.markdown(video_html, unsafe_allow_html=True)
+
 # Load model
 try:
     model = YOLO(model_path)
@@ -106,6 +146,9 @@ if process_button and source_file and model:
         output_fps = fps_options[video_option] if fps_options[video_option] else orig_fps
         sample_interval = max(1, int(orig_fps / output_fps)) if output_fps else 1
 
+        # Set fixed output FPS to 2 (500ms per frame = 2 FPS)
+        fixed_output_fps = 2
+
         st.session_state.start_time = time.time()
         frame_count = 0
         processed_count = 0
@@ -115,7 +158,6 @@ if process_button and source_file and model:
             if frame_count % sample_interval == 0:
                 res = model.predict(frame, conf=confidence)
                 processed_frame = res[0].plot()[:, :, ::-1]
-                # Ensure frame is C-contiguous
                 if not processed_frame.flags['C_CONTIGUOUS']:
                     processed_frame = np.ascontiguousarray(processed_frame)
                 st.session_state.processed_frames.append(processed_frame)
@@ -124,16 +166,17 @@ if process_button and source_file and model:
             elapsed = time.time() - st.session_state.start_time
             progress = frame_count / total_frames
 
-            if elapsed > 0 and
-
-
-
-                eta_str = f"{int(eta // 60)}m {int(eta % 60)}s"
+            if elapsed > 0 and progress > 0:
+                total_estimated_time = elapsed / progress
+                eta = total_estimated_time - elapsed
+                elapsed_str = f"{int(elapsed // 60)}m {int(elapsed % 60)}s"
+                eta_str = f"{int(eta // 60)}m {int(eta % 60)}s" if eta > 0 else "Almost done"
             else:
+                elapsed_str = "0s"
                 eta_str = "Calculating..."
 
             progress_bar.progress(min(progress, 1.0))
-            progress_text.text(f"Progress: {progress:.1%}
+            progress_text.text(f"Progress: {progress:.1%}\nElapsed: {elapsed_str}\nETA: {eta_str}")
 
             frame_count += 1
             success, frame = vidcap.read()
@@ -146,7 +189,7 @@ if process_button and source_file and model:
         writer = ffmpeg.write_frames(
             out_path,
             (width, height),
-            fps=
+            fps=fixed_output_fps,  # Fixed at 2 FPS (500ms per frame)
            codec='libx264',
            pix_fmt_in='bgr24',
            pix_fmt_out='yuv420p'
@@ -161,8 +204,10 @@ if process_button and source_file and model:
            st.session_state.processed_video = f.read()
        os.unlink(out_path)
 
+        elapsed_final = time.time() - st.session_state.start_time
+        elapsed_final_str = f"{int(elapsed_final // 60)}m {int(elapsed_final % 60)}s"
        progress_bar.progress(1.0)
-        progress_text.text("
+        progress_text.text(f"Progress: 100%\nElapsed: {elapsed_final_str}\nETA: 0m 0s")
        with result_cols[0]:
            st.video(source_file)
        with result_cols[1]:
@@ -170,7 +215,7 @@ if process_button and source_file and model:
            download_slot.download_button(
                label="Download Processed Video",
                data=st.session_state.processed_video,
-                file_name="
+                file_name="results_fire_analysis.mp4",
                mime="video/mp4"
            )
 
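Note on the new create_video_pair helper: it downloads both example MP4s with requests and inlines them as base64 data URIs, so every Streamlit rerun re-fetches and re-encodes the files. One possible refinement, sketched under the assumption that the Space's Streamlit version provides st.cache_data (the fetch_video_b64 name is hypothetical and not part of this commit):

import base64
import requests
import streamlit as st

@st.cache_data(show_spinner=False)
def fetch_video_b64(url: str) -> str:
    # Hypothetical helper: download the MP4 once per URL and keep the base64
    # string in Streamlit's cache, so widget-triggered reruns do not
    # re-download the example videos.
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    return base64.b64encode(resp.content).decode("utf-8")

# Hypothetical usage inside create_video_pair:
#     orig_b64 = fetch_video_b64(orig_url)
#     proc_b64 = fetch_video_b64(proc_url)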
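Note on the writer change in the later hunks: the diff only shows the keyword arguments passed to imageio_ffmpeg.write_frames, not how the writer is driven. write_frames returns a coroutine-style generator that has to be primed with send(None), fed one frame per send() call, and closed to flush the encoder; frames are expected as contiguous buffers, which is why the processing loop keeps the np.ascontiguousarray guard. A minimal sketch under those assumptions (the frame list and dimensions below are placeholders, not code from this commit):

import numpy as np
import imageio_ffmpeg as ffmpeg

width, height = 1280, 720                                  # placeholder dimensions
frames = [np.zeros((height, width, 3), dtype=np.uint8)]    # placeholder BGR frames

writer = ffmpeg.write_frames(
    "out.mp4",
    (width, height),        # size is (width, height); the ndarrays are (height, width, 3)
    fps=2,                  # matches the fixed_output_fps introduced in this commit
    codec="libx264",
    pix_fmt_in="bgr24",
    pix_fmt_out="yuv420p",
)
writer.send(None)           # prime the generator before sending frames
for frame in frames:
    writer.send(np.ascontiguousarray(frame))  # frames must be C-contiguous
writer.close()              # close ffmpeg's stdin and finalize the MP4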