Update app.py
app.py CHANGED
@@ -8,11 +8,10 @@ from ultralytics import YOLO
 import time
 import numpy as np
 import imageio_ffmpeg as ffmpeg
-import base64
 
 # Page config first
 st.set_page_config(
-    page_title="Fire Watch:
+    page_title="Fire Watch: AI model detection of fire and smoke",
     page_icon="🔥",
     layout="wide",
     initial_sidebar_state="expanded"
@@ -29,7 +28,7 @@ for key in ["processed_frames", "slider_value", "processed_video", "start_time"]
 # Sidebar
 with st.sidebar:
     st.header("Upload & Settings")
-    source_file = st.file_uploader("Upload image
+    source_file = st.file_uploader("Upload image/video", type=["jpg", "jpeg", "png", "bmp", "webp", "mp4"])
     confidence = float(st.slider("Confidence Threshold", 10, 100, 20)) / 100
     fps_options = {
         "Original FPS": None,
@@ -41,52 +40,40 @@ with st.sidebar:
         "1 frame/30s": 0.0333
     }
     video_option = st.selectbox("Output Frame Rate", list(fps_options.keys()))
-    process_button = st.button("Detect fire")
+    process_button = st.button("Detect fire and smoke")
     progress_bar = st.progress(0)
     progress_text = st.empty()
     download_slot = st.empty()
 
 # Main page
-st.title("Fire Watch: AI-Powered Fire
-
-# Display result images directly
+st.title("Fire Watch: AI-Powered Fire Detection")
 col1, col2 = st.columns(2)
 with col1:
-
-    st.image(fire_4a_url, use_column_width=True)
-
+    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
 with col2:
-
-    st.image(fire_3a_url, use_column_width=True)
+    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)
 
 st.markdown("""
-    Early wildfire detection using YOLOv8 AI vision model. See
-    Click on video frames to load and play examples.
+    Early wildfire detection using YOLOv8 AI vision model. See examples below or upload your own content!
 """)
 
-    #
-
-
-
-
+# Example videos
+st.header("Example Results")
+for example in [("T1.mp4", "T2.mp4"), ("LA1.mp4", "LA2.mp4")]:
+    col1, col2 = st.columns(2)
+    orig_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{example[0]}"
+    proc_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{example[1]}"
+    orig_data = requests.get(orig_url).content
+    proc_data = requests.get(proc_url).content
+    with col1:
+        st.video(orig_data)
+    with col2:
+        st.video(proc_data)
 
 st.header("Your Results")
 result_cols = st.columns(2)
 viewer_slot = st.empty()
 
-# Example videos (LA before T)
-#st.header("Example Results")
-#examples = [
-#    ("LA Example", "LA1.mp4", "LA2.mp4"),
-#    ("T Example", "T1.mp4", "T2.mp4")
-#]
-#for title, orig_file, proc_file in examples:
-#    st.subheader(title)
-#    orig_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{orig_file}"
-#    proc_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{proc_file}"
-#    video_html = create_video_pair(orig_url, proc_url)
-#    st.markdown(video_html, unsafe_allow_html=True)
-
 # Load model
 try:
     model = YOLO(model_path)
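For illustration only, not part of this commit: the new example-video loop downloads both clips with requests on every Streamlit rerun. A minimal sketch, assuming the same URLs and a Streamlit version that provides st.cache_data, of how the downloads could be cached (fetch_video is a hypothetical helper, not in app.py):

    import requests
    import streamlit as st

    @st.cache_data(show_spinner=False)
    def fetch_video(url: str) -> bytes:
        # Hypothetical helper: download once, let Streamlit reuse the cached bytes on reruns
        resp = requests.get(url, timeout=30)
        resp.raise_for_status()
        return resp.content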
@@ -119,9 +106,6 @@ if process_button and source_file and model:
         output_fps = fps_options[video_option] if fps_options[video_option] else orig_fps
         sample_interval = max(1, int(orig_fps / output_fps)) if output_fps else 1
 
-        # Set fixed output FPS to 2 (500ms per frame = 2 FPS)
-        fixed_output_fps = 2
-
         st.session_state.start_time = time.time()
         frame_count = 0
         processed_count = 0
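The sample_interval line above turns the sidebar's fractional frame rates into a stride over source frames. A worked example with assumed values:

    orig_fps = 30.0
    output_fps = 0.0333  # the "1 frame/30s" sidebar option
    sample_interval = max(1, int(orig_fps / output_fps))
    # -> 900: on a 30 fps source, roughly one frame per 30 s of video is processed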
@@ -131,6 +115,7 @@ if process_button and source_file and model:
             if frame_count % sample_interval == 0:
                 res = model.predict(frame, conf=confidence)
                 processed_frame = res[0].plot()[:, :, ::-1]
+                # Ensure frame is C-contiguous
                 if not processed_frame.flags['C_CONTIGUOUS']:
                     processed_frame = np.ascontiguousarray(processed_frame)
                 st.session_state.processed_frames.append(processed_frame)
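The added comment marks a real pitfall: the [:, :, ::-1] channel flip on the plotted frame returns a negative-stride view that NumPy reports as non-contiguous. A minimal illustration with a dummy array standing in for res[0].plot():

    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for res[0].plot()
    flipped = frame[:, :, ::-1]                      # channel flip returns a view
    print(flipped.flags['C_CONTIGUOUS'])             # False: negative stride on the last axis
    fixed = np.ascontiguousarray(flipped)            # copies into a contiguous buffer
    print(fixed.flags['C_CONTIGUOUS'])               # True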
@@ -139,17 +124,16 @@ if process_button and source_file and model:
             elapsed = time.time() - st.session_state.start_time
             progress = frame_count / total_frames
 
-            if elapsed > 0 and
-
-
-
-                eta_str = f"{int(eta // 60)}m {int(eta % 60)}s"
+            if elapsed > 0 and processed_count > 0:
+                time_per_frame = elapsed / processed_count
+                frames_left = (total_frames - frame_count) / sample_interval
+                eta = frames_left * time_per_frame
+                eta_str = f"{int(eta // 60)}m {int(eta % 60)}s"
             else:
-                elapsed_str = "0s"
                 eta_str = "Calculating..."
 
             progress_bar.progress(min(progress, 1.0))
-            progress_text.text(f"Progress: {progress:.1%}
+            progress_text.text(f"Progress: {progress:.1%} | ETA: {eta_str}")
 
             frame_count += 1
             success, frame = vidcap.read()
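The added ETA block scales the average time spent per processed frame by the frames still to be sampled. Restated as a standalone sketch (estimate_eta_seconds is a hypothetical name; the arithmetic matches the hunk above):

    def estimate_eta_seconds(elapsed, processed_count, frame_count, total_frames, sample_interval):
        # Average wall-clock seconds per frame actually run through the model
        time_per_frame = elapsed / processed_count
        # Frames not yet read, scaled down to the ones that will be sampled
        frames_left = (total_frames - frame_count) / sample_interval
        return frames_left * time_per_frame

For example, 10 s elapsed over 20 processed frames, with 600 of 1800 frames read at sample_interval 30, gives (1200 / 30) * 0.5 = 20 s remaining.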
@@ -162,7 +146,7 @@ if process_button and source_file and model:
         writer = ffmpeg.write_frames(
             out_path,
             (width, height),
-            fps=
+            fps=output_fps or orig_fps,
             codec='libx264',
             pix_fmt_in='bgr24',
             pix_fmt_out='yuv420p'
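imageio_ffmpeg.write_frames returns a generator that must be primed before frames are sent. A minimal sketch of how a writer configured as above is typically driven, reusing the names from the hunk; feeding raw bytes per frame is an assumption here (recent imageio_ffmpeg versions also accept C-contiguous arrays):

    writer = ffmpeg.write_frames(
        out_path,
        (width, height),
        fps=output_fps or orig_fps,
        codec='libx264',
        pix_fmt_in='bgr24',
        pix_fmt_out='yuv420p'
    )
    writer.send(None)  # prime the generator before the first frame
    for frame in st.session_state.processed_frames:
        writer.send(frame.tobytes())  # raw 3-bytes-per-pixel frame data
    writer.close()  # flush the encoder and finalize the file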
@@ -177,10 +161,8 @@ if process_button and source_file and model:
             st.session_state.processed_video = f.read()
         os.unlink(out_path)
 
-        elapsed_final = time.time() - st.session_state.start_time
-        elapsed_final_str = f"{int(elapsed_final // 60)}m {int(elapsed_final % 60)}s"
         progress_bar.progress(1.0)
-        progress_text.text(
+        progress_text.text("Processing complete!")
         with result_cols[0]:
             st.video(source_file)
         with result_cols[1]:
@@ -188,7 +170,7 @@ if process_button and source_file and model:
         download_slot.download_button(
             label="Download Processed Video",
             data=st.session_state.processed_video,
-            file_name="
+            file_name="processed_wildfire.mp4",
             mime="video/mp4"
         )
 