Update app.py

app.py CHANGED
@@ -73,7 +73,7 @@ st.markdown("---")
 # Tabs
 tabs = st.tabs(["Upload", "Webcam"])
 
-# Tab 1: Upload (
+# Tab 1: Upload (Optimized with new sampling options)
 with tabs[0]:
     col1, col2 = st.columns(2)
     with col1:
@@ -81,7 +81,16 @@ with tabs[0]:
         st.write("Upload an image or video to scan for fire or smoke.")
         source_file = st.file_uploader("", type=["jpg", "jpeg", "png", "mp4"], label_visibility="collapsed")
         confidence = st.slider("Detection Threshold", 0.25, 1.0, 0.4, key="upload_conf")
-
+        # Expanded sampling options
+        sampling_options = {
+            "Every Frame": 0,
+            "1 FPS": 1,
+            "2 FPS": 2,
+            "5 FPS": 5,
+            "1 frame / 5s": 5,
+            "1 frame / 10s": 10,
+            "1 frame / 15s": 15
+        }
         sampling_rate = st.selectbox("Analysis Rate", list(sampling_options.keys()), index=1, key="sampling_rate")
 
     with col2:
@@ -91,7 +100,7 @@ with tabs[0]:
         download_placeholder = st.empty()
 
         if source_file:
-            st.write(f"File size: {source_file.size / 1024 / 1024:.2f} MB")
+            st.write(f"File size: {source_file.size / 1024 / 1024:.2f} MB")
             if st.button("Detect Wildfire", key="upload_detect"):
                 file_type = source_file.type.split('/')[0]
                 if file_type == 'image':
@@ -117,9 +126,19 @@ with tabs[0]:
                     frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
                     frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
-                    #
-
-
+                    # Calculate frame skip
+                    target_rate = sampling_options[sampling_rate]
+                    if target_rate == 0:  # Every frame
+                        frame_skip = 1
+                    elif target_rate <= 5:  # FPS-based (1, 2, 5)
+                        frame_skip = max(1, int(fps / target_rate))
+                    else:  # Seconds-based (5s, 10s, 15s)
+                        frame_skip = max(1, int(fps * target_rate))
+
+                    # Batch processing setup
+                    batch_size = 10  # Process 10 frames at a time
+                    frames_to_process = []
+                    frame_indices = []
 
                     # Output video
                     output_tfile = tempfile.NamedTemporaryFile(delete=False, suffix='_detected.mp4')
@@ -130,18 +149,27 @@ with tabs[0]:
                     frame_count = 0
                     processed_count = 0
                     last_detected_frame = None
+                    last_ui_update = time.time()
 
                     while success:
                         if frame_count % frame_skip == 0:
-
-
-                            last_detected_frame = detected_frame
-                            frame_placeholder.image(detected_frame, use_column_width=True)
-                            status_placeholder.write(f"Frame {frame_count}: Objects detected: {len(res[0].boxes)}")
+                            frames_to_process.append(frame)
+                            frame_indices.append(frame_count)
                             processed_count += 1
-                        elif last_detected_frame is not None:
-                            frame_placeholder.image(last_detected_frame, use_column_width=True)
 
+                        # Process batch when full or at end
+                        if len(frames_to_process) >= batch_size or (not success and frames_to_process):
+                            res = model.predict(frames_to_process, conf=confidence)
+                            for i, (result, idx) in enumerate(zip(res, frame_indices)):
+                                detected_frame = result.plot()[:, :, ::-1]
+                                last_detected_frame = detected_frame
+                                # Update UI sparingly (every 1s)
+                                if time.time() - last_ui_update >= 1.0:
+                                    frame_placeholder.image(detected_frame, use_column_width=True)
+                                    status_placeholder.write(f"Frame {idx}: Objects detected: {len(result.boxes)}")
+                                    last_ui_update = time.time()
+
+                        # Write all frames (processed or last detected)
                         if last_detected_frame is not None:
                             out.write(last_detected_frame[:, :, ::-1])
 
@@ -154,7 +182,9 @@ with tabs[0]:
 
                         success, frame = vidcap.read()
                         frame_count += 1
-
+                        if len(frames_to_process) >= batch_size:
+                            frames_to_process = []
+                            frame_indices = []
 
                     vidcap.release()
                     out.release()
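
For reference, the frame-skip selection added above boils down to a small piece of arithmetic. The sketch below restates it as a standalone helper; the function name compute_frame_skip and the standalone form are illustrative only and do not appear in app.py.

# Minimal sketch of the frame-skip arithmetic from the diff above (illustrative
# helper, not part of app.py). target_rate comes from sampling_options: 0 means
# every frame, values up to 5 are a target FPS, larger values are seconds per sample.
def compute_frame_skip(fps: float, target_rate: float) -> int:
    if target_rate == 0:
        return 1                                # "Every Frame"
    if target_rate <= 5:
        return max(1, int(fps / target_rate))   # FPS-based: keep target_rate frames per second
    return max(1, int(fps * target_rate))       # seconds-based: one frame every target_rate seconds

# A 30 FPS video sampled at "2 FPS" keeps every 15th frame; "1 frame / 10s" keeps every 300th.
assert compute_frame_skip(30, 2) == 15
assert compute_frame_skip(30, 10) == 300

Note that "5 FPS" and "1 frame / 5s" both map to 5 in sampling_options, so both take the FPS-based branch; only the 10 s and 15 s options reach the seconds-based calculation.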
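
The batch-processing pattern introduced in the loop can also be sketched independently of the Streamlit UI. The generator below is an illustrative sketch only, assuming an Ultralytics-style model whose predict() accepts a list of frames and returns one result per frame; the name detect_in_batches and the generator form are not part of app.py.

import cv2

def detect_in_batches(model, video_path, frame_skip, conf, batch_size=10):
    """Yield (frame_index, annotated_frame) for each sampled frame (illustrative sketch)."""
    vidcap = cv2.VideoCapture(video_path)
    frames, indices = [], []
    frame_count = 0
    success, frame = vidcap.read()
    while success:
        if frame_count % frame_skip == 0:
            frames.append(frame)
            indices.append(frame_count)
        if len(frames) >= batch_size:
            # One batched prediction instead of one call per sampled frame.
            for idx, result in zip(indices, model.predict(frames, conf=conf)):
                yield idx, result.plot()    # annotated frame (BGR)
            frames, indices = [], []
        success, frame = vidcap.read()
        frame_count += 1
    # Flush whatever is left once the video ends mid-batch.
    if frames:
        for idx, result in zip(indices, model.predict(frames, conf=conf)):
            yield idx, result.plot()
    vidcap.release()

Flushing after the loop matters here: in the diff, the (not success and frames_to_process) clause is evaluated inside the while success: body, where success is still true at that point, so a final batch smaller than batch_size would apparently never be sent to the model.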