tstone87 committed on
Commit
d79abfc
·
verified ·
1 Parent(s): f68dccf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +196 -176
app.py CHANGED
@@ -1,25 +1,62 @@
1
  import os
2
  import tempfile
 
3
  import cv2
4
  import streamlit as st
5
  import PIL
6
  from ultralytics import YOLO
7
 
8
- # Ensure your model path points directly to the .pt file
9
- model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
 
 
 
 
11
  st.set_page_config(
12
- page_title="Fire Watch using AI vision models",
13
  page_icon="🔥",
14
  layout="wide",
15
  initial_sidebar_state="expanded"
16
  )
17
 
18
- # --- SIDEBAR ---
 
 
19
  with st.sidebar:
20
- st.header("IMAGE/VIDEO UPLOAD")
21
- source_file = st.file_uploader("Choose an image or video...",
22
- type=("jpg", "jpeg", "png", "bmp", "webp", "mp4"))
 
 
 
 
 
 
23
  confidence = float(st.slider("Select Model Confidence", 25, 100, 40)) / 100
24
  video_option = st.selectbox(
25
  "Select Video Shortening Option",
@@ -28,189 +65,172 @@ with st.sidebar:
28
  progress_text = st.empty()
29
  progress_bar = st.progress(0)
30
 
31
- # --- MAIN PAGE TITLE AND IMAGES ---
32
- st.title("Fire Watch: Detecting fire using AI vision models")
33
- col1, col2 = st.columns(2)
34
- with col1:
35
- st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
36
- with col2:
37
- st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)
38
-
39
- st.markdown("""
40
- Fires in Colorado present a serious challenge, threatening urban communities, highways, and even remote areas.
41
- Early detection is critical. WildfireWatch leverages YOLOv8 for real-time fire and smoke detection
42
- in images and videos.
43
- """)
44
- st.markdown("---")
45
- st.header("Fire Detection:")
46
-
47
- # --- DISPLAY UPLOADED FILE ---
48
- col1, col2 = st.columns(2)
49
- if source_file:
50
- file_type = source_file.type.split('/')[0]
51
- if file_type == 'image':
52
- uploaded_image = PIL.Image.open(source_file)
53
- st.image(uploaded_image, caption="Uploaded Image", use_column_width=True)
54
- else:
55
- # Temporarily store the uploaded video
56
- tfile = tempfile.NamedTemporaryFile(delete=False)
57
- tfile.write(source_file.read())
58
- vidcap = cv2.VideoCapture(tfile.name)
59
- else:
60
- st.info("Please upload an image or video file to begin.")
61
 
62
- # --- LOAD YOLO MODEL ---
 
 
63
  try:
64
  model = YOLO(model_path)
65
  except Exception as ex:
66
- st.error(f"Unable to load model. Check the specified path: {model_path}")
67
  st.error(ex)
68
 
69
- # --- SESSION STATE SETUP ---
70
- if "processed_frames" not in st.session_state:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  st.session_state["processed_frames"] = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- # If you want bounding box data per frame:
74
- if "frame_detections" not in st.session_state:
75
- st.session_state["frame_detections"] = []
76
 
77
- # We'll store the shortened video data so the download button remains visible
78
- if "shortened_video_data" not in st.session_state:
79
- st.session_state["shortened_video_data"] = None
80
- if "shortened_video_ready" not in st.session_state:
81
- st.session_state["shortened_video_ready"] = False
 
 
 
 
82
 
83
- # --- DETECT BUTTON ---
84
- if st.sidebar.button("Let's Detect fire"):
85
- if not source_file:
86
- st.warning("No file uploaded!")
87
- elif file_type == 'image':
88
- # Reset previous video data
89
- st.session_state["shortened_video_ready"] = False
90
- st.session_state["shortened_video_data"] = None
91
-
92
- # IMAGE DETECTION
93
- res = model.predict(uploaded_image, conf=confidence)
94
- boxes = res[0].boxes
95
- res_plotted = res[0].plot()[:, :, ::-1]
96
- with col2:
97
- st.image(res_plotted, caption='Detected Image', use_column_width=True)
98
- with st.expander("Detection Results"):
99
- for box in boxes:
100
- st.write(box.xywh)
101
- else:
102
- # Reset previous frames and video data
103
- st.session_state["processed_frames"] = []
104
- st.session_state["frame_detections"] = []
105
- st.session_state["shortened_video_ready"] = False
106
- st.session_state["shortened_video_data"] = None
107
-
108
- processed_frames = st.session_state["processed_frames"]
109
- frame_detections = st.session_state["frame_detections"]
110
-
111
- frame_count = 0
112
- orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
113
- total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
114
- width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
115
- height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
116
-
117
- # Determine sampling interval
118
- if video_option == "Original FPS":
119
- sample_interval = 1
120
- output_fps = orig_fps
121
- elif video_option == "1 fps":
122
- sample_interval = int(orig_fps) if orig_fps > 0 else 1
123
- output_fps = 1
124
- elif video_option == "1 frame per 5 seconds":
125
- sample_interval = int(orig_fps * 5) if orig_fps > 0 else 5
126
- output_fps = 1
127
- elif video_option == "1 frame per 10 seconds":
128
- sample_interval = int(orig_fps * 10) if orig_fps > 0 else 10
129
- output_fps = 1
130
- elif video_option == "1 frame per 15 seconds":
131
- sample_interval = int(orig_fps * 15) if orig_fps > 0 else 15
132
- output_fps = 1
133
- else:
134
- sample_interval = 1
135
- output_fps = orig_fps
136
 
137
- success, image = vidcap.read()
138
- while success:
139
- if frame_count % sample_interval == 0:
140
- # Run detection
141
- res = model.predict(image, conf=confidence)
142
- res_plotted = res[0].plot()[:, :, ::-1]
143
-
144
- processed_frames.append(res_plotted)
145
- frame_detections.append(res[0].boxes) # optional
146
-
147
- # Update progress
148
- if total_frames > 0:
149
- progress_pct = int((frame_count / total_frames) * 100)
150
- progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
151
- progress_bar.progress(min(100, progress_pct))
152
- else:
153
- progress_text.text(f"Processing frame {frame_count}")
154
-
155
- frame_count += 1
156
- success, image = vidcap.read()
157
-
158
- # Processing complete
159
- progress_text.text("Video processing complete!")
160
- progress_bar.progress(100)
161
-
162
- # Create shortened video
163
- if processed_frames:
164
- temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
165
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
166
- out = cv2.VideoWriter(temp_video_file.name, fourcc, output_fps, (width, height))
167
- for frame in processed_frames:
168
- out.write(frame)
169
- out.release()
170
-
171
- # Store the video data in session_state
172
- with open(temp_video_file.name, 'rb') as video_file:
173
- st.session_state["shortened_video_data"] = video_file.read()
174
- st.session_state["shortened_video_ready"] = True
175
-
176
- st.success("Shortened video created successfully!")
177
- else:
178
- st.error("No frames were processed from the video.")
179
 
180
- # --- SHOW THE DOWNLOAD BUTTON IF READY ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
181
  if st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
182
  st.download_button(
183
- label="Download Shortened Video",
184
  data=st.session_state["shortened_video_data"],
185
- file_name="shortened_video.mp4",
186
  mime="video/mp4"
187
  )
188
-
189
- # --- DISPLAY PROCESSED FRAMES IF ANY ---
190
- if st.session_state["processed_frames"]:
191
- st.markdown("### Browse Detected Frames")
192
- num_frames = len(st.session_state["processed_frames"])
193
-
194
- if num_frames == 1:
195
- st.image(st.session_state["processed_frames"][0], caption="Frame 0", use_column_width=True)
196
- if st.session_state["frame_detections"]:
197
- with st.expander("Detection Results for Frame 0"):
198
- for box in st.session_state["frame_detections"][0]:
199
- st.write(box.xywh)
200
- else:
201
- frame_idx = st.slider(
202
- "Select Frame",
203
- min_value=0,
204
- max_value=num_frames - 1,
205
- value=0,
206
- step=1
207
- )
208
- st.image(st.session_state["processed_frames"][frame_idx],
209
- caption=f"Frame {frame_idx}",
210
- use_column_width=True)
211
-
212
- # Optionally show bounding box data
213
- if st.session_state["frame_detections"]:
214
- with st.expander(f"Detection Results for Frame {frame_idx}"):
215
- for box in st.session_state["frame_detections"][frame_idx]:
216
- st.write(box.xywh)
 
1
  import os
2
  import tempfile
3
+ import base64
4
  import cv2
5
  import streamlit as st
6
  import PIL
7
  from ultralytics import YOLO
8
 
9
###############################################################################
# Helper function: Display an HTML5 video with autoplay, controls, and muted
###############################################################################
def show_autoplay_video(video_data: bytes, title: str = "Video"):
    """Render raw MP4 bytes as an autoplaying, muted HTML5 video.

    The bytes are embedded directly in the page as a base64 data URI, so no
    separate file hosting is needed. Shows a warning and returns early when
    no data is available.

    Args:
        video_data: Raw MP4 file contents (empty/None triggers a warning).
        title: Heading shown above the player.
    """
    if not video_data:
        st.warning(f"No {title} video available.")
        return
    import html  # stdlib; local import keeps the file-level imports untouched
    # Escape the caller-supplied title: it is interpolated into a fragment
    # rendered with unsafe_allow_html, so raw text could inject markup.
    safe_title = html.escape(title)
    video_base64 = base64.b64encode(video_data).decode()
    video_html = f"""
    <h4>{safe_title}</h4>
    <video width="100%" height="auto" controls autoplay muted>
      <source src="data:video/mp4;base64,{video_base64}" type="video/mp4">
      Your browser does not support the video tag.
    </video>
    """
    st.markdown(video_html, unsafe_allow_html=True)
25
+
26
###############################################################################
# Session state initialization for processed results (for uploaded files)
###############################################################################
# Seed each key only when absent so values survive Streamlit reruns.
_SESSION_DEFAULTS = {
    "processed_frames": [],
    "shortened_video_data": None,
    "shortened_video_ready": False,
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
35
 
36
###############################################################################
# Configure YOLO model path and Streamlit page
###############################################################################
# Weights file served from the Hugging Face space; ultralytics can load
# directly from a URL.
model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'

# Page config must run before any other Streamlit call in the script.
st.set_page_config(
    page_title="Fire Detection: Original vs. Processed Video",
    page_icon="🔥",
    layout="wide",
    initial_sidebar_state="expanded",
)
46
 
47
+ ###############################################################################
48
+ # SIDEBAR: Upload file, set confidence, video option, and select an example pair
49
+ ###############################################################################
50
  with st.sidebar:
51
+ st.header("Video Input Options")
52
+ example_option = st.selectbox(
53
+ "Select Example Pair (optional)",
54
+ ["None", "T Example", "LA Example"]
55
+ )
56
+ source_file = st.file_uploader(
57
+ "Or upload your own file...",
58
+ type=("jpg", "jpeg", "png", "bmp", "webp", "mp4")
59
+ )
60
  confidence = float(st.slider("Select Model Confidence", 25, 100, 40)) / 100
61
  video_option = st.selectbox(
62
  "Select Video Shortening Option",
 
65
  progress_text = st.empty()
66
  progress_bar = st.progress(0)
67
 
68
###############################################################################
# MAIN PAGE TITLE
###############################################################################
st.title("Fire Detection: Original vs. Processed Video")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
###############################################################################
# Load YOLO model
###############################################################################
try:
    model = YOLO(model_path)
except Exception as ex:
    st.error(f"Unable to load model. Check model path: {model_path}")
    st.error(ex)
    # Halt this script run: every later code path assumes `model` exists,
    # so continuing would only raise a NameError on model.predict().
    st.stop()
81
 
82
###############################################################################
# Determine source: Example or Uploaded File
###############################################################################
# original_video_data / processed_video_data hold raw file bytes for display.
# For example pairs the processed (analysis-complete) video is pre-rendered
# and loaded straight from the repo.
original_video_data = None
processed_video_data = None

def _load_example_pair(original_name: str, processed_name: str):
    """Read a pre-rendered (original, processed) video pair from disk.

    Returns (original_bytes, processed_bytes). On any read failure an
    on-page error is shown and (None, None) is returned, so callers never
    see a half-loaded pair.
    """
    try:
        with open(original_name, "rb") as f:
            original = f.read()
        with open(processed_name, "rb") as f:
            processed = f.read()
        return original, processed
    except Exception as ex:
        # Surface the underlying exception instead of discarding it.
        st.error(
            f"Error loading example videos ({original_name}, {processed_name}): {ex}. "
            "Ensure both files are in your repo."
        )
        return None, None

if example_option == "T Example":
    # T1.mp4: original, T2.mpg: processed (analysis completed video)
    original_video_data, processed_video_data = _load_example_pair("T1.mp4", "T2.mpg")
elif example_option == "LA Example":
    # LA1.mp4: original, LA2.mp4: processed
    original_video_data, processed_video_data = _load_example_pair("LA1.mp4", "LA2.mp4")
else:
    # No example selected. Use the uploaded file if available.
    if source_file:
        file_type = source_file.type.split('/')[0]
        if file_type == 'image':
            # For images, load and re-encode the upload as PNG bytes.
            original_image = PIL.Image.open(source_file)
            # NOTE(review): these PNG bytes are later handed to the MP4
            # <video> renderer, which cannot display a still image —
            # confirm intended behavior for image uploads.
            buf = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
            original_image.save(buf.name, format="PNG")
            with open(buf.name, "rb") as f:
                original_video_data = f.read()
        else:
            # For video, save to a temporary file and load its bytes.
            tfile = tempfile.NamedTemporaryFile(delete=False)
            tfile.write(source_file.read())
            tfile.flush()
            with open(tfile.name, "rb") as vf:
                original_video_data = vf.read()
            # Also open the video with OpenCV for frame-by-frame processing
            # in the detection section below.
            vidcap = cv2.VideoCapture(tfile.name)
    else:
        st.info("Please select an example pair or upload a file.")
131
+
132
###############################################################################
# Display the Original and Result columns side-by-side
###############################################################################
col1, col2 = st.columns(2)

# Left column: Original video
with col1:
    st.subheader("Original File")
    if not original_video_data:
        st.info("No original video available.")
    else:
        show_autoplay_video(original_video_data, title="Original")
144
+
145
###############################################################################
# DETECTION: For uploaded video files (not example pairs) run YOLO analysis
###############################################################################
# Detection runs only when no example pair is selected and the upload is a
# video (images are displayed without a video-processing pass).
if example_option == "None" and source_file and source_file.type.split('/')[0] != 'image':
    # Reset processed frames for a new analysis
    st.session_state["processed_frames"] = []
    frame_count = 0
    orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Determine sampling interval. sample_interval is clamped to >= 1:
    # int(orig_fps * k) truncates to 0 for sub-1-fps sources, which would
    # crash the modulo test in the read loop below.
    if video_option == "Original FPS":
        sample_interval = 1
        output_fps = orig_fps
    elif video_option == "1 fps":
        sample_interval = max(1, int(orig_fps)) if orig_fps > 0 else 1
        output_fps = 1
    elif video_option == "1 frame per 5 seconds":
        sample_interval = max(1, int(orig_fps * 5)) if orig_fps > 0 else 5
        output_fps = 1
    elif video_option == "1 frame per 10 seconds":
        sample_interval = max(1, int(orig_fps * 10)) if orig_fps > 0 else 10
        output_fps = 1
    elif video_option == "1 frame per 15 seconds":
        sample_interval = max(1, int(orig_fps * 15)) if orig_fps > 0 else 15
        output_fps = 1
    else:
        sample_interval = 1
        output_fps = orig_fps

    success, image = vidcap.read()
    while success:
        if frame_count % sample_interval == 0:
            res = model.predict(image, conf=confidence)
            # plot() returns an annotated BGR frame; reverse channels to RGB.
            res_plotted = res[0].plot()[:, :, ::-1]
            st.session_state["processed_frames"].append(res_plotted)
            # Update progress
            if total_frames > 0:
                progress_pct = int((frame_count / total_frames) * 100)
                progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
                progress_bar.progress(min(100, progress_pct))
            else:
                progress_text.text(f"Processing frame {frame_count}")
        frame_count += 1
        success, image = vidcap.read()

    progress_text.text("Video processing complete!")
    progress_bar.progress(100)

    # Create shortened video from processed frames
    processed_frames = st.session_state["processed_frames"]
    if processed_frames:
        temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(temp_video_file.name, fourcc, output_fps, (width, height))
        for frame in processed_frames:
            # Frames were flipped to RGB above, but VideoWriter expects BGR;
            # convert back so the output video does not have swapped colors.
            out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        out.release()

        with open(temp_video_file.name, 'rb') as video_file:
            st.session_state["shortened_video_data"] = video_file.read()
        st.session_state["shortened_video_ready"] = True
        # The bytes now live in session state; close and remove the temp file
        # so each rerun does not leak a file on disk.
        temp_video_file.close()
        os.remove(temp_video_file.name)

        st.success("Processed video created successfully!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
 
213
###############################################################################
# Right column: Display the Processed (Result) video
###############################################################################
with col2:
    st.subheader("Result File")
    # Example pairs ship with a pre-rendered result; prefer that.
    if processed_video_data:
        show_autoplay_video(processed_video_data, title="Processed")
    else:
        # Fall back to a result generated from an uploaded file, if ready.
        generated = st.session_state["shortened_video_data"]
        if st.session_state["shortened_video_ready"] and generated:
            show_autoplay_video(generated, title="Processed")
        else:
            st.info("No processed video available yet. Run detection if you uploaded a file.")

###############################################################################
# Always display the download button if a processed video is ready
###############################################################################
if st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
    st.download_button(
        label="Download Processed Video",
        data=st.session_state["shortened_video_data"],
        file_name="processed_video.mp4",
        mime="video/mp4"
    )