tstone87 committed (verified)
Commit f6a8624 · 1 parent: d79abfc

Update app.py

Files changed (1)
  1. app.py +137 -120
app.py CHANGED
@@ -5,26 +5,27 @@ import cv2
 import streamlit as st
 import PIL
 from ultralytics import YOLO
+import requests

 ###############################################################################
-# Helper function: Display an HTML5 video with autoplay, controls, and muted
+# Helper function to embed an HTML5 video that autoplays (muted) with controls.
 ###############################################################################
-def show_autoplay_video(video_data: bytes, title: str = "Video"):
-    if not video_data:
+def show_autoplay_video(video_bytes: bytes, title: str = "Video"):
+    if not video_bytes:
         st.warning(f"No {title} video available.")
         return
-    video_base64 = base64.b64encode(video_data).decode()
+    video_base64 = base64.b64encode(video_bytes).decode()
     video_html = f"""
         <h4>{title}</h4>
-        <video width="100%" height="auto" controls autoplay muted>
-            <source src="data:video/mp4;base64,{video_base64}" type="video/mp4">
-            Your browser does not support the video tag.
+        <video width="100%" controls autoplay muted>
+            <source src="data:video/mp4;base64,{video_base64}" type="video/mp4">
+            Your browser does not support the video tag.
         </video>
     """
     st.markdown(video_html, unsafe_allow_html=True)

 ###############################################################################
-# Session state initialization for processed results (for uploaded files)
+# Session state initialization (for uploaded processing results)
 ###############################################################################
 if "processed_frames" not in st.session_state:
     st.session_state["processed_frames"] = []
@@ -34,7 +35,7 @@ if "shortened_video_ready" not in st.session_state:
     st.session_state["shortened_video_ready"] = False

 ###############################################################################
-# Configure YOLO model path and Streamlit page
+# Configure YOLO model path and page layout
 ###############################################################################
 model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'
 st.set_page_config(
@@ -44,18 +45,35 @@ st.set_page_config(
     initial_sidebar_state="expanded"
 )

+st.title("Fire Watch: Detecting fire using AI vision models")
+col1, col2 = st.columns(2)
+with col1:
+    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
+with col2:
+    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)
+
+st.markdown("""
+Fires in Colorado present a serious challenge, threatening urban communities, highways, and even remote areas.
+Early detection is critical. Fire Watch uses the model YOLOv8 for real-time fire and smoke detection
+in images and videos.
+""")
+st.markdown("---")
+st.header("Fire Detection:")
+
+
 ###############################################################################
-# SIDEBAR: Upload file, set confidence, video option, and select an example pair
+# SIDEBAR: Video input options, confidence, sampling options, and example selection
 ###############################################################################
 with st.sidebar:
     st.header("Video Input Options")
+    # Option to select an example pair; "None" means use an uploaded file.
     example_option = st.selectbox(
         "Select Example Pair (optional)",
         ["None", "T Example", "LA Example"]
     )
     source_file = st.file_uploader(
         "Or upload your own file...",
-        type=("jpg", "jpeg", "png", "bmp", "webp", "mp4")
+        type=("mp4", "jpg", "jpeg", "png", "bmp", "webp")
     )
     confidence = float(st.slider("Select Model Confidence", 25, 100, 40)) / 100
     video_option = st.selectbox(
@@ -66,7 +84,7 @@ with st.sidebar:
     progress_bar = st.progress(0)

 ###############################################################################
-# MAIN PAGE TITLE
+# MAIN TITLE
 ###############################################################################
 st.title("Fire Detection: Original vs. Processed Video")

@@ -80,152 +98,151 @@ except Exception as ex:
     st.error(ex)

 ###############################################################################
-# Determine source: Example or Uploaded File
+# Determine source video(s): Example pair or uploaded file.
 ###############################################################################
 original_video_data = None
-processed_video_data = None # For example pairs, these are loaded directly
+processed_video_data = None # For example pairs

 if example_option != "None":
-    # An example pair was chosen. Load the videos from disk.
+    # Use example videos from remote URLs.
     if example_option == "T Example":
-        # T1.mp4: original, T2.mpg: processed (analysis completed video)
-        try:
-            with open("T1.mp4", "rb") as f:
-                original_video_data = f.read()
-            with open("T2.mpg", "rb") as f:
-                processed_video_data = f.read()
-        except Exception as ex:
-            st.error("Error loading T Example videos. Ensure T1.mp4 and T2.mpg are in your repo.")
+        # For T Example: set your URLs for original and processed videos.
+        orig_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/T1.mp4"
+        proc_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/T2.mpg"
     elif example_option == "LA Example":
-        # LA1.mp4: original, LA2.mp4: processed
-        try:
-            with open("LA1.mp4", "rb") as f:
-                original_video_data = f.read()
-            with open("LA2.mp4", "rb") as f:
-                processed_video_data = f.read()
-        except Exception as ex:
-            st.error("Error loading LA Example videos. Ensure LA1.mp4 and LA2.mp4 are in your repo.")
+        orig_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/LA1.mp4"
+        proc_url = "https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/LA2.mp4"
+    try:
+        original_video_data = requests.get(orig_url).content
+        processed_video_data = requests.get(proc_url).content
+    except Exception as ex:
+        st.error("Error loading example videos. Check your URLs.")
 else:
-    # No example selected. Use uploaded file if available.
+    # No example selected. If a file is uploaded, use it.
    if source_file:
        file_type = source_file.type.split('/')[0]
        if file_type == 'image':
-            # For images, simply show the uploaded image (and detection result below)
+            # For images, convert to video-like display (or you could run image detection).
            original_image = PIL.Image.open(source_file)
-            # Convert image to bytes for display if needed
            buf = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
            original_image.save(buf.name, format="PNG")
            with open(buf.name, "rb") as f:
-                original_video_data = f.read() # Actually, this is just an image preview.
+                original_video_data = f.read()
        else:
-            # For video, save to a temporary file and load its bytes.
-            tfile = tempfile.NamedTemporaryFile(delete=False)
+            # For video uploads, save to a temp file.
+            tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
            tfile.write(source_file.read())
            tfile.flush()
            with open(tfile.name, "rb") as vf:
                original_video_data = vf.read()
-            # Also open video with OpenCV for processing below.
+            # Open with OpenCV for processing.
            vidcap = cv2.VideoCapture(tfile.name)
    else:
-        st.info("Please select an example pair or upload a file.")
+        st.info("Please select an example pair or upload a video file.")

 ###############################################################################
-# Display the Original and Result columns side-by-side
+# Layout: Two columns for Original and Processed videos
 ###############################################################################
 col1, col2 = st.columns(2)

-# Left column: Original video
 with col1:
     st.subheader("Original File")
     if original_video_data:
-        show_autoplay_video(original_video_data, title="Original")
+        show_autoplay_video(original_video_data, title="Original Video")
     else:
         st.info("No original video available.")

-###############################################################################
-# DETECTION: For uploaded video files (not example pairs) run YOLO analysis
-###############################################################################
-# We only run detection if no example pair is selected and if an upload is provided.
-if example_option == "None" and source_file and source_file.type.split('/')[0] != 'image':
-    # Reset processed frames for a new analysis
-    st.session_state["processed_frames"] = []
-    frame_count = 0
-    orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
-    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
-    width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-    # Determine sampling interval
-    if video_option == "Original FPS":
-        sample_interval = 1
-        output_fps = orig_fps
-    elif video_option == "1 fps":
-        sample_interval = int(orig_fps) if orig_fps > 0 else 1
-        output_fps = 1
-    elif video_option == "1 frame per 5 seconds":
-        sample_interval = int(orig_fps * 5) if orig_fps > 0 else 5
-        output_fps = 1
-    elif video_option == "1 frame per 10 seconds":
-        sample_interval = int(orig_fps * 10) if orig_fps > 0 else 10
-        output_fps = 1
-    elif video_option == "1 frame per 15 seconds":
-        sample_interval = int(orig_fps * 15) if orig_fps > 0 else 15
-        output_fps = 1
+with col2:
+    st.subheader("Result File")
+    if example_option != "None":
+        # For example pairs, the processed video is already available.
+        if processed_video_data:
+            show_autoplay_video(processed_video_data, title="Processed Video")
+        else:
+            st.info("No processed video available in example.")
     else:
-        sample_interval = 1
-        output_fps = orig_fps
-
-    success, image = vidcap.read()
-    while success:
-        if frame_count % sample_interval == 0:
-            res = model.predict(image, conf=confidence)
-            res_plotted = res[0].plot()[:, :, ::-1]
-            st.session_state["processed_frames"].append(res_plotted)
-            # Update progress
-            if total_frames > 0:
-                progress_pct = int((frame_count / total_frames) * 100)
-                progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
-                progress_bar.progress(min(100, progress_pct))
-            else:
-                progress_text.text(f"Processing frame {frame_count}")
-        frame_count += 1
-        success, image = vidcap.read()
-
-    progress_text.text("Video processing complete!")
-    progress_bar.progress(100)
-
-    # Create shortened video from processed frames
-    processed_frames = st.session_state["processed_frames"]
-    if processed_frames:
-        temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-        out = cv2.VideoWriter(temp_video_file.name, fourcc, output_fps, (width, height))
-        for frame in processed_frames:
-            out.write(frame)
-        out.release()
-
-        with open(temp_video_file.name, 'rb') as video_file:
-            st.session_state["shortened_video_data"] = video_file.read()
-        st.session_state["shortened_video_ready"] = True
-
-        st.success("Processed video created successfully!")
+        # For uploaded files, if a processed video is ready, show it.
+        if st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
+            show_autoplay_video(st.session_state["shortened_video_data"], title="Processed Video")
+        else:
+            st.info("Processed video will appear here once detection is run.")

 ###############################################################################
-# Right column: Display the Processed (Result) video
+# DETECTION: Process the uploaded video if no example is selected.
 ###############################################################################
-with col2:
-    st.subheader("Result File")
-    # For example pairs, use the preloaded processed_video_data
-    if processed_video_data:
-        show_autoplay_video(processed_video_data, title="Processed")
-    # Otherwise, if a processed video has been generated from an upload, show it
-    elif st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
-        show_autoplay_video(st.session_state["shortened_video_data"], title="Processed")
-    else:
-        st.info("No processed video available yet. Run detection if you uploaded a file.")
+if example_option == "None" and source_file and source_file.type.split('/')[0] != 'image':
+    if st.sidebar.button("Let's Detect Wildfire"):
+        # Reset any previous processed results.
+        st.session_state["processed_frames"] = []
+        st.session_state["shortened_video_data"] = None
+        st.session_state["shortened_video_ready"] = False
+
+        processed_frames = st.session_state["processed_frames"]
+
+        frame_count = 0
+        orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
+        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
+        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        # Determine sampling interval based on option.
+        if video_option == "Original FPS":
+            sample_interval = 1
+            output_fps = orig_fps
+        elif video_option == "1 fps":
+            sample_interval = int(orig_fps) if orig_fps > 0 else 1
+            output_fps = 1
+        elif video_option == "1 frame per 5 seconds":
+            sample_interval = int(orig_fps * 5) if orig_fps > 0 else 5
+            output_fps = 1
+        elif video_option == "1 frame per 10 seconds":
+            sample_interval = int(orig_fps * 10) if orig_fps > 0 else 10
+            output_fps = 1
+        elif video_option == "1 frame per 15 seconds":
+            sample_interval = int(orig_fps * 15) if orig_fps > 0 else 15
+            output_fps = 1
+        else:
+            sample_interval = 1
+            output_fps = orig_fps
+
+        success, image = vidcap.read()
+        while success:
+            if frame_count % sample_interval == 0:
+                res = model.predict(image, conf=confidence)
+                res_plotted = res[0].plot()[:, :, ::-1]
+                processed_frames.append(res_plotted)
+                # Update progress
+                if total_frames > 0:
+                    progress_pct = int((frame_count / total_frames) * 100)
+                    progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
+                    progress_bar.progress(min(100, progress_pct))
+                else:
+                    progress_text.text(f"Processing frame {frame_count}")
+            frame_count += 1
+            success, image = vidcap.read()
+
+        progress_text.text("Video processing complete!")
+        progress_bar.progress(100)
+
+        # Create shortened video from processed frames.
+        if processed_frames:
+            temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
+            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+            out = cv2.VideoWriter(temp_video_file.name, fourcc, output_fps, (width, height))
+            for frame in processed_frames:
+                out.write(frame)
+            out.release()
+
+            with open(temp_video_file.name, 'rb') as video_file:
+                st.session_state["shortened_video_data"] = video_file.read()
+            st.session_state["shortened_video_ready"] = True
+
+            st.success("Processed video created successfully!")
+        else:
+            st.error("No frames were processed from the video.")

 ###############################################################################
-# Always display the download button if a processed video is ready
+# Always show the download button if a processed video is ready.
 ###############################################################################
 if st.session_state["shortened_video_ready"] and st.session_state["shortened_video_data"]:
     st.download_button(
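
Note on the new flow: the detection pass is now gated behind st.sidebar.button("Let's Detect Wildfire"), and its output is stored in st.session_state, so the processed bytes survive Streamlit's script reruns (for example, the rerun triggered by clicking the download button). A stripped-down sketch of that pattern, independent of this app's specifics; the labels and placeholder bytes below are illustrative only:

import streamlit as st

if "result_bytes" not in st.session_state:
    st.session_state["result_bytes"] = None

if st.sidebar.button("Run the expensive step"):
    # The heavy work runs only on the rerun triggered by the button click...
    st.session_state["result_bytes"] = b"...processed output..."

# ...but the stored result remains available on every later rerun,
# so widgets that depend on it keep working after unrelated interactions.
if st.session_state["result_bytes"]:
    st.download_button("Download result", st.session_state["result_bytes"], file_name="result.mp4")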
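
For reference, the frame-sampling arithmetic (unchanged in substance, now inside the button handler) maps the sidebar option to a sampling interval and an output frame rate. A minimal standalone sketch of that mapping; the helper name and the lookup dict are illustrative, not part of the committed code:

def sampling_params(video_option: str, orig_fps: float):
    # Mirrors the if/elif chain in app.py: returns (sample_interval, output_fps).
    seconds_per_sample = {
        "1 fps": 1,
        "1 frame per 5 seconds": 5,
        "1 frame per 10 seconds": 10,
        "1 frame per 15 seconds": 15,
    }
    if video_option in seconds_per_sample:
        secs = seconds_per_sample[video_option]
        # Keep one frame per `secs` seconds of source video; fall back to a plain
        # frame count when the FPS metadata is missing or zero.
        return (int(orig_fps * secs) if orig_fps > 0 else secs), 1
    # "Original FPS" (or any unrecognized option) keeps every frame.
    return 1, orig_fps

At 30 fps, for instance, "1 frame per 10 seconds" gives sample_interval = 300, so a five-minute clip (9000 frames) yields 30 processed frames, which play back in roughly 30 seconds at the 1 fps output rate.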