tstone87 committed on
Commit
086ae8e
·
verified ·
1 Parent(s): a7b0109

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -28
app.py CHANGED
@@ -5,17 +5,12 @@ import streamlit as st
5
  import PIL
6
  from ultralytics import YOLO
7
 
8
- # Required libraries (ensure these are in your requirements.txt):
9
- # streamlit
10
- # opencv-python-headless
11
- # ultralytics
12
- # Pillow
13
 
14
- # Replace with your model's URL or local path
15
- model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt' # Your correct model
16
 
17
-
18
- # Configure the page for Hugging Face Spaces
19
  st.set_page_config(
20
  page_title="Fire Watch using AI vision models",
21
  page_icon="🔥",
@@ -23,19 +18,23 @@ st.set_page_config(
23
  initial_sidebar_state="expanded"
24
  )
25
 
26
- # Sidebar for file upload and settings
27
  with st.sidebar:
28
  st.header("IMAGE/VIDEO UPLOAD")
29
  source_file = st.file_uploader(
30
  "Choose an image or video...", type=("jpg", "jpeg", "png", "bmp", "webp", "mp4"))
31
- confidence = float(st.slider("Select Model Confidence", 20, 100, 30)) / 100
32
  video_option = st.selectbox(
33
  "Select Video Shortening Option",
34
  ["Original FPS", "1 fps", "1 frame per 5 seconds", "1 frame per 10 seconds", "1 frame per 15 seconds"]
35
  )
 
 
 
 
36
 
37
- # Main page header and introduction images
38
- st.title("Fire Watch: Detecting fire or smoke using AI vision models")
39
  col1, col2 = st.columns(2)
40
  with col1:
41
  st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
@@ -43,11 +42,12 @@ with col2:
43
  st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)
44
 
45
  st.markdown("""
46
- Fires in Colorado present a serious challenge, threatening urban communities, highways, and even remote, unpopulated areas. Unexpected incidents like lightning-strike brush fires can cause significant property damage, environmental degradation, and even loss of life. Early detection is critical to mitigating these risks. Fire Watch leverages a vision model called YOLOv8 for real-time detection of fires and smoke in images and videos, ensuring rapid response across Colorado’s diverse landscapes.
47
  """)
48
  st.markdown("---")
49
  st.header("Fire Detection:")
50
 
 
51
  col1, col2 = st.columns(2)
52
  if source_file:
53
  if source_file.type.split('/')[0] == 'image':
@@ -60,18 +60,26 @@ if source_file:
60
  else:
61
  st.info("Please upload an image or video file to begin.")
62
 
63
- # Load the YOLO model
64
  try:
65
  model = YOLO(model_path)
66
  except Exception as ex:
67
  st.error(f"Unable to load model. Check the specified path: {model_path}")
68
  st.error(ex)
69
 
70
- if st.sidebar.button("Let's Detect fire"):
 
 
 
 
 
 
 
 
71
  if not source_file:
72
  st.warning("No file uploaded!")
73
  elif source_file.type.split('/')[0] == 'image':
74
- # Process image input
75
  res = model.predict(uploaded_image, conf=confidence)
76
  boxes = res[0].boxes
77
  res_plotted = res[0].plot()[:, :, ::-1]
@@ -81,18 +89,19 @@ if st.sidebar.button("Let's Detect fire"):
81
  for box in boxes:
82
  st.write(box.xywh)
83
  else:
84
- # Process video input and shorten video based on sampling option
85
  processed_frames = []
86
  frame_count = 0
87
 
88
- # Video properties
89
  orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
 
90
  width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
91
  height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
92
-
93
- # Determine sampling interval and output fps
94
  if video_option == "Original FPS":
95
- sample_interval = 1 # process every frame
96
  output_fps = orig_fps
97
  elif video_option == "1 fps":
98
  sample_interval = int(orig_fps) if orig_fps > 0 else 1
@@ -110,20 +119,47 @@ if st.sidebar.button("Let's Detect fire"):
110
  sample_interval = 1
111
  output_fps = orig_fps
112
 
 
 
 
 
113
  success, image = vidcap.read()
114
  while success:
115
  if frame_count % sample_interval == 0:
 
116
  res = model.predict(image, conf=confidence)
117
  res_plotted = res[0].plot()[:, :, ::-1]
118
  processed_frames.append(res_plotted)
119
- with col2:
120
- st.image(res_plotted, caption=f'Detected Frame {frame_count}', use_column_width=True)
121
- with st.expander("Detection Results"):
122
- for box in res[0].boxes:
123
- st.write(box.xywh)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  frame_count += 1
125
  success, image = vidcap.read()
126
-
 
 
 
 
 
127
  if processed_frames:
128
  temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
129
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
 
5
  import PIL
6
  from ultralytics import YOLO
7
 
8
+ # Required libraries: streamlit, opencv-python-headless, ultralytics, Pillow
 
 
 
 
9
 
10
+ # Replace with your model URL or local file path
11
+ model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'
12
 
13
+ # Configure page layout for Hugging Face Spaces
 
14
  st.set_page_config(
15
  page_title="Fire Watch using AI vision models",
16
  page_icon="🔥",
 
18
  initial_sidebar_state="expanded"
19
  )
20
 
21
+ # Sidebar: Upload file, select confidence and video shortening options.
22
  with st.sidebar:
23
  st.header("IMAGE/VIDEO UPLOAD")
24
  source_file = st.file_uploader(
25
  "Choose an image or video...", type=("jpg", "jpeg", "png", "bmp", "webp", "mp4"))
26
+ confidence = float(st.slider("Select Model Confidence", 25, 100, 40)) / 100
27
  video_option = st.selectbox(
28
  "Select Video Shortening Option",
29
  ["Original FPS", "1 fps", "1 frame per 5 seconds", "1 frame per 10 seconds", "1 frame per 15 seconds"]
30
  )
31
+ progress_text = st.empty()
32
+ progress_bar = st.progress(0)
33
+ # A container for our frame slider (viewer)
34
+ slider_container = st.empty()
35
 
36
+ # Main page header and intro images
37
+ st.title("WildfireWatch: Detecting Wildfire using AI")
38
  col1, col2 = st.columns(2)
39
  with col1:
40
  st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
 
42
  st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)
43
 
44
  st.markdown("""
45
+ Fires in Colorado present a serious challenge, threatening urban communities, highways, and even remote areas. Early detection is critical. WildfireWatch leverages YOLOv8 for real-time fire and smoke detection in images and videos.
46
  """)
47
  st.markdown("---")
48
  st.header("Fire Detection:")
49
 
50
+ # Create two columns for displaying the upload and results.
51
  col1, col2 = st.columns(2)
52
  if source_file:
53
  if source_file.type.split('/')[0] == 'image':
 
60
  else:
61
  st.info("Please upload an image or video file to begin.")
62
 
63
+ # Load YOLO model
64
  try:
65
  model = YOLO(model_path)
66
  except Exception as ex:
67
  st.error(f"Unable to load model. Check the specified path: {model_path}")
68
  st.error(ex)
69
 
70
+ # Initialize session state for frame viewer if not already set
71
+ if 'viewer_frame' not in st.session_state:
72
+ st.session_state.viewer_frame = 0
73
+
74
+ # This container will display the currently viewed frame
75
+ viewer_slot = st.empty()
76
+
77
+ # When the user clicks the detect button...
78
+ if st.sidebar.button("Let's Detect Wildfire"):
79
  if not source_file:
80
  st.warning("No file uploaded!")
81
  elif source_file.type.split('/')[0] == 'image':
82
+ # Process image input.
83
  res = model.predict(uploaded_image, conf=confidence)
84
  boxes = res[0].boxes
85
  res_plotted = res[0].plot()[:, :, ::-1]
 
89
  for box in boxes:
90
  st.write(box.xywh)
91
  else:
92
+ # Process video input.
93
  processed_frames = []
94
  frame_count = 0
95
 
96
+ # Get video properties.
97
  orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
98
+ total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
99
  width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
100
  height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
101
+
102
+ # Determine sampling interval and output fps based on option selected.
103
  if video_option == "Original FPS":
104
+ sample_interval = 1
105
  output_fps = orig_fps
106
  elif video_option == "1 fps":
107
  sample_interval = int(orig_fps) if orig_fps > 0 else 1
 
119
  sample_interval = 1
120
  output_fps = orig_fps
121
 
122
+ # Initial slider for frame viewing.
123
+ slider_val = st.session_state.viewer_frame
124
+ slider = slider_container.slider("Frame Viewer", min_value=0, max_value=0, value=slider_val, step=1, key="frame_slider")
125
+
126
  success, image = vidcap.read()
127
  while success:
128
  if frame_count % sample_interval == 0:
129
+ # Run detection on current frame.
130
  res = model.predict(image, conf=confidence)
131
  res_plotted = res[0].plot()[:, :, ::-1]
132
  processed_frames.append(res_plotted)
133
+
134
+ # Update progress.
135
+ if total_frames > 0:
136
+ progress_pct = int((frame_count / total_frames) * 100)
137
+ progress_text.text(f"Processing frame {frame_count} / {total_frames} ({progress_pct}%)")
138
+ progress_bar.progress(min(100, progress_pct))
139
+ else:
140
+ progress_text.text(f"Processing frame {frame_count}")
141
+
142
+ # Update the slider's max value. Preserve current value.
143
+ current_index = st.session_state.get("frame_slider", len(processed_frames) - 1)
144
+ slider = slider_container.slider("Frame Viewer",
145
+ min_value=0,
146
+ max_value=len(processed_frames)-1,
147
+ value=current_index,
148
+ step=1,
149
+ key="frame_slider")
150
+
151
+ # If the user is at the latest frame, update the viewer.
152
+ if st.session_state.frame_slider == len(processed_frames)-1:
153
+ viewer_slot.image(processed_frames[-1], caption=f"Frame {len(processed_frames)-1}", use_column_width=True)
154
+
155
  frame_count += 1
156
  success, image = vidcap.read()
157
+
158
+ # Video processing complete.
159
+ progress_text.text("Video processing complete!")
160
+ progress_bar.progress(100)
161
+
162
+ # After processing, allow downloading the shortened video.
163
  if processed_frames:
164
  temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
165
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')