tstone87 committed
Commit 2d60aec · verified · 1 Parent(s): 1966824

Update app.py

Files changed (1)
  1. app.py +143 -168
app.py CHANGED
@@ -1,185 +1,160 @@
  import cv2
- import streamlit as st
  from ultralytics import YOLO
- import time
- import numpy as np
- from datetime import datetime
- import pytz

- # Page config and header
  st.set_page_config(
-     page_title="Your Original App",
-     page_icon="🔥",
      layout="wide",
      initial_sidebar_state="expanded"
  )
- st.title("Your Original App")

- # --- Session State Initialization ---
- if "streams" not in st.session_state:
-     st.session_state.streams = []
- if "num_streams" not in st.session_state:
-     st.session_state.num_streams = 1
- if "confidence" not in st.session_state:
-     st.session_state.confidence = 0.30  # default 30%
- if "target_fps" not in st.session_state:
-     st.session_state.target_fps = 1.0  # default 1 FPS

- # --- Default URLs and Names for 10 Streams ---
- default_m3u8_urls = [
-     "https://publicstreamer4.cotrip.org/rtplive/070E27890CAM1RHS/playlist.m3u8",  # EB at i270
-     "https://publicstreamer2.cotrip.org/rtplive/070E27555CAM1RP1/playlist.m3u8",  # EB @ York St Denver
-     "https://publicstreamer1.cotrip.org/rtplive/225N00535CAM1RP1/playlist.m3u8",  # NB at Iliff Denver
-     "https://publicstreamer2.cotrip.org/rtplive/070W28220CAM1RHS/playlist.m3u8",  # WB Half Mile West of I225 Denver
-     "https://publicstreamer1.cotrip.org/rtplive/070W26805CAM1RHS/playlist.m3u8",  # 1 mile E of Kipling Denver
-     "https://publicstreamer4.cotrip.org/rtplive/076W03150CAM1RP1/playlist.m3u8",  # Main St Hudson
-     "https://publicstreamer2.cotrip.org/rtplive/070E27660CAM1NEC/playlist.m3u8",  # EB Colorado Blvd i70 Denver
-     "https://publicstreamer2.cotrip.org/rtplive/070W27475CAM1RHS/playlist.m3u8",  # E of Washington St Denver
-     "https://publicstreamer3.cotrip.org/rtplive/070W28155CAM1RHS/playlist.m3u8",  # WB Peroia St Underpass Denver
-     "https://publicstreamer3.cotrip.org/rtplive/070E11660CAM1RHS/playlist.m3u8"   # Grand Ave Glenwood
- ]
- default_names = [
-     "EB at i270",
-     "EB @ York St Denver",
-     "NB at Iliff Denver",
-     "WB Half Mile West of I225 Denver",
-     "1 mile E of Kipling Denver",
-     "Main St Hudson",
-     "EB Colorado Blvd i70 Denver",
-     "E of Washington St Denver",
-     "WB Peroia St Underpass Denver",
-     "Grand Ave Glenwood"
- ]

- # --- Sidebar Settings ---
- with st.sidebar:
-     st.header("Stream Settings")
-     # Custom configuration for stream 1 (optional)
-     custom_m3u8 = st.text_input("Custom M3U8 URL for Stream 1 (optional)", value="", key="custom_m3u8")
-     custom_name = st.text_input("Custom Webcam Name for Stream 1 (optional)", value="", key="custom_name")
-
-     # Choose number of streams (1 to 10)
-     num_streams = st.selectbox("Number of Streams", list(range(1, 11)), index=0)
-     st.session_state.num_streams = num_streams

-     # Global settings for confidence and processing rate.
-     confidence = float(st.slider("Confidence Threshold", 5, 100, 30)) / 100
-     st.session_state.confidence = confidence
-     fps_options = {
-         "1 FPS": 1,
-         "1 frame/2s": 0.5,
-         "1 frame/3s": 0.3333,
-         "1 frame/5s": 0.2,
-         "1 frame/15s": 0.0667,
-         "1 frame/30s": 0.0333
-     }
-     video_option = st.selectbox("Processing Rate", list(fps_options.keys()), index=3)
-     st.session_state.target_fps = fps_options[video_option]
-
-     # Initialize or update stream state
-     if len(st.session_state.streams) != st.session_state.num_streams:
-         st.session_state.streams = []
-         for i in range(st.session_state.num_streams):
-             if i == 0:
-                 url = custom_m3u8.strip() if custom_m3u8.strip() else default_m3u8_urls[0]
-                 display_name = custom_name.strip() if custom_name.strip() else default_names[0]
-             else:
-                 url = default_m3u8_urls[i] if i < len(default_m3u8_urls) else ""
-                 display_name = default_names[i] if i < len(default_names) else f"Stream {i+1}"
-             st.session_state.streams.append({
-                 "current_m3u8_url": url,
-                 "processed_frame": np.zeros((480, 640, 3), dtype=np.uint8),
-                 "start_time": time.time(),
-                 "processed_count": 0,
-                 "detected_frames": [],
-                 "last_processed_time": 0,
-                 "stats_text": "Processing FPS: 0.00\nFrame Delay: 0.00 sec\nTensor Results: No detections",
-                 "highest_match": 0.0,
-                 "display_name": display_name
-             })
-     else:
-         if st.session_state.num_streams > 0:
-             url = custom_m3u8.strip() if custom_m3u8.strip() else default_m3u8_urls[0]
-             display_name = custom_name.strip() if custom_name.strip() else default_names[0]
-             st.session_state.streams[0]["current_m3u8_url"] = url
-             st.session_state.streams[0]["display_name"] = display_name

- confidence = st.session_state.confidence
- target_fps = st.session_state.target_fps

- # --- Load Model ---
- model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'
- @st.cache_resource
- def load_model():
-     return YOLO(model_path)

- try:
-     model = load_model()
- except Exception as ex:
-     st.error(f"Model loading failed: {str(ex)}")
-     st.stop()

- # --- Create Placeholders for Streams in a 2-Column Grid ---
- num_streams = st.session_state.num_streams
- feed_placeholders = []
- stats_placeholders = []
- cols = st.columns(2)
- for i in range(num_streams):
-     col_index = i % 2
-     if i >= 2 and col_index == 0:
-         cols = st.columns(2)
-     feed_placeholders.append(cols[col_index].empty())
-     stats_placeholders.append(cols[col_index].empty())
- if num_streams == 1:
-     _ = st.columns(2)

- def update_stream(i):
-     current_time = time.time()
-     sleep_time = 1.0 / target_fps
-     stream_state = st.session_state.streams[i]
-     if current_time - stream_state["last_processed_time"] >= sleep_time:
-         url = stream_state["current_m3u8_url"]
-         cap = cv2.VideoCapture(url)
-         if not cap.isOpened():
-             stats_placeholders[i].text("Failed to open M3U8 stream.")
-             return
-         ret, frame = cap.read()
-         cap.release()
-         if not ret:
-             stats_placeholders[i].text("Stream interrupted or ended.")
-             return
-         res = model.predict(frame, conf=confidence)
-         processed_frame = res[0].plot()[:, :, ::-1]
-         tensor_info = "No detections"
-         max_conf = 0.0
-         try:
-             boxes = res[0].boxes
-             if boxes is not None and len(boxes) > 0:
-                 max_conf = float(boxes.conf.max())
-                 tensor_info = f"Detections: {len(boxes)} | Max Confidence: {max_conf:.2f}"
-         except Exception as ex:
-             tensor_info = f"Error extracting detections: {ex}"
-         if max_conf >= stream_state["highest_match"]:
-             stream_state["highest_match"] = max_conf
-             stream_state["detected_frames"].append(processed_frame)
-         stream_state["processed_count"] += 1
-         stream_state["last_processed_time"] = current_time
-         mt_time = datetime.now(pytz.timezone('America/Denver')).strftime('%Y-%m-%d %H:%M:%S MT')
-         stream_state["processed_frame"] = processed_frame
-         stream_state["stats_text"] = (
-             f"Processing FPS: {stream_state['processed_count'] / (current_time - stream_state['start_time']):.2f}\n"
-             f"{tensor_info}\n"
-             f"Highest Match: {stream_state['highest_match']:.2f}"
-         )
-         feed_placeholders[i].image(
-             processed_frame,
-             caption=f"Stream {i+1} - {stream_state['display_name']} - {mt_time}",
-             use_container_width=True
-         )
-     stats_placeholders[i].text(stream_state["stats_text"])

- # --- Update Streams and Rerun ---
- for i in range(num_streams):
-     update_stream(i)
- time.sleep(1.0 / target_fps)
- st.experimental_rerun()
+ import os
+ import sys
+ import tempfile
  import cv2
+ import requests
  from ultralytics import YOLO
+ import streamlit as st

+ # Set page configuration
  st.set_page_config(
+     page_title="People Tracking with YOLO11-pose",
+     page_icon="👥",
      layout="wide",
      initial_sidebar_state="expanded"
  )
+ st.title("People Tracking with YOLO11-pose")

+ # Sidebar: Input method and settings
+ st.sidebar.header("Input Settings")
+ uploaded_file = st.sidebar.file_uploader("Upload Image/Video", type=["jpg", "jpeg", "png", "bmp", "webp", "mp4"])
+ youtube_link = st.sidebar.text_input("YouTube Link (optional)", "")
+ image_url = st.sidebar.text_input("Image URL (optional)", "")
+ sensitivity = st.sidebar.slider("Sensitivity (Confidence Threshold)", 0.0, 1.0, 0.2, step=0.01)
+ process_button = st.sidebar.button("Process Input")

+ # Define the video extensions for later use
+ video_exts = [".mp4", ".mov", ".avi", ".webm"]

+ def process_input(uploaded_file, youtube_link, image_url, sensitivity):
+     input_path = None
+     temp_files = []

+     # Input priority: YouTube link > Image URL > Uploaded file.
+     if youtube_link and youtube_link.strip():
+         try:
+             from pytubefix import YouTube
+             yt = YouTube(youtube_link)
+             stream = yt.streams.filter(file_extension='mp4', progressive=True).order_by("resolution").desc().first()
+             if not stream:
+                 return None, None, None, "No suitable mp4 stream found."
+             temp_path = os.path.join(tempfile.gettempdir(), f"yt_{os.urandom(8).hex()}.mp4")
+             stream.download(output_path=tempfile.gettempdir(), filename=os.path.basename(temp_path))
+             input_path = temp_path
+             temp_files.append(input_path)
+         except Exception as e:
+             return None, None, None, f"Error downloading YouTube video: {str(e)}"
+     elif image_url and image_url.strip():
+         try:
+             response = requests.get(image_url, stream=True, timeout=10)
+             response.raise_for_status()
+             temp_path = os.path.join(tempfile.gettempdir(), f"img_{os.urandom(8).hex()}.jpg")
+             with open(temp_path, "wb") as f:
+                 f.write(response.content)
+             input_path = temp_path
+             temp_files.append(input_path)
+         except Exception as e:
+             return None, None, None, f"Error downloading image: {str(e)}"
+     elif uploaded_file is not None:
+         # Save the uploaded file to a temporary file
+         ext = os.path.splitext(uploaded_file.name)[1]
+         with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
+             tmp.write(uploaded_file.read())
+             input_path = tmp.name
+         temp_files.append(input_path)
+     else:
+         return None, None, None, "Please provide an input."

+     ext = os.path.splitext(input_path)[1].lower()
+     output_path = None

+     # Load the YOLO model (ensure the model file is available in your repository)
+     model = YOLO("yolo11n-pose.pt")

+     try:
+         if ext in video_exts:
+             # Video processing
+             cap = cv2.VideoCapture(input_path)
+             if not cap.isOpened():
+                 return None, None, None, f"Cannot open video file: {input_path}"
+
+             fps = cap.get(cv2.CAP_PROP_FPS)
+             width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+             height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+             frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+             if fps <= 0 or width <= 0 or height <= 0:
+                 return None, None, None, "Invalid video properties detected."
+
+             output_path = os.path.join(tempfile.gettempdir(), f"out_{os.urandom(8).hex()}.mp4")
+             # Use 'mp4v' as codec
+             fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+             out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+             if not out.isOpened():
+                 return None, None, None, "Video processing failed: No suitable encoder available."
+
+             processed_frames = 0
+             while True:
+                 ret, frame = cap.read()
+                 if not ret:
+                     break
+
+                 # Process frame: convert to RGB, run YOLO, then annotate and convert back to BGR.
+                 frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                 results = model.predict(source=frame_rgb, conf=sensitivity)[0]
+                 annotated_frame = results.plot()
+                 annotated_frame_bgr = cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR)
+
+                 out.write(annotated_frame_bgr)
+                 processed_frames += 1
+
+             cap.release()
+             out.release()
+             temp_files.append(output_path)
+
+             if processed_frames == 0:
+                 return None, None, None, "No frames processed from video."
+
+             if not os.path.exists(output_path) or os.path.getsize(output_path) < 1024:
+                 return None, None, None, f"Output video created but too small ({os.path.getsize(output_path)} bytes) - processing failed."
+
+             return output_path, None, output_path, f"Video processed successfully! ({processed_frames}/{frame_count} frames)"

+         else:
+             # Image processing
+             results = model.predict(source=input_path, conf=sensitivity)[0]
+             annotated = results.plot()
+             output_path = os.path.join(tempfile.gettempdir(), f"out_{os.urandom(8).hex()}.jpg")
+             cv2.imwrite(output_path, annotated)
+             temp_files.append(output_path)
+             return output_path, output_path, None, "Image processed successfully!"

+     except Exception as e:
+         return None, None, None, f"Processing error: {str(e)}"
+
+     finally:
+         # Clean up temporary files except the final output
+         for f in temp_files[:-1]:
+             if f and os.path.exists(f):
+                 try:
+                     os.remove(f)
+                 except:
+                     pass

+ # When the user clicks "Process Input"
+ if process_button:
+     out_file, out_img, out_vid, status = process_input(uploaded_file, youtube_link, image_url, sensitivity)
+     st.write(status)
+     if out_img:
+         st.image(out_img, caption="Annotated Output (Image)", use_column_width=True)
+     if out_vid:
+         st.video(out_vid)
+     if out_file:
+         with open(out_file, "rb") as f:
+             st.download_button(
+                 label="Download Annotated Output",
+                 data=f,
+                 file_name=os.path.basename(out_file),
+                 mime="video/mp4" if os.path.splitext(out_file)[1].lower() in video_exts else "image/jpeg"
+             )
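
A quick way to sanity-check the new code outside the Streamlit UI is to call process_input() directly. The sketch below is illustrative only and is not part of the commit: the test image URL is a placeholder, and it assumes ultralytics can resolve the yolo11n-pose.pt weights (downloading them on first use if needed).

    # Hypothetical smoke test for process_input(); not part of this commit.
    # Assumes the yolo11n-pose.pt weights are resolvable and the image URL is reachable.
    if __name__ == "__main__":
        out_file, out_img, out_vid, status = process_input(
            uploaded_file=None,
            youtube_link="",
            image_url="https://ultralytics.com/images/bus.jpg",  # placeholder test image
            sensitivity=0.2,
        )
        print(status)
        if out_img:
            print("Annotated image written to:", out_img)

In the Space itself the entry point is unchanged: Streamlit runs app.py (streamlit run app.py locally), and the sidebar's "Process Input" button drives the same function.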