Update app.py
app.py CHANGED
@@ -1,8 +1,25 @@
+##############
+
+#Maximize CPU usage
+import multiprocessing
+import cv2
+
+# Get the number of CPU cores
+cpu_cores = multiprocessing.cpu_count()
+
+# Set OpenCV to use all available cores
+cv2.setNumThreads(cpu_cores)
+
+# Print the number of threads being used (optional)
+print(f"OpenCV using {cv2.getNumThreads()} threads out of {cpu_cores} available cores")
+
+##############
 import cv2
 import gradio as gr
 import numpy as np
 from PIL import Image, ImageDraw
 from ultralytics import YOLO
+from ultralytics.utils.plotting import Annotator, colors
 import logging
 import math
 
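
Note on the block above: cv2.setNumThreads() caps only OpenCV's own worker threads; PyTorch inference threading is configured separately. A minimal sketch of the same idea wrapped in a helper, assuming a hypothetical OPENCV_THREADS override that the committed app.py does not read:

# Sketch only, not part of the commit. OPENCV_THREADS is a hypothetical override.
import multiprocessing
import os

import cv2


def configure_opencv_threads():
    cores = multiprocessing.cpu_count()
    requested = int(os.environ.get("OPENCV_THREADS", cores))
    cv2.setNumThreads(requested)
    return cv2.getNumThreads()


if __name__ == "__main__":
    print(f"OpenCV using {configure_opencv_threads()} of {multiprocessing.cpu_count()} cores")
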
@@ -97,6 +114,37 @@ def reset_line():
     line_params = None
     return None, "Line reset. Click to draw a new line."
 
+def intersect(A, B, C, D):
+    """
+    Determines if two line segments AB and CD intersect.
+    """
+    def ccw(A, B, C):
+        return (C[1] - A[1]) * (B[0] - A[0]) - (B[1] - A[1]) * (C[0] - A[0])
+
+    def on_segment(A, B, C):
+        if min(A[0], B[0]) <= C[0] <= max(A[0], B[0]) and min(A[1], B[1]) <= C[1] <= max(A[1], B[1]):
+            return True
+        return False
+
+    # Check if the line segments intersect
+    ccw1 = ccw(A, B, C)
+    ccw2 = ccw(A, B, D)
+    ccw3 = ccw(C, D, A)
+    ccw4 = ccw(C, D, B)
+
+    if ((ccw1 * ccw2 < 0) and (ccw3 * ccw4 < 0)):
+        return True
+    elif ccw1 == 0 and on_segment(A, B, C):
+        return True
+    elif ccw2 == 0 and on_segment(A, B, D):
+        return True
+    elif ccw3 == 0 and on_segment(C, D, A):
+        return True
+    elif ccw4 == 0 and on_segment(C, D, B):
+        return True
+    else:
+        return False
+
 def is_object_crossing_line(box, line_params):
     """
     Determines if an object's bounding box is fully intersected by the user-drawn line.
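
For clarity, the new intersect() helper is built on the standard orientation (ccw) test. Below is a condensed, self-contained re-implementation covering only the proper-crossing case (the committed helper additionally handles collinear endpoints via on_segment); the coordinates are illustrative, not values from the app:

def ccw(A, B, C):
    # Cross product of (B - A) x (C - A); its sign gives the turn direction.
    return (C[1] - A[1]) * (B[0] - A[0]) - (B[1] - A[1]) * (C[0] - A[0])


def segments_cross(A, B, C, D):
    # AB and CD properly cross when C and D lie on opposite sides of AB
    # and A and B lie on opposite sides of CD.
    return ccw(A, B, C) * ccw(A, B, D) < 0 and ccw(C, D, A) * ccw(C, D, B) < 0


print(segments_cross((0, 0), (10, 10), (0, 10), (10, 0)))  # True: the diagonals of a square cross
print(segments_cross((0, 0), (10, 0), (0, 5), (10, 5)))    # False: parallel segments never cross
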
@@ -154,8 +202,9 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
         errors.append("Error: Could not open stream.")
         return None, "\n".join(errors)
 
-    model = YOLO(model="
-    crossed_objects =
+    model = YOLO(model="yolo11n.pt")
+    crossed_objects = {}
+    max_tracked_objects = 1000  # Maximum number of objects to track before clearing
 
     logger.info("Starting to process the stream...")
     while cap.isOpened():
@@ -164,29 +213,32 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
             errors.append("Error: Could not read frame from the stream.")
             break
 
-        # Perform object
-        results = model.
+        # Perform object tracking with confidence threshold
+        results = model.track(frame, persist=True, conf=confidence_threshold)
 
-
-
-        clss =
-
+        if results[0].boxes.id is not None:
+            track_ids = results[0].boxes.id.int().cpu().tolist()
+            clss = results[0].boxes.cls.cpu().tolist()
+            boxes = results[0].boxes.xyxy.cpu()
+            confs = results[0].boxes.conf.cpu().tolist()
 
-        for box, cls, conf in zip(boxes, clss, confs):
-            if conf >= confidence_threshold and model.names[
+            for box, cls, t_id, conf in zip(boxes, clss, track_ids, confs):
+                if conf >= confidence_threshold and model.names[cls] in selected_classes:
                     # Check if the object crosses the line
-                if is_object_crossing_line(box, line_params):
-
-                    center = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)
-                    crossed_objects.add(tuple(center))  # Add the center to the set
+                    if is_object_crossing_line(box, line_params) and t_id not in crossed_objects:
+                        crossed_objects[t_id] = True
 
-
+        # Clear the dictionary if it gets too large
+        if len(crossed_objects) > max_tracked_objects:
+            crossed_objects.clear()
+
+        # Visualize the results with bounding boxes, masks, and IDs
         annotated_frame = results[0].plot()
 
         # Draw the angled line on the frame
         draw_angled_line(annotated_frame, line_params, color=(0, 255, 0), thickness=2)
 
-        # Display the count on the frame
+        # Display the count on the frame with a modern look
        count = len(crossed_objects)
         (text_width, text_height), _ = cv2.getTextSize(f"COUNT: {count}", cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
 
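
The tracking changes above switch the counter from a set of box centers to a dictionary keyed by tracker ID, so an object that overlaps the line across many consecutive frames is counted once rather than once per frame. A minimal sketch of that deduplication pattern, using made-up (track_id, crossed_line) observations rather than real model output:

frames = [
    [(1, False), (2, True)],   # frame 1: track 2 crosses the line
    [(1, True), (2, True)],    # frame 2: track 1 crosses; track 2 still overlaps the line
    [(3, True)],               # frame 3: a new track 3 crosses
]

crossed_objects = {}
for detections in frames:
    for track_id, crossed in detections:
        # Count each track ID at most once, however many frames it stays on the line.
        if crossed and track_id not in crossed_objects:
            crossed_objects[track_id] = True

print(f"COUNT: {len(crossed_objects)}")  # COUNT: 3
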
@@ -211,8 +263,12 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
 with gr.Blocks() as demo:
     gr.Markdown("<h1>Real-time monitoring, object tracking, and line-crossing detection for CCTV camera streams.</h1></center>")
     gr.Markdown("## https://github.com/SanshruthR/CCTV_SENTRY_YOLO11")
-
+
+
+
     # Step 1: Enter the IP Camera Stream URL
+    # gr.Markdown("### Step 0: Enter the IP Camera Stream URL")
+    # stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s103.ipcamlive.com/streams/67n4ojknye7lkxpmf/stream.m3u8", visible=False)
     stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s104.ipcamlive.com/streams/68idokwtondsqpmkr/stream.m3u8", visible=False)
 
     # Step 1: Extract the first frame from the stream
@@ -224,12 +280,18 @@ with gr.Blocks() as demo:
     # Image component for displaying the first frame
     image = gr.Image(value=first_frame, label="First Frame of Stream", type="pil")
 
+
     line_info = gr.Textbox(label="Line Coordinates", value="Line Coordinates:\nStart: None, End: None")
     image.select(update_line, inputs=image, outputs=[image, line_info])
 
+    # Reset the line (optional)
+    # gr.Markdown("### Step 4: Reset the Line (Optional)")
+    # reset_button = gr.Button("Reset Line")
+    # reset_button.click(reset_line, inputs=None, outputs=[image, line_info])
+
     # Step 2: Select classes to detect
     gr.Markdown("### Step 2: Select Classes to Detect")
-    model = YOLO(model="
+    model = YOLO(model="yolo11n.pt")  # Load the model to get class names
     class_names = list(model.names.values())  # Get class names
     selected_classes = gr.CheckboxGroup(choices=class_names, label="Select Classes to Detect")
 
@@ -237,7 +299,7 @@ with gr.Blocks() as demo:
     gr.Markdown("### Step 3: Adjust Confidence Threshold (Optional)")
     confidence_threshold = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Confidence Threshold")
 
-    #
+    #process the stream
     process_button = gr.Button("Process Stream")
 
     # Output image for real-time frame rendering