Update app.py

app.py CHANGED
@@ -1,6 +1,4 @@
-
-
-#Maximize CPU usage
+# Maximize CPU usage
 import multiprocessing
 import cv2

@@ -32,6 +30,9 @@ start_point = None
 end_point = None
 line_params = None # Stores (slope, intercept) of the line

+# Low-resolution for inference
+LOW_RES = (320, 180)
+
 def extract_first_frame(stream_url):
     """
     Extracts the first available frame from the IP camera stream and returns it as a PIL image.
@@ -178,6 +179,32 @@ def draw_angled_line(image, line_params, color=(0, 255, 0), thickness=2):
     _, _, start_point, end_point = line_params
     cv2.line(image, start_point, end_point, color, thickness)

+def detect_and_draw(frame):
+    """
+    Processes the frame in low resolution and scales the results back to high resolution.
+    """
+    # Create low-res copy
+    low_res_frame = cv2.resize(frame, LOW_RES)
+
+    # Perform detection on the low-res frame
+    results = model(low_res_frame, verbose=False)
+
+    # Calculate scaling factors for bounding boxes
+    scale_x = frame.shape[1] / LOW_RES[0]
+    scale_y = frame.shape[0] / LOW_RES[1]
+
+    # Draw bounding boxes on the high-res frame
+    for detection in results[0].boxes.data:
+        x1, y1, x2, y2, conf, cls = detection
+        # Scale bounding box coordinates to high-res
+        x1, y1, x2, y2 = int(x1 * scale_x), int(y1 * scale_y), int(x2 * scale_x), int(y2 * scale_y)
+        label = f"{results[0].names[int(cls)]} {conf:.2f}"
+        # Draw the bounding box and label on the high-res frame
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+        cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+    return frame
+
 def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
     """
     Processes the IP camera stream to count objects of the selected classes crossing the line.
@@ -233,7 +260,7 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
         crossed_objects.clear()

         # Visualize the results with bounding boxes, masks, and IDs
-        annotated_frame =
+        annotated_frame = detect_and_draw(frame)

         # Draw the angled line on the frame
         draw_angled_line(annotated_frame, line_params, color=(0, 255, 0), thickness=2)
@@ -264,11 +291,7 @@ with gr.Blocks() as demo:
     gr.Markdown("<h1>Real-time monitoring, object tracking, and line-crossing detection for CCTV camera streams.</h1></center>")
     gr.Markdown("## https://github.com/SanshruthR/CCTV_SENTRY_YOLO11")

-
-
     # Step 1: Enter the IP Camera Stream URL
-    # gr.Markdown("### Step 0: Enter the IP Camera Stream URL")
-    # stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s103.ipcamlive.com/streams/67n4ojknye7lkxpmf/stream.m3u8", visible=False)
     stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s104.ipcamlive.com/streams/68idokwtondsqpmkr/stream.m3u8", visible=False)

     # Step 1: Extract the first frame from the stream
@@ -280,15 +303,9 @@ with gr.Blocks() as demo:
     # Image component for displaying the first frame
     image = gr.Image(value=first_frame, label="First Frame of Stream", type="pil")

-
     line_info = gr.Textbox(label="Line Coordinates", value="Line Coordinates:\nStart: None, End: None")
     image.select(update_line, inputs=image, outputs=[image, line_info])

-    # Reset the line (optional)
-    # gr.Markdown("### Step 4: Reset the Line (Optional)")
-    # reset_button = gr.Button("Reset Line")
-    # reset_button.click(reset_line, inputs=None, outputs=[image, line_info])
-
     # Step 2: Select classes to detect
     gr.Markdown("### Step 2: Select Classes to Detect")
     model = YOLO(model="yolo11n.pt") # Load the model to get class names
@@ -299,7 +316,7 @@ with gr.Blocks() as demo:
     gr.Markdown("### Step 3: Adjust Confidence Threshold (Optional)")
     confidence_threshold = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Confidence Threshold")

-    #
+    # Process the stream
     process_button = gr.Button("Process Stream")

     # Output image for real-time frame rendering
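For reference, a minimal standalone sketch of the low-resolution inference pattern introduced above: detect on a downscaled copy, then map the box coordinates back to the full frame. The video source, model file, and variable names here are illustrative assumptions, not part of app.py; only ultralytics and opencv-python are assumed to be installed.

# Sketch only: run YOLO on a downscaled frame and rescale boxes to full resolution.
import cv2
from ultralytics import YOLO

LOW_RES = (320, 180)        # (width, height) used for inference, as in the diff
model = YOLO("yolo11n.pt")  # same nano model the app loads

cap = cv2.VideoCapture(0)   # placeholder source; any cv2-readable stream works
ok, frame = cap.read()
cap.release()

if ok:
    low_res = cv2.resize(frame, LOW_RES)
    results = model(low_res, verbose=False)

    # Factors that map low-res box coordinates back to the original frame size.
    sx = frame.shape[1] / LOW_RES[0]
    sy = frame.shape[0] / LOW_RES[1]

    for x1, y1, x2, y2, conf, cls in results[0].boxes.data.tolist():
        x1, y1, x2, y2 = int(x1 * sx), int(y1 * sy), int(x2 * sx), int(y2 * sy)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

The trade-off is the usual one: inference cost scales with input resolution, while the drawn boxes stay aligned with the full-resolution frame because only the coordinates are rescaled, not the image shown to the user.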