Sanshruth committed on
Commit 6e400f8 · verified · 1 Parent(s): 339ebae

Update app.py

Files changed (1):
  app.py +78 -190
app.py CHANGED
@@ -1,38 +1,31 @@
-# Maximize CPU usage
-import multiprocessing
-import cv2
-
-# Get the number of CPU cores
-cpu_cores = multiprocessing.cpu_count()
-
-# Set OpenCV to use all available cores
-cv2.setNumThreads(cpu_cores)
-
-# Print the number of threads being used (optional)
-print(f"OpenCV using {cv2.getNumThreads()} threads out of {cpu_cores} available cores")
-
-##############
 import cv2
 import gradio as gr
 import numpy as np
 from PIL import Image, ImageDraw
 from ultralytics import YOLO
-from ultralytics.utils.plotting import Annotator, colors
 import logging
-import math
+import threading
+import queue
+import time
 
 # Set up logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-# Global variables to store line coordinates and line equation
+# Global variables for line coordinates and line equation
 start_point = None
 end_point = None
-line_params = None  # Stores (slope, intercept) of the line
+line_params = None  # Stores (slope, intercept, start_point, end_point)
 
 # Low-resolution for inference
 LOW_RES = (320, 180)
 
+# Frame queue for processed frames
+frame_queue = queue.Queue(maxsize=30)  # Adjust queue size based on memory constraints
+
+# Thread control flag
+processing_active = True
+
 def extract_first_frame(stream_url):
     """
     Extracts the first available frame from the IP camera stream and returns it as a PIL image.
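
Note: this hunk drops the global OpenCV thread tuning and instead sets up a producer/consumer handoff: a bounded `queue.Queue` carries annotated frames from a capture/inference thread to the display loop, with `processing_active` as a stop flag. A minimal, self-contained sketch of that pattern, using integers as stand-in frames and mirroring the commit's drop-when-full policy:

```python
import queue
import threading
import time

frame_queue = queue.Queue(maxsize=30)  # bounded: a slow consumer drops frames instead of growing memory
processing_active = True

def producer():
    # Stand-in for the capture/inference thread.
    global processing_active
    for i in range(50):
        if not frame_queue.full():  # same drop-when-full policy as the commit
            frame_queue.put(i)
        time.sleep(0.001)
    processing_active = False  # signal the consumer that the stream ended

threading.Thread(target=producer).start()

# Stand-in for the display loop: poll the queue, back off briefly when empty.
received = []
while processing_active or not frame_queue.empty():
    try:
        received.append(frame_queue.get(timeout=0.03))
    except queue.Empty:
        pass
print(f"consumed {len(received)} frames")
```
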
@@ -63,46 +56,27 @@ def update_line(image, evt: gr.SelectData):
     """
     global start_point, end_point, line_params
 
-    # If it's the first click, set the start point and show it on the image
     if start_point is None:
         start_point = (evt.index[0], evt.index[1])
-
-        # Draw the start point on the image
         draw = ImageDraw.Draw(image)
-        draw.ellipse(
-            (start_point[0] - 5, start_point[1] - 5, start_point[0] + 5, start_point[1] + 5),
-            fill="blue", outline="blue"
-        )
-
+        draw.ellipse((start_point[0] - 5, start_point[1] - 5, start_point[0] + 5, start_point[1] + 5), fill="blue", outline="blue")
         return image, f"Line Coordinates:\nStart: {start_point}, End: None"
 
-    # If it's the second click, set the end point and draw the line
     end_point = (evt.index[0], evt.index[1])
-
-    # Calculate the slope (m) and intercept (b) of the line: y = mx + b
     if start_point[0] != end_point[0]:  # Avoid division by zero
         slope = (end_point[1] - start_point[1]) / (end_point[0] - start_point[0])
         intercept = start_point[1] - slope * start_point[0]
-        line_params = (slope, intercept, start_point, end_point)  # Store slope, intercept, and points
+        line_params = (slope, intercept, start_point, end_point)
     else:
-        # Vertical line (special case)
         line_params = (float('inf'), start_point[0], start_point, end_point)
 
-    # Draw the line and end point on the image
     draw = ImageDraw.Draw(image)
     draw.line([start_point, end_point], fill="red", width=2)
-    draw.ellipse(
-        (end_point[0] - 5, end_point[1] - 5, end_point[0] + 5, end_point[1] + 5),
-        fill="green", outline="green"
-    )
+    draw.ellipse((end_point[0] - 5, end_point[1] - 5, end_point[0] + 5, end_point[1] + 5), fill="green", outline="green")
 
-    # Return the updated image and line info
     line_info = f"Line Coordinates:\nStart: {start_point}, End: {end_point}\nLine Equation: y = {line_params[0]:.2f}x + {line_params[1]:.2f}"
-
-    # Reset the points for the next interaction
    start_point = None
    end_point = None
-
     return image, line_info
 
 def reset_line():
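
Note: `update_line` turns two clicks into the line y = mx + b that the crossing test uses later, with a vertical-line sentinel (`float('inf')`) when both clicks share an x-coordinate. A quick worked check of that arithmetic with arbitrary example clicks:

```python
# Two hypothetical clicks on the frame, (x, y) in pixels.
start_point = (100, 50)
end_point = (300, 150)

# Same slope/intercept computation as update_line.
slope = (end_point[1] - start_point[1]) / (end_point[0] - start_point[0])  # (150 - 50) / (300 - 100) = 0.5
intercept = start_point[1] - slope * start_point[0]                        # 50 - 0.5 * 100 = 0.0
print(f"y = {slope:.2f}x + {intercept:.2f}")  # y = 0.50x + 0.00

# Both clicks lie on the line they define.
assert start_point[1] == slope * start_point[0] + intercept
assert end_point[1] == slope * end_point[0] + intercept
```
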
@@ -115,182 +89,96 @@ def reset_line():
     line_params = None
     return None, "Line reset. Click to draw a new line."
 
-def intersect(A, B, C, D):
-    """
-    Determines if two line segments AB and CD intersect.
-    """
-    def ccw(A, B, C):
-        return (C[1] - A[1]) * (B[0] - A[0]) - (B[1] - A[1]) * (C[0] - A[0])
-
-    def on_segment(A, B, C):
-        if min(A[0], B[0]) <= C[0] <= max(A[0], B[0]) and min(A[1], B[1]) <= C[1] <= max(A[1], B[1]):
-            return True
-        return False
-
-    # Check if the line segments intersect
-    ccw1 = ccw(A, B, C)
-    ccw2 = ccw(A, B, D)
-    ccw3 = ccw(C, D, A)
-    ccw4 = ccw(C, D, B)
-
-    if ((ccw1 * ccw2 < 0) and (ccw3 * ccw4 < 0)):
-        return True
-    elif ccw1 == 0 and on_segment(A, B, C):
-        return True
-    elif ccw2 == 0 and on_segment(A, B, D):
-        return True
-    elif ccw3 == 0 and on_segment(C, D, A):
-        return True
-    elif ccw4 == 0 and on_segment(C, D, B):
-        return True
-    else:
-        return False
-
 def is_object_crossing_line(box, line_params):
     """
     Determines if an object's bounding box is fully intersected by the user-drawn line.
     """
     _, _, line_start, line_end = line_params
-
-    # Get the bounding box coordinates
     x1, y1, x2, y2 = box
-
-    # Define the four edges of the bounding box
-    box_edges = [
-        ((x1, y1), (x2, y1)),  # Top edge
-        ((x2, y1), (x2, y2)),  # Right edge
-        ((x2, y2), (x1, y2)),  # Bottom edge
-        ((x1, y2), (x1, y1))   # Left edge
-    ]
-
-    # Count the number of intersections between the line and the bounding box edges
+    box_edges = [((x1, y1), (x2, y1)), ((x2, y1), (x2, y2)), ((x2, y2), (x1, y2)), ((x1, y2), (x1, y1))]
     intersection_count = 0
     for edge_start, edge_end in box_edges:
         if intersect(line_start, line_end, edge_start, edge_end):
             intersection_count += 1
-
-    # Only count the object if the line intersects the bounding box at least twice
     return intersection_count >= 2
 
-def draw_angled_line(image, line_params, color=(0, 255, 0), thickness=2):
-    """
-    Draws the user-defined line on the frame.
-    """
-    _, _, start_point, end_point = line_params
-    cv2.line(image, start_point, end_point, color, thickness)
-
-def detect_and_draw(frame):
-    """
-    Processes the frame in low resolution and scales the results back to high resolution.
-    """
-    # Create low-res copy
-    low_res_frame = cv2.resize(frame, LOW_RES)
-
-    # Perform detection on the low-res frame
-    results = model(low_res_frame, verbose=False)
-
-    # Calculate scaling factors for bounding boxes
-    scale_x = frame.shape[1] / LOW_RES[0]
-    scale_y = frame.shape[0] / LOW_RES[1]
-
-    # Draw bounding boxes on the high-res frame
-    for detection in results[0].boxes.data:
-        x1, y1, x2, y2, conf, cls = detection
-        # Scale bounding box coordinates to high-res
-        x1, y1, x2, y2 = int(x1 * scale_x), int(y1 * scale_y), int(x2 * scale_x), int(y2 * scale_y)
-        label = f"{results[0].names[int(cls)]} {conf:.2f}"
-        # Draw the bounding box and label on the high-res frame
-        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
-        cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
-
-    return frame
-
-def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
+def intersect(A, B, C, D):
     """
-    Processes the IP camera stream to count objects of the selected classes crossing the line.
+    Determines if two line segments AB and CD intersect.
     """
-    global line_params
-
-    errors = []
+    def ccw(A, B, C):
+        return (C[1] - A[1]) * (B[0] - A[0]) - (B[1] - A[1]) * (C[0] - A[0])
 
-    if line_params is None:
-        errors.append("Error: No line drawn. Please draw a line on the first frame.")
-    if selected_classes is None or len(selected_classes) == 0:
-        errors.append("Error: No classes selected. Please select at least one class to detect.")
-    if stream_url is None or stream_url.strip() == "":
-        errors.append("Error: No stream URL provided.")
+    def on_segment(A, B, C):
+        return min(A[0], B[0]) <= C[0] <= max(A[0], B[0]) and min(A[1], B[1]) <= C[1] <= max(A[1], B[1])
 
-    if errors:
-        return None, "\n".join(errors)
+    ccw1 = ccw(A, B, C)
+    ccw2 = ccw(A, B, D)
+    ccw3 = ccw(C, D, A)
+    ccw4 = ccw(C, D, B)
+    return ((ccw1 * ccw2 < 0) and (ccw3 * ccw4 < 0)) or (ccw1 == 0 and on_segment(A, B, C)) or (ccw2 == 0 and on_segment(A, B, D)) or (ccw3 == 0 and on_segment(C, D, A)) or (ccw4 == 0 and on_segment(C, D, B))
 
-    logger.info("Connecting to the IP camera stream...")
+def process_frames(stream_url, confidence_threshold, selected_classes):
+    """
+    Processes frames in a separate thread and adds them to the frame queue.
+    """
+    global processing_active, frame_queue
     cap = cv2.VideoCapture(stream_url)
-    if not cap.isOpened():
-        errors.append("Error: Could not open stream.")
-        return None, "\n".join(errors)
-
     model = YOLO(model="yolo11n.pt")
     crossed_objects = {}
-    max_tracked_objects = 1000  # Maximum number of objects to track before clearing
 
-    logger.info("Starting to process the stream...")
-    while cap.isOpened():
+    while processing_active and cap.isOpened():
         ret, frame = cap.read()
         if not ret:
-            errors.append("Error: Could not read frame from the stream.")
            break
 
-        # Perform object tracking with confidence threshold
-        results = model.track(frame, persist=True, conf=confidence_threshold)
-
-        if results[0].boxes.id is not None:
-            track_ids = results[0].boxes.id.int().cpu().tolist()
-            clss = results[0].boxes.cls.cpu().tolist()
-            boxes = results[0].boxes.xyxy.cpu()
-            confs = results[0].boxes.conf.cpu().tolist()
-
-            for box, cls, t_id, conf in zip(boxes, clss, track_ids, confs):
-                if conf >= confidence_threshold and model.names[cls] in selected_classes:
-                    # Check if the object crosses the line
-                    if is_object_crossing_line(box, line_params) and t_id not in crossed_objects:
-                        crossed_objects[t_id] = True
+        # Perform detection on low-res frame
+        low_res_frame = cv2.resize(frame, LOW_RES)
+        results = model.track(low_res_frame, persist=True, conf=confidence_threshold)
 
-        # Clear the dictionary if it gets too large
-        if len(crossed_objects) > max_tracked_objects:
-            crossed_objects.clear()
+        # Scale bounding boxes to high-res
+        scale_x = frame.shape[1] / LOW_RES[0]
+        scale_y = frame.shape[0] / LOW_RES[1]
+        for detection in results[0].boxes.data:
+            x1, y1, x2, y2, conf, cls = detection
+            x1, y1, x2, y2 = int(x1 * scale_x), int(y1 * scale_y), int(x2 * scale_x), int(y2 * scale_y)
+            if is_object_crossing_line((x1, y1, x2, y2), line_params):
+                crossed_objects[results[0].boxes.id.int().cpu().tolist()[0]] = True
 
-        # Visualize the results with bounding boxes, masks, and IDs
-        annotated_frame = detect_and_draw(frame)
+        # Draw bounding boxes and line on the frame
+        annotated_frame = results[0].plot()
+        if line_params:
+            draw_angled_line(annotated_frame, line_params, color=(0, 255, 0), thickness=2)
 
-        # Draw the angled line on the frame
-        draw_angled_line(annotated_frame, line_params, color=(0, 255, 0), thickness=2)
+        # Add frame to the queue
+        if not frame_queue.full():
+            frame_queue.put(annotated_frame)
 
-        # Display the count on the frame with a modern look
-        count = len(crossed_objects)
-        (text_width, text_height), _ = cv2.getTextSize(f"COUNT: {count}", cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
-
-        # Calculate the position for the middle of the top
-        margin = 10  # Margin from the top
-        x = (annotated_frame.shape[1] - text_width) // 2  # Center-align the text horizontally
-        y = text_height + margin  # Top-align the text
-
-        # Draw the black background rectangle
-        cv2.rectangle(annotated_frame, (x - margin, y - text_height - margin), (x + text_width + margin, y + margin), (0, 0, 0), -1)
-
-        # Draw the text
-        cv2.putText(annotated_frame, f"COUNT: {count}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+    cap.release()
 
-        # Yield the annotated frame to Gradio
-        yield annotated_frame, ""
+def draw_angled_line(image, line_params, color=(0, 255, 0), thickness=2):
+    """
+    Draws the user-defined line on the frame.
+    """
+    _, _, start_point, end_point = line_params
+    cv2.line(image, start_point, end_point, color, thickness)
 
-    cap.release()
-    logger.info("Stream processing completed.")
+def display_frames():
+    """
+    Displays frames from the queue at a consistent frame rate.
+    """
+    while processing_active:
+        if not frame_queue.empty():
+            frame = frame_queue.get()
+            yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), ""
+        else:
+            time.sleep(0.03)  # Wait for the next frame
 
 # Define the Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("<h1>Real-time monitoring, object tracking, and line-crossing detection for CCTV camera streams.</h1></center>")
     gr.Markdown("## https://github.com/SanshruthR/CCTV_SENTRY_YOLO11")
-
+
     # Step 1: Enter the IP Camera Stream URL
     stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s104.ipcamlive.com/streams/68idokwtondsqpmkr/stream.m3u8", visible=False)
 
@@ -300,33 +188,33 @@ with gr.Blocks() as demo:
     if first_frame is None:
         gr.Markdown(f"**Error:** {status}")
     else:
-        # Image component for displaying the first frame
         image = gr.Image(value=first_frame, label="First Frame of Stream", type="pil")
-
         line_info = gr.Textbox(label="Line Coordinates", value="Line Coordinates:\nStart: None, End: None")
         image.select(update_line, inputs=image, outputs=[image, line_info])
 
         # Step 2: Select classes to detect
         gr.Markdown("### Step 2: Select Classes to Detect")
-        model = YOLO(model="yolo11n.pt")  # Load the model to get class names
-        class_names = list(model.names.values())  # Get class names
+        model = YOLO(model="yolo11n.pt")
+        class_names = list(model.names.values())
         selected_classes = gr.CheckboxGroup(choices=class_names, label="Select Classes to Detect")
 
-        # Step 3: Adjust confidence threshold
+        # Step 3: Adjust confidence threshold
         gr.Markdown("### Step 3: Adjust Confidence Threshold (Optional)")
         confidence_threshold = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Confidence Threshold")
 
         # Process the stream
         process_button = gr.Button("Process Stream")
-
-        # Output image for real-time frame rendering
         output_image = gr.Image(label="Processed Frame", streaming=True)
-
-        # Error box to display warnings/errors
         error_box = gr.Textbox(label="Errors/Warnings", interactive=False)
 
         # Event listener for processing the video
-        process_button.click(process_video, inputs=[confidence_threshold, selected_classes, stream_url], outputs=[output_image, error_box])
+        process_button.click(
+            fn=lambda: (setattr(globals(), "processing_active", True), threading.Thread(target=process_frames, args=(stream_url.value, confidence_threshold.value, selected_classes.value)).start()),
+            outputs=None
+        )
+
+        # Display frames
+        demo.load(display_frames, inputs=None, outputs=[output_image, error_box], every=0.03)
 
 # Launch the interface
 demo.launch(debug=True)
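
Note: as written, the new click wiring looks fragile: `setattr(globals(), "processing_active", True)` raises `AttributeError` at click time because `globals()` returns a plain dict, and `stream_url.value` / `confidence_threshold.value` / `selected_classes.value` read each component's initial value rather than the user's current input. A sketch of the more conventional Gradio pattern, passing the components as `inputs` to a named handler; it assumes the components and `process_frames` from this diff are in scope, and `start_processing` is an illustrative name, not from the commit:

```python
import threading

def start_processing(url, conf, classes):
    # Gradio passes the components' *current* values as arguments.
    global processing_active
    processing_active = True  # plain global assignment instead of setattr(globals(), ...)
    threading.Thread(
        target=process_frames,   # the producer thread defined in this commit
        args=(url, conf, classes),
        daemon=True,             # don't keep the interpreter alive on shutdown
    ).start()
    return ""                    # clear the error box

process_button.click(
    start_processing,
    inputs=[stream_url, confidence_threshold, selected_classes],
    outputs=[error_box],
)
```
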