Sanshruth committed
Commit 38a38c4 · verified · 1 Parent(s): 1f04902

Update app.py

Files changed (1)
  app.py +27 -34
app.py CHANGED
@@ -1,4 +1,6 @@
-# Maximize CPU usage
+##############
+
+#Maximize CPU usage
 import multiprocessing
 import cv2
 
@@ -176,7 +178,7 @@ def draw_angled_line(image, line_params, color=(0, 255, 0), thickness=2):
     _, _, start_point, end_point = line_params
     cv2.line(image, start_point, end_point, color, thickness)
 
-def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None, resolution_scale=1.0):
+def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=None):
     """
     Processes the IP camera stream to count objects of the selected classes crossing the line.
     """
@@ -211,19 +213,8 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=No
             errors.append("Error: Could not read frame from the stream.")
             break
 
-        # Get the original frame dimensions
-        original_height, original_width = frame.shape[:2]
-
-        # Resize the frame for processing
-        new_width = int(original_width * resolution_scale)
-        new_height = int(original_height * resolution_scale)
-        resized_frame = cv2.resize(frame, (new_width, new_height))
-
-        # Perform object tracking with confidence threshold on the resized frame
-        results = model.track(resized_frame, persist=True, conf=confidence_threshold)
-
-        # Create an annotator for the original frame
-        annotator = Annotator(frame, line_width=2)
+        # Perform object tracking with confidence threshold
+        results = model.track(frame, persist=True, conf=confidence_threshold)
 
         if results[0].boxes.id is not None:
             track_ids = results[0].boxes.id.int().cpu().tolist()
@@ -233,13 +224,6 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=No
 
             for box, cls, t_id, conf in zip(boxes, clss, track_ids, confs):
                 if conf >= confidence_threshold and model.names[cls] in selected_classes:
-                    # Scale the bounding box back to the original resolution
-                    box = box * (original_width / new_width)
-                    box = box.int().tolist()
-
-                    # Draw the bounding box on the original frame
-                    annotator.box_label(box, label=f"{model.names[cls]} {conf:.2f}", color=colors(cls))
-
                     # Check if the object crosses the line
                     if is_object_crossing_line(box, line_params) and t_id not in crossed_objects:
                         crossed_objects[t_id] = True
@@ -248,8 +232,11 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=No
                         if len(crossed_objects) > max_tracked_objects:
                             crossed_objects.clear()
 
-        # Draw the angled line on the original frame
-        draw_angled_line(frame, line_params, color=(0, 255, 0), thickness=2)
+        # Visualize the results with bounding boxes, masks, and IDs
+        annotated_frame = results[0].plot()
+
+        # Draw the angled line on the frame
+        draw_angled_line(annotated_frame, line_params, color=(0, 255, 0), thickness=2)
 
         # Display the count on the frame with a modern look
         count = len(crossed_objects)
@@ -257,17 +244,17 @@ def process_video(confidence_threshold=0.5, selected_classes=None, stream_url=No
 
         # Calculate the position for the middle of the top
        margin = 10 # Margin from the top
-        x = (frame.shape[1] - text_width) // 2 # Center-align the text horizontally
+        x = (annotated_frame.shape[1] - text_width) // 2 # Center-align the text horizontally
         y = text_height + margin # Top-align the text
 
         # Draw the black background rectangle
-        cv2.rectangle(frame, (x - margin, y - text_height - margin), (x + text_width + margin, y + margin), (0, 0, 0), -1)
+        cv2.rectangle(annotated_frame, (x - margin, y - text_height - margin), (x + text_width + margin, y + margin), (0, 0, 0), -1)
 
         # Draw the text
-        cv2.putText(frame, f"COUNT: {count}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+        cv2.putText(annotated_frame, f"COUNT: {count}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
 
         # Yield the annotated frame to Gradio
-        yield frame, ""
+        yield annotated_frame, ""
 
     cap.release()
     logger.info("Stream processing completed.")
@@ -277,7 +264,11 @@ with gr.Blocks() as demo:
     gr.Markdown("<h1>Real-time monitoring, object tracking, and line-crossing detection for CCTV camera streams.</h1></center>")
     gr.Markdown("## https://github.com/SanshruthR/CCTV_SENTRY_YOLO11")
 
+
+
     # Step 1: Enter the IP Camera Stream URL
+    # gr.Markdown("### Step 0: Enter the IP Camera Stream URL")
+    # stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s103.ipcamlive.com/streams/67n4ojknye7lkxpmf/stream.m3u8", visible=False)
     stream_url = gr.Textbox(label="Enter IP Camera Stream URL", value="https://s104.ipcamlive.com/streams/68idokwtondsqpmkr/stream.m3u8", visible=False)
 
     # Step 1: Extract the first frame from the stream
@@ -289,9 +280,15 @@
     # Image component for displaying the first frame
     image = gr.Image(value=first_frame, label="First Frame of Stream", type="pil")
 
+
     line_info = gr.Textbox(label="Line Coordinates", value="Line Coordinates:\nStart: None, End: None")
     image.select(update_line, inputs=image, outputs=[image, line_info])
 
+    # Reset the line (optional)
+    # gr.Markdown("### Step 4: Reset the Line (Optional)")
+    # reset_button = gr.Button("Reset Line")
+    # reset_button.click(reset_line, inputs=None, outputs=[image, line_info])
+
     # Step 2: Select classes to detect
     gr.Markdown("### Step 2: Select Classes to Detect")
     model = YOLO(model="yolo11n.pt") # Load the model to get class names
@@ -302,11 +299,7 @@ with gr.Blocks() as demo:
     gr.Markdown("### Step 3: Adjust Confidence Threshold (Optional)")
     confidence_threshold = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Confidence Threshold")
 
-    # Step 4: Adjust resolution scale
-    gr.Markdown("### Step 4: Adjust Resolution Scale (Optional)")
-    resolution_scale = gr.Slider(minimum=0.1, maximum=1.0, value=1.0, label="Resolution Scale")
-
-    # Process the stream
+    #process the stream
     process_button = gr.Button("Process Stream")
 
     # Output image for real-time frame rendering
@@ -316,7 +309,7 @@ with gr.Blocks() as demo:
     error_box = gr.Textbox(label="Errors/Warnings", interactive=False)
 
     # Event listener for processing the video
-    process_button.click(process_video, inputs=[confidence_threshold, selected_classes, stream_url, resolution_scale], outputs=[output_image, error_box])
+    process_button.click(process_video, inputs=[confidence_threshold, selected_classes, stream_url], outputs=[output_image, error_box])
 
     # Launch the interface
     demo.launch(debug=True)
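
Note: the rewritten process_video drops the resolution-scale and manual Annotator path and instead lets Ultralytics render detections via results[0].plot(). The following is a minimal, self-contained sketch of that new per-frame path, not the full app.py: the stream URL is the one from the diff, while the line endpoints are placeholder values that app.py actually derives from the Gradio line-drawing UI.

# Sketch of the per-frame path this commit switches to: track on the raw
# frame, let Ultralytics draw boxes/IDs via results[0].plot(), then overlay
# the counting line with plain OpenCV calls.
import cv2
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("https://s104.ipcamlive.com/streams/68idokwtondsqpmkr/stream.m3u8")
start_point, end_point = (100, 300), (500, 300)  # placeholder line endpoints

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Track directly on the full-resolution frame (the resize/rescale step was removed)
    results = model.track(frame, persist=True, conf=0.2)

    # Ultralytics renders boxes, labels, and track IDs; no manual Annotator needed
    annotated_frame = results[0].plot()

    # Draw the counting line on the already-annotated frame
    cv2.line(annotated_frame, start_point, end_point, (0, 255, 0), 2)

cap.release()

In app.py this loop lives inside the process_video generator, which yields each annotated frame together with an error string so that Gradio can stream it to output_image through process_button.click.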