muhammadsalmanalfaridzi committed
Commit 11eefb3 · verified · 1 Parent(s): 2824080

Update app.py

Files changed (1)
  1. app.py +36 -25
app.py CHANGED
@@ -159,11 +159,8 @@ def convert_video_to_mp4(input_path, output_path):
 def detect_objects_in_video(video_path):
     temp_output_path = "/tmp/output_video.mp4"
     temp_frames_dir = tempfile.mkdtemp()
-
-    all_class_count = {}  # To store cumulative counts for all classes (from previous frames)
-    last_frame_objects = {}  # To store the objects detected in the last frame
     frame_count = 0
-    count_text = ""  # Initialize count_text properly
+    previous_detections = {}  # To store object detections from the previous frame
 
     try:
         # Convert video to MP4 if necessary
@@ -195,35 +192,46 @@ def detect_objects_in_video(video_path):
             # Process predictions for the current frame
             predictions = yolo_model.predict(frame_path, confidence=50, overlap=80).json()
 
-            # Get current frame object counts
-            current_frame_objects = {}
+            # Track current frame detections
+            current_detections = {}
             for prediction in predictions['predictions']:
                 class_name = prediction['class']
-                current_frame_objects[class_name] = current_frame_objects.get(class_name, 0) + 1
-
-                # Draw bounding boxes on the frame
                 x, y, w, h = prediction['x'], prediction['y'], prediction['width'], prediction['height']
-                cv2.rectangle(frame, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (0,255,0), 2)
-                cv2.putText(frame, class_name, (int(x-w/2), int(y-h/2-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2)
+                # Generate a unique ID for each detection (can use coordinates or other method)
+                object_id = f"{class_name}_{x}_{y}"
 
-            # Update all class counts: Add objects detected in this frame and remove objects not detected in this frame
-            for class_name, count in current_frame_objects.items():
-                # Add to all_class_count
-                all_class_count[class_name] = all_class_count.get(class_name, 0) + count
+                if object_id not in current_detections:
+                    current_detections[object_id] = class_name
 
-            # Now check for objects that were in last frame but not in the current frame
-            for class_name in list(all_class_count.keys()):
-                # If the object was not detected in the current frame, reduce its count
-                if class_name not in current_frame_objects:
-                    all_class_count[class_name] -= 1
-                    if all_class_count[class_name] <= 0:
-                        del all_class_count[class_name]  # Remove from all_class_count if count drops to 0
+                # Draw bounding box for detected objects
+                cv2.rectangle(frame, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (0,255,0), 2)
+                cv2.putText(frame, class_name, (int(x-w/2), int(y-h/2-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2)
 
-            # Update count_text with current object count
+            # Calculate the changes from previous detections
+            removed_objects = set(previous_detections.keys()) - set(current_detections.keys())
+            new_objects = set(current_detections.keys()) - set(previous_detections.keys())
+
+            # Update counts for objects
+            object_counts = {}
+            for detection_id in current_detections.keys():
+                class_name = current_detections[detection_id]
+                object_counts[class_name] = object_counts.get(class_name, 0) + 1
+
+            # Update object counts based on removed objects
+            for detection_id in removed_objects:
+                class_name = previous_detections[detection_id]
+                if class_name in object_counts:
+                    object_counts[class_name] -= 1
+                    if object_counts[class_name] <= 0:
+                        del object_counts[class_name]
+
+            # Generate display text for counts
             count_text = ""
-            for class_name, count in all_class_count.items():
+            total_product_count = 0
+            for class_name, count in object_counts.items():
                 count_text += f"{class_name}: {count}\n"
-            count_text += f"\nTotal Products: {sum(all_class_count.values())}"
+                total_product_count += count
+            count_text += f"\nTotal Product: {total_product_count}"
 
             # Overlay the counts text onto the frame
             y_offset = 20
@@ -235,6 +243,9 @@
             output_video.write(frame)
             frame_count += 1
 
+            # Update previous_detections for the next frame
+            previous_detections = current_detections
+
         video.release()
         output_video.release()
 
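
For reference, here is a minimal standalone sketch of the counting logic this commit introduces. It is not part of app.py: the helper name count_frame_detections and the sample prediction dicts are hypothetical, and the input is assumed to be the list found under predictions['predictions'] in the Roboflow JSON returned by yolo_model.predict(...).json().

# Minimal sketch (not from app.py): the per-frame counting behaviour added in this
# commit, reproduced with plain dicts so it runs without cv2, Roboflow, or a video file.

def count_frame_detections(predictions, previous_detections):
    """Return (object_counts, current_detections) for one frame.

    predictions: list of dicts shaped like the Roboflow output
                 ({'class', 'x', 'y', 'width', 'height'}).
    previous_detections: object_id -> class_name map from the prior frame.
    """
    current_detections = {}
    for prediction in predictions:
        class_name = prediction['class']
        x, y = prediction['x'], prediction['y']
        # Same ID scheme as the commit: class name plus raw centre coordinates.
        object_id = f"{class_name}_{x}_{y}"
        current_detections.setdefault(object_id, class_name)

    # Count detections in this frame, per class.
    object_counts = {}
    for class_name in current_detections.values():
        object_counts[class_name] = object_counts.get(class_name, 0) + 1

    # Decrement counts for IDs present in the last frame but missing now.
    removed_objects = set(previous_detections) - set(current_detections)
    for object_id in removed_objects:
        class_name = previous_detections[object_id]
        if class_name in object_counts:
            object_counts[class_name] -= 1
            if object_counts[class_name] <= 0:
                del object_counts[class_name]

    return object_counts, current_detections


# Two frames of the same (hypothetical) product whose centre shifts slightly:
frame1 = [{'class': 'bottle', 'x': 100, 'y': 50, 'width': 40, 'height': 80}]
frame2 = [{'class': 'bottle', 'x': 103, 'y': 51, 'width': 40, 'height': 80}]

counts1, prev = count_frame_detections(frame1, {})
counts2, prev = count_frame_detections(frame2, prev)
print(counts1)  # {'bottle': 1}
print(counts2)  # {}  (the shifted centre yields a new ID, so the old ID counts as removed)

Because the object_id embeds the raw x/y centre, a detection whose centre moves between frames gets a fresh ID, so the removed-object decrement fires for it as well, as the second frame above shows. Rounding the coordinates or adding a lightweight tracker would keep per-class counts steadier, but that is outside the scope of this commit.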