mlbench123 committed on
Commit
042a019
·
verified ·
1 Parent(s): ddecbf3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +194 -31
app.py CHANGED
@@ -586,12 +586,13 @@ def process_video(
586
  correct_dir_l1 = (curr_sign_l1 == L1_inside_sign)
587
 
588
  # Method 3: Close proximity check (catches near-misses)
589
- close_to_l1 = abs(curr_l1_dist) < 40 # within 40 pixels
590
- was_far_l1 = abs(prev_l1) > 20 # was at least 20 pixels away
591
  moving_toward_l1 = abs(curr_l1_dist) < abs(prev_l1) # getting closer
592
 
593
  # Trigger L1 crossing if ANY method detects it
594
- if (inter_l1 or (sign_change_l1 and correct_dir_l1)):
 
595
  if inside and not crossed_l1_flag.get(tid, False):
596
  crossed_l1_flag[tid] = True
597
  print(f"L1 crossed by ID {tid}")
@@ -617,7 +618,8 @@ def process_video(
617
 
618
  # Trigger L2 crossing if ANY method detects it
619
  if (inter_l2 or
620
- (sign_change_l2 and correct_dir_l2)):
 
621
  # Count only if L1 was already crossed and not yet counted
622
  if inside and crossed_l1_flag.get(tid, False) and not crossed_l2_counted.get(tid, False):
623
  global_counter += 1
@@ -914,36 +916,197 @@ def process_video(
914
 
915
  df = pd.DataFrame(rows, columns=["Person","Time in","Time out","Time in queue (seconds)"])
916
  if len(df) > 0:
917
- df.to_excel("person_times.xlsx", index=False)
918
  else:
919
- pd.DataFrame(columns=["Passenger","Time in","Time out","Time in queue (seconds)"]).to_excel("person_times.xlsx", index=False)
920
 
921
  print("\nFinished. Output:", os.path.abspath(output_video_path))
922
- print("Saved times:", os.path.abspath("person_times.xlsx"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
923
 
924
- # ---------------- Runner
925
  if __name__ == "__main__":
926
- CONFIG = {
927
- 'input_video_path': "sample_vid.mp4",
928
- 'output_video_path': "output23.avi",
929
- 'model_name': "yolo11x.pt",
930
- 'head_model_name': "head_detection_single_video_best.pt",
931
- 'conf_threshold': 0.3,
932
- 'img_size': 1280,
933
- 'use_gpu': True,
934
- 'enhance_frames': False,
935
- 'smooth_bbox_tracks': True,
936
- 'missing_timeout': 3.0
937
- }
938
- process_video(
939
- input_video_path = CONFIG['input_video_path'],
940
- output_video_path = CONFIG['output_video_path'],
941
- model_name = CONFIG['model_name'],
942
- head_model_name = CONFIG['head_model_name'],
943
- conf_threshold = CONFIG['conf_threshold'],
944
- img_size = CONFIG['img_size'],
945
- use_gpu = CONFIG['use_gpu'],
946
- enhance_frames = CONFIG['enhance_frames'],
947
- smooth_bbox_tracks = CONFIG['smooth_bbox_tracks'],
948
- missing_timeout = CONFIG['missing_timeout']
949
  )
 
586
  correct_dir_l1 = (curr_sign_l1 == L1_inside_sign)
587
 
588
  # Method 3: Close proximity check (catches near-misses)
589
+ close_to_l1 = abs(curr_l1_dist) < 35 # within 35 pixels
590
+ was_far_l1 = abs(prev_l1) > 40 # was at least 40 pixels away
591
  moving_toward_l1 = abs(curr_l1_dist) < abs(prev_l1) # getting closer
592
 
593
  # Trigger L1 crossing if ANY method detects it
594
+ if (inter_l1 or (sign_change_l1 and correct_dir_l1) or
595
+ (close_to_l1 and was_far_l1 and moving_toward_l1 and correct_dir_l1)):
596
  if inside and not crossed_l1_flag.get(tid, False):
597
  crossed_l1_flag[tid] = True
598
  print(f"L1 crossed by ID {tid}")
 
618
 
619
  # Trigger L2 crossing if ANY method detects it
620
  if (inter_l2 or
621
+ (sign_change_l2 and correct_dir_l2) or
622
+ (close_to_l2 and was_far_l2 and moving_toward_l2 and correct_dir_l2)):
623
  # Count only if L1 was already crossed and not yet counted
624
  if inside and crossed_l1_flag.get(tid, False) and not crossed_l2_counted.get(tid, False):
625
  global_counter += 1
 
916
 
917
  df = pd.DataFrame(rows, columns=["Person","Time in","Time out","Time in queue (seconds)"])
918
  if len(df) > 0:
919
+ df.to_excel("person_times_2.xlsx", index=False)
920
  else:
921
+ pd.DataFrame(columns=["Passenger","Time in","Time out","Time in queue (seconds)"]).to_excel("person_times_2.xlsx", index=False)
922
 
923
  print("\nFinished. Output:", os.path.abspath(output_video_path))
924
+ print("Saved times:", os.path.abspath("person_times_2.xlsx"))
925
+
926
+ # # ---------------- Runner
927
+ # if __name__ == "__main__":
928
+ # CONFIG = {
929
+ # 'input_video_path': "sample_vid_o.mp4",
930
+ # 'output_video_path': "output24.avi",
931
+ # 'model_name': "yolo11x.pt",
932
+ # 'head_model_name': "head_detection_single_video_best.pt",
933
+ # 'conf_threshold': 0.3,
934
+ # 'img_size': 1280,
935
+ # 'use_gpu': True,
936
+ # 'enhance_frames': False,
937
+ # 'smooth_bbox_tracks': True,
938
+ # 'missing_timeout': 3.0
939
+ # }
940
+ # process_video(
941
+ # input_video_path = CONFIG['input_video_path'],
942
+ # output_video_path = CONFIG['output_video_path'],
943
+ # model_name = CONFIG['model_name'],
944
+ # head_model_name = CONFIG['head_model_name'],
945
+ # conf_threshold = CONFIG['conf_threshold'],
946
+ # img_size = CONFIG['img_size'],
947
+ # use_gpu = CONFIG['use_gpu'],
948
+ # enhance_frames = CONFIG['enhance_frames'],
949
+ # smooth_bbox_tracks = CONFIG['smooth_bbox_tracks'],
950
+ # missing_timeout = CONFIG['missing_timeout']
951
+ # )
952
+
953
+
954
+ # ---------------- Gradio Interface
955
+ import gradio as gr
956
+ import tempfile
957
+ import shutil
958
+
959
def gradio_process_video(input_video, conf_threshold=0.3, missing_timeout=3.0):
    """Run the queue-tracking pipeline on an uploaded video (Gradio wrapper).

    Parameters
    ----------
    input_video : str
        Path to the uploaded video file (supplied by the gr.Video input).
    conf_threshold : float
        Detection confidence threshold forwarded to ``process_video``.
    missing_timeout : float
        Seconds to wait before a disappeared track is considered gone.

    Returns
    -------
    tuple
        ``(processed_video_path, excel_report_path)``. The excel entry is
        ``None`` when no report file was produced; ``(None, None)`` on error.
    """
    try:
        # Fresh temp dir per run so outputs from concurrent requests don't collide.
        temp_dir = tempfile.mkdtemp()
        output_video_path = os.path.join(temp_dir, "output_tracking.mp4")
        excel_path = os.path.join(temp_dir, "person_times.xlsx")

        # process_video writes its report to this fixed filename in the CWD.
        original_excel = "person_times_2.xlsx"

        process_video(
            input_video_path=input_video,
            output_video_path=output_video_path,
            model_name="yolo11x.pt",
            head_model_name="head_detection_single_video_best.pt",
            conf_threshold=float(conf_threshold),
            img_size=1280,
            use_gpu=torch.cuda.is_available(),
            enhance_frames=False,
            smooth_bbox_tracks=True,
            missing_timeout=float(missing_timeout),
        )

        # Copy the report next to the processed video. If it was never written,
        # return None instead of a dangling path (bug fix: the previous version
        # returned excel_path even when the file did not exist, which breaks
        # the gr.File output component).
        if os.path.exists(original_excel):
            shutil.copy(original_excel, excel_path)
        else:
            excel_path = None

        return output_video_path, excel_path

    except Exception as e:
        # UI boundary: log the full traceback and surface empty outputs
        # rather than crashing the Gradio worker.
        print(f"Error processing video: {str(e)}")
        import traceback
        traceback.print_exc()
        return None, None
1012
+
1013
# ---------------- Build the Gradio UI
with gr.Blocks(title="Queue Tracking System") as demo:
    # Header / instructions shown above the controls.
    gr.Markdown(
        """
        # 🎯 Queue Tracking & Analytics System

        Upload a video to track people in a defined polygon area. The system will:
        - Track people entering and exiting the zone
        - Count directional crossings through L1 and L2 lines
        - Calculate time spent in queue
        - Measure travel distance
        - Detect both full body and head-only detections

        **Note:** Processing may take several minutes depending on video length.
        """
    )

    with gr.Row():
        # Left column: inputs and tuning knobs.
        with gr.Column():
            upload_video = gr.Video(label="Upload Video", format="mp4")

            conf_slider = gr.Slider(
                minimum=0.1,
                maximum=0.9,
                value=0.3,
                step=0.05,
                label="Detection Confidence Threshold",
                info="Lower values detect more objects but may include false positives",
            )

            timeout_slider = gr.Slider(
                minimum=1.0,
                maximum=10.0,
                value=3.0,
                step=0.5,
                label="Missing Timeout (seconds)",
                info="How long to wait before considering a person has left the zone",
            )

            run_btn = gr.Button("🚀 Process Video", variant="primary", size="lg")

        # Right column: results.
        with gr.Column():
            out_video = gr.Video(label="Processed Video with Tracking", format="mp4")

            out_report = gr.File(label="Download Excel Report", file_types=[".xlsx"])

            gr.Markdown(
                """
                ### 📊 Output Information:
                - **Processed Video**: Shows tracking overlay with IDs, polygon area, and crossing lines
                - **Excel Report**: Contains entry/exit times and queue duration for each person
                """
            )

    gr.Markdown(
        """
        ---
        ### 🔧 Technical Details:
        - Uses YOLO11x for person detection
        - Custom head detection model for occlusion handling
        - Homographic transformation for accurate spatial mapping
        - ByteTrack for robust ID tracking
        - Directional crossing detection (L1 → L2)
        """
    )

    # Wire the button to the processing wrapper.
    run_btn.click(
        fn=gradio_process_video,
        inputs=[upload_video, conf_slider, timeout_slider],
        outputs=[out_video, out_report],
    )

    # Sample inputs (requires the example video to be present in the repo).
    gr.Examples(
        examples=[
            ["sample_vid_o.mp4", 0.3, 3.0],
        ],
        inputs=[upload_video, conf_slider, timeout_slider],
        outputs=[out_video, out_report],
        fn=gradio_process_video,
        cache_examples=False,
    )
1105
 
1106
# ---------------- Entry point
if __name__ == "__main__":
    # share=False: no temporary public gradio.live link.
    # server_name="0.0.0.0" and port 7860: bind on all interfaces at the
    # default Hugging Face Spaces port so the Space can route traffic.
    demo.launch(share=False, server_name="0.0.0.0", server_port=7860)