TheKnight115 committed on
Commit
d69ce69
·
verified ·
1 Parent(s): 759bcfc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -73
app.py CHANGED
@@ -6,12 +6,8 @@ import tempfile
6
  import time
7
  from huggingface_hub import hf_hub_download
8
 
9
- def run_yolo(image):
10
- # Run the model on the image and get results
11
- results = model(image)
12
- return results
13
 
14
- # Color definitions for each class
15
  class_colors = {
16
  0: (0, 255, 0), # Green (Helmet)
17
  1: (255, 0, 0), # Blue (License Plate)
@@ -21,6 +17,12 @@ class_colors = {
21
  5: (0, 255, 255), # Yellow (Person)
22
  }
23
 
 
 
 
 
 
 
24
  def process_results(results, image):
25
  # Draw bounding boxes and labels on the image
26
  boxes = results[0].boxes # Get boxes from results
@@ -30,36 +32,48 @@ def process_results(results, image):
30
  conf = box.conf[0] # Confidence score
31
  cls = int(box.cls[0]) # Class index
32
  label = model.names[cls] # Get class name from index
33
-
34
- # Get the color for the current class
35
- color = class_colors.get(cls, (255, 255, 255)) # Default to white if class not found
36
-
37
- # Draw rectangle and label on the image with the appropriate color
38
- cv2.rectangle(image, (x1, y1), (x2, y2), color, 2) # Draw bounding box
39
- cv2.putText(image, f"{label} {conf:.2f}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) # Draw label
40
 
41
  return image
42
 
43
 
44
- def process_video(uploaded_file):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  # Create a temporary file to save the uploaded video
46
  with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
47
  temp_file.write(uploaded_file.read())
48
  temp_file_path = temp_file.name # Get the path of the temporary file
49
-
50
  # Read the video file
51
  video = cv2.VideoCapture(temp_file_path)
52
  total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) # Get the total number of frames
53
  frames = []
54
-
55
- # Create a Streamlit progress bar, text for percentage, and timer
56
- progress_bar = st.progress(0)
57
- progress_text = st.empty() # Placeholder for percentage text
58
- timer_text = st.empty() # Placeholder for the timer
59
-
60
  current_frame = 0
61
  start_time = time.time() # Start the timer
62
-
63
  while True:
64
  ret, frame = video.read()
65
  if not ret:
@@ -67,50 +81,33 @@ def process_video(uploaded_file):
67
 
68
  # Run YOLO model on the current frame
69
  results = run_yolo(frame)
70
-
71
  # Process the results and draw boxes on the current frame
72
  processed_frame = process_results(results, frame)
73
- frames.append(processed_frame) # Save the processed frame
74
-
 
 
 
75
  current_frame += 1
76
-
77
- # Calculate and display the progress
78
- progress_percentage = (current_frame / total_frames) * 100
79
- progress_bar.progress(progress_percentage / 100) # Update the progress bar
80
- progress_text.text(f'Processing: {progress_percentage:.2f}%') # Update the percentage text
81
-
82
- # Calculate and display the elapsed time
83
- elapsed_time = time.time() - start_time
84
- timer_text.text(f'Elapsed Time: {elapsed_time:.2f} seconds') # Update the timer text
85
-
86
  video.release()
87
-
88
  # Create a video writer to save the processed frames
89
  height, width, _ = frames[0].shape
90
  output_path = 'processed_video.mp4'
91
  out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), 30, (width, height))
92
 
93
  for frame in frames:
94
- out.write(frame) # Write each processed frame to the video
 
 
95
 
96
  out.release()
97
-
98
- # Complete the progress bar and show final message
99
- progress_bar.progress(100)
100
- progress_text.text('Processing: 100%')
101
- st.success('Video processing complete!')
102
-
103
- # Display the final elapsed time
104
- final_elapsed_time = time.time() - start_time
105
- timer_text.text(f'Total Elapsed Time: {final_elapsed_time:.2f} seconds')
106
-
107
- # Display the processed video
108
- st.video(output_path)
109
-
110
- # Create a download button for the processed video
111
- with open(output_path, 'rb') as f:
112
- video_bytes = f.read()
113
- st.download_button(label='Download Processed Video', data=video_bytes, file_name='processed_video.mp4', mime='video/mp4')
114
 
115
  def live_video_feed():
116
  stframe = st.empty() # Placeholder for the video stream in Streamlit
@@ -120,28 +117,32 @@ def live_video_feed():
120
  ret, frame = video.read()
121
  if not ret:
122
  break
123
-
124
  # Run YOLO model on the current frame
125
  results = run_yolo(frame)
126
-
127
  # Process the results and draw boxes on the current frame
128
  processed_frame = process_results(results, frame)
129
-
 
 
 
130
  # Display the processed frame in the Streamlit app
131
- stframe.image(processed_frame, channels="BGR", use_column_width=True)
132
-
133
- # Stop the live feed when user clicks on the "Stop" button
134
  if st.button("Stop"):
135
  break
136
 
137
  video.release()
138
 
 
139
  def main():
140
  model_file = hf_hub_download(repo_id="TheKnight115/Yolov8m", filename="yolov8_Medium.pt")
141
-
142
- global model
143
  model = YOLO(model_file)
144
-
145
  st.title("Motorbike Violation Detection")
146
 
147
  # Create a selection box for input type
@@ -152,24 +153,27 @@ def main():
152
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
153
  if uploaded_file is not None:
154
  # Process the image
155
- image = np.array(cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), 1))
156
- results = run_yolo(image)
157
-
158
- # Process the results and draw boxes on the image
159
- processed_image = process_results(results, image)
160
-
161
- # Display the processed image
162
- st.image(processed_image, caption='Detected Image', use_column_width=True)
163
 
164
  elif input_type == "Video":
165
  uploaded_file = st.file_uploader("Choose a video...", type=["mp4", "mov"])
166
  if uploaded_file is not None:
167
- # Process the video
168
- process_video(uploaded_file)
 
 
 
 
 
 
 
 
 
169
 
170
  elif input_type == "Live Feed":
171
  st.write("Live video feed from webcam. Press 'Stop' to stop the feed.")
172
  live_video_feed()
173
 
 
174
  if __name__ == "__main__":
175
- main()
 
6
  import time
7
  from huggingface_hub import hf_hub_download
8
 
 
 
 
 
9
 
10
+ # Color mapping for different classes
11
  class_colors = {
12
  0: (0, 255, 0), # Green (Helmet)
13
  1: (255, 0, 0), # Blue (License Plate)
 
17
  5: (0, 255, 255), # Yellow (Person)
18
  }
19
 
20
+
21
def run_yolo(image):
    """Run the globally-loaded YOLO model on *image* and return its raw results."""
    # Thin wrapper around the module-level `model` (assigned in main()).
    return model(image)
25
+
26
  def process_results(results, image):
27
  # Draw bounding boxes and labels on the image
28
  boxes = results[0].boxes # Get boxes from results
 
32
  conf = box.conf[0] # Confidence score
33
  cls = int(box.cls[0]) # Class index
34
  label = model.names[cls] # Get class name from index
35
+ color = class_colors.get(cls, (255, 255, 255)) # Get color for class
36
+
37
+ # Draw rectangle and label on the image
38
+ cv2.rectangle(image, (x1, y1), (x2, y2), color, 2) # Draw colored box
39
+ cv2.putText(image, f"{label} {conf:.2f}", (x1, y1 - 10),
40
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
 
41
 
42
  return image
43
 
44
 
45
def process_image(uploaded_file):
    """Decode an uploaded image, run YOLO detection on it, and display the
    annotated result in the Streamlit app.

    Args:
        uploaded_file: File-like object (e.g. a Streamlit UploadedFile)
            containing the raw image bytes.
    """
    # cv2.imdecode already returns a numpy array, so the original extra
    # np.array() wrapper only forced a useless copy — decode directly.
    image = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), 1)
    if image is None:
        # imdecode returns None for corrupt or unsupported image data;
        # the original code would have crashed further down instead.
        st.error("Could not decode the uploaded image. Please upload a valid image file.")
        return

    # Run YOLO model on the image
    results = run_yolo(image)

    # Process the results and draw boxes on the image
    processed_image = process_results(results, image)

    # OpenCV works in BGR; convert to RGB so colors render correctly in Streamlit
    processed_image_rgb = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)

    # Display the processed image in Streamlit
    st.image(processed_image_rgb, caption='Detected Image', use_column_width=True)
60
+
61
# Cache the video processing to prevent reprocessing on Streamlit reruns
@st.cache_data
def process_video_and_save(uploaded_file):
    """Run YOLO detection on every frame of an uploaded video and save the
    annotated result to disk.

    Args:
        uploaded_file: File-like object containing the raw video bytes
            (mp4/mov as offered by the uploader in main()).

    Returns:
        Path of the processed .mp4 file, or None if the video contained no
        readable frames (the original code raised IndexError in that case).
    """
    import os  # local import: only needed here, for temp-file cleanup

    # Persist the upload to a real file so OpenCV can open it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
        temp_file.write(uploaded_file.read())
        temp_file_path = temp_file.name  # Get the path of the temporary file

    # Read the video file
    video = cv2.VideoCapture(temp_file_path)
    # Preserve the source frame rate instead of the hard-coded 30 fps,
    # which played videos at the wrong speed; fall back to 30 if unknown.
    fps = video.get(cv2.CAP_PROP_FPS) or 30

    output_path = 'processed_video.mp4'
    out = None  # VideoWriter is created lazily once the first frame gives us the size

    try:
        while True:
            ret, frame = video.read()
            if not ret:
                break

            # Run YOLO model on the current frame
            results = run_yolo(frame)

            # Process the results and draw boxes on the current frame.
            # Frames stay in BGR throughout: VideoWriter expects BGR, so the
            # original BGR->RGB->BGR round-trip was a pure waste of work.
            processed_frame = process_results(results, frame)

            if out is None:
                height, width, _ = processed_frame.shape
                out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'),
                                      fps, (width, height))

            # Stream each processed frame straight to disk instead of
            # buffering the entire video in memory as the original did.
            out.write(processed_frame)
    finally:
        video.release()
        if out is not None:
            out.release()
        # delete=False above means we must remove the temp file ourselves.
        os.remove(temp_file_path)

    if out is None:
        # No frames could be read (corrupt/unsupported video).
        return None

    # Return the path of the processed video
    return output_path
110
+
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
  def live_video_feed():
113
  stframe = st.empty() # Placeholder for the video stream in Streamlit
 
117
  ret, frame = video.read()
118
  if not ret:
119
  break
120
+
121
  # Run YOLO model on the current frame
122
  results = run_yolo(frame)
123
+
124
  # Process the results and draw boxes on the current frame
125
  processed_frame = process_results(results, frame)
126
+
127
+ # Convert the frame from BGR to RGB before displaying
128
+ processed_frame_rgb = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
129
+
130
  # Display the processed frame in the Streamlit app
131
+ stframe.image(processed_frame_rgb, channels="RGB", use_column_width=True)
132
+
133
+ # Stop the live feed when the user clicks the "Stop" button
134
  if st.button("Stop"):
135
  break
136
 
137
  video.release()
138
 
139
+
140
  def main():
141
  model_file = hf_hub_download(repo_id="TheKnight115/Yolov8m", filename="yolov8_Medium.pt")
142
+
143
+ global model
144
  model = YOLO(model_file)
145
+
146
  st.title("Motorbike Violation Detection")
147
 
148
  # Create a selection box for input type
 
153
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
154
  if uploaded_file is not None:
155
  # Process the image
156
+ process_image(uploaded_file)
 
 
 
 
 
 
 
157
 
158
  elif input_type == "Video":
159
  uploaded_file = st.file_uploader("Choose a video...", type=["mp4", "mov"])
160
  if uploaded_file is not None:
161
+ # Process and save the video
162
+ output_path = process_video_and_save(uploaded_file)
163
+
164
+ # Display the processed video
165
+ st.video(output_path)
166
+
167
+ # Provide a download button for the processed video
168
+ with open(output_path, 'rb') as f:
169
+ video_bytes = f.read()
170
+ st.download_button(label='Download Processed Video',
171
+ data=video_bytes, file_name='processed_video.mp4', mime='video/mp4')
172
 
173
  elif input_type == "Live Feed":
174
  st.write("Live video feed from webcam. Press 'Stop' to stop the feed.")
175
  live_video_feed()
176
 
177
+
178
  if __name__ == "__main__":
179
+ main()