scfive committed on
Commit 65be584 · verified · 1 Parent(s): 9331e4d

Update app.py

Files changed (1)
  1. app.py +15 -24
app.py CHANGED
@@ -1,57 +1,48 @@
 import gradio as gr
-import torch
-from PIL import Image
 import cv2
+from PIL import Image
 import numpy as np
+from ultralytics import YOLO
 from huggingface_hub import hf_hub_download
 
-# Load the model from Hugging Face
+# Download the model from Hugging Face
 model_path = hf_hub_download(repo_id="StephanST/WALDO30", filename="WALDO30_yolov8m_640x640.pt")
-model = torch.hub.load('ultralytics/yolov8', 'custom', path=model_path)
+model = YOLO(model_path)  # Load YOLOv8 model
 
 # Detection function for images
 def detect_on_image(image):
-    results = model(image)
-    results.render()  # Render the bounding boxes on the image
-    detected_img = Image.fromarray(results.imgs[0])  # Convert to PIL format
-    return detected_img
+    results = model(image)  # Perform detection
+    annotated_frame = results[0].plot()  # Get annotated image
+    return Image.fromarray(annotated_frame)
 
 # Detection function for videos
 def detect_on_video(video):
     temp_video_path = "processed_video.mp4"
     cap = cv2.VideoCapture(video)
     fourcc = cv2.VideoWriter_fourcc(*"mp4v")
-    out = cv2.VideoWriter(temp_video_path, fourcc, cap.get(cv2.CAP_PROP_FPS),
+    out = cv2.VideoWriter(temp_video_path, fourcc, cap.get(cv2.CAP_PROP_FPS),
                           (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
 
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
-        results = model(frame)  # Run detection
-        results.render()
-        frame = np.squeeze(results.imgs[0])  # Extract processed frame
-        out.write(frame)  # Write frame to output video
+        results = model(frame)  # Perform detection
+        annotated_frame = results[0].plot()  # Get annotated frame
+        out.write(annotated_frame)
 
     cap.release()
     out.release()
     return temp_video_path
 
-# Create Gradio Interface
-image_input = gr.inputs.Image(type="pil", label="Upload Image")
-video_input = gr.inputs.Video(type="file", label="Upload Video")
-
-image_output = gr.outputs.Image(type="pil", label="Detected Image")
-video_output = gr.outputs.Video(label="Detected Video")
-
+# Gradio Interface
 app = gr.Interface(
     fn=[detect_on_image, detect_on_video],
-    inputs=[image_input, video_input],
-    outputs=[image_output, video_output],
+    inputs=[gr.inputs.Image(type="pil", label="Upload Image"), gr.inputs.Video(type="file", label="Upload Video")],
+    outputs=[gr.outputs.Image(type="pil", label="Detected Image"), gr.outputs.Video(label="Detected Video")],
     title="WALDO30 YOLOv8 Object Detection",
-    description="Upload an image or video to see object detection results using WALDO30 YOLOv8 model."
+    description="Upload an image or video to see object detection results using the WALDO30 YOLOv8 model."
 )
 
-# Launch the app
 if __name__ == "__main__":
     app.launch()
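
Review note: the substantive fix in this commit is the loader. torch.hub.load('ultralytics/yolov8', 'custom', path=model_path) points at a GitHub repo name that does not exist as a Torch Hub source (the Hub integration from Ultralytics targets YOLOv5 via 'ultralytics/yolov5'), so it cannot resolve a YOLOv8 checkpoint. Loading the .pt weights through the ultralytics package's YOLO class is the supported path, and it also retires the unused torch import along with the YOLOv5-era results.render()/results.imgs API.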
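
One caveat in the new detect_on_image: per the Ultralytics docs, results[0].plot() returns the annotated image as a NumPy array in BGR channel order (OpenCV convention). That makes out.write(annotated_frame) in the video path correct as written, but passing the same array straight to Image.fromarray swaps red and blue. A minimal sketch of the image path with the channel conversion added, assuming the model object as loaded in this commit:

import cv2
from PIL import Image

def detect_on_image(image):
    results = model(image)                                   # YOLOv8 inference (model from the commit)
    annotated = results[0].plot()                            # annotated frame, BGR order
    annotated = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)   # convert to RGB for PIL
    return Image.fromarray(annotated)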
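
The interface wiring is untouched by this commit but worth flagging: gr.Interface expects a single callable for fn, not a list, so fn=[detect_on_image, detect_on_video] will not dispatch per input as written, and the gr.inputs/gr.outputs namespaces are deprecated (removed in Gradio 4.x) in favor of top-level components. A hedged sketch of one way to expose both functions, using gr.TabbedInterface (the tab names are my choice, not from the commit):

import gradio as gr

image_tab = gr.Interface(
    fn=detect_on_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Image(type="pil", label="Detected Image"),
)
video_tab = gr.Interface(
    fn=detect_on_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Video(label="Detected Video"),
)

app = gr.TabbedInterface(
    [image_tab, video_tab],
    tab_names=["Image", "Video"],
    title="WALDO30 YOLOv8 Object Detection",
)

if __name__ == "__main__":
    app.launch()

Keeping the two pipelines on separate tabs matches how the commit already defines them as independent functions, rather than forcing one form to accept both media types.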