Spaces · Runtime error
Commit 037139b · Update app.py
Parent(s): f591895

app.py CHANGED
@@ -1,86 +1,105 @@
 import gradio as gr
-import
-
-
-from ultralyticsplus import YOLO, render_result


-
-
-
-
-    conf_threshold: gr.inputs.Slider = 0.25,
-    iou_threshold: gr.inputs.Slider = 0.45,
-):
-    """
-    YOLOv8 inference function
-    Args:
-        image: Input image
-        model_path: Path to the model
-        image_size: Image size
-        conf_threshold: Confidence threshold
-        iou_threshold: IOU threshold
-    Returns:
-        Rendered image
-    """
-    model = YOLO(model_path)
-    model.overrides['conf'] = conf_threshold
-    model.overrides['iou']= iou_threshold
-    model.overrides['agnostic_nms'] = False # NMS class-agnostic
-    model.overrides['max_det'] = 999
-    image = read_image(image)
-    results = model.predict(image)
-    render = render_result(model=model, image=image, result=results[0])
-
-    return render

-
-    """
-    Gradio postprocess function
-    Args:
-        image: Input image
-    Returns:
-        Processed image
-    """

-
-

-
-

-
-

-

-
-
-
-
-
-        'samples/2.JPG',
-    ],
-    inputs={'postprocess': gr_postprocess(inputs)},
-    ),
-    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
-    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
-    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
 ]

-
-

-
-
-
-
-
-
-
-
-
-
-
 )
-
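For context, the removed version followed the usual ultralyticsplus demo pattern: load a YOLOv8 checkpoint, set prediction overrides, run predict, and draw the detections with render_result. The function name, the start of its signature, and the read_image helper were not preserved in this view, so the names below are illustrative; this is a minimal sketch of that pattern, not the exact deleted code.

from ultralyticsplus import YOLO, render_result

def yolov8_inference(image, model_path, conf_threshold=0.25, iou_threshold=0.45):
    # Illustrative reconstruction of the removed inference helper.
    model = YOLO(model_path)
    model.overrides['conf'] = conf_threshold   # confidence threshold
    model.overrides['iou'] = iou_threshold     # NMS IoU threshold
    model.overrides['agnostic_nms'] = False    # class-agnostic NMS off
    model.overrides['max_det'] = 999           # detection cap
    results = model.predict(image)
    # render_result overlays the boxes from results[0] on the input image
    return render_result(model=model, image=image, result=results[0])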
 import gradio as gr
+import cv2
+import requests
+import os

+from ultralytics import YOLO

+file_urls = [
+    torch.hub.download_url_to_file('https://huggingface.co/spaces/foduucom/object_detection/tree/main/samples/1.jpeg', '1.jpg'),
+    torch.hub.download_url_to_file('https://huggingface.co/spaces/foduucom/object_detection/tree/main/samples/2.JPG', '2.jpg'),
+

+]

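A note on the block above: torch is used but never imported, so evaluating file_urls raises NameError as soon as app.py is loaded, which would explain the Space's "Runtime error" status. Even with the import, torch.hub.download_url_to_file returns None, so the list would end up as [None, None], and the /tree/main/ URLs point at HTML listing pages rather than the raw samples. A sketch of the list as plain strings follows; the /resolve/main/ form is an assumption about where the raw files live in this Space.

# Sketch: keep file_urls as plain URL strings and let download_file() fetch them.
file_urls = [
    'https://huggingface.co/spaces/foduucom/object_detection/resolve/main/samples/1.jpeg',
    'https://huggingface.co/spaces/foduucom/object_detection/resolve/main/samples/2.JPG',
]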
+def download_file(url, save_name):
+    url = url
+    if not os.path.exists(save_name):
+        file = requests.get(url)
+        open(save_name, 'wb').write(file.content)

+for i, url in enumerate(file_urls):
+    if 'mp4' in file_urls[i]:
+        download_file(
+            file_urls[i],
+            f"video.mp4"
+        )
+    else:
+        download_file(
+            file_urls[i],
+            f"image_{i}.jpg"
+        )

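In download_file, `url = url` is a no-op and the HTTP status is never checked, so a failed request would silently save an error page to disk; the loop also iterates over file_urls, so it only behaves once that list holds URL strings (see the note above). A slightly more defensive sketch of the helper, same interface, assuming requests stays the transport:

import os
import requests

def download_file(url, save_name):
    # Skip the download if the file already exists in the Space working dir.
    if os.path.exists(save_name):
        return
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # fail loudly instead of writing an error page
    with open(save_name, 'wb') as f:
        f.write(response.content)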
+model = YOLO('best.pt')
+path = [['image_0.jpg'], ['image_1.jpg']]
+video_path = [['video.mp4']]

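The checkpoint is loaded once at import time, so all requests share one model instance, and best.pt has to be committed to the Space repository or loading fails at startup. A small guard that makes that failure mode explicit (sketch, not part of the commit):

import os
from ultralytics import YOLO

if not os.path.exists('best.pt'):
    raise FileNotFoundError("best.pt must be committed to the Space repository")
model = YOLO('best.pt')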
+def show_preds_image(image_path):
+    image = cv2.imread(image_path)
+    outputs = model.predict(source=image_path)
+    results = outputs[0].cpu().numpy()
+    for i, det in enumerate(results.boxes.xyxy):
+        cv2.rectangle(
+            image,
+            (int(det[0]), int(det[1])),
+            (int(det[2]), int(det[3])),
+            color=(0, 0, 255),
+            thickness=2,
+            lineType=cv2.LINE_AA
+        )
+    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

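In show_preds_image, cv2.imread(image_path) is only used as a drawing canvas while model.predict(source=image_path) decodes the same file again, and only raw boxes are drawn, with no class or confidence labels. Ultralytics results also expose a plot() helper that draws boxes, labels, and scores and returns a BGR array; a shorter variant using it, reusing the model and cv2 defined above (the function name is illustrative):

def show_preds_image_plot(image_path):
    # Let ultralytics render boxes, class names, and confidences,
    # then convert BGR -> RGB for the Gradio Image output.
    result = model.predict(source=image_path)[0]
    return cv2.cvtColor(result.plot(), cv2.COLOR_BGR2RGB)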
+inputs_image = [
+    gr.components.Image(type="filepath", label="Input Image"),
+]
+outputs_image = [
+    gr.components.Image(type="numpy", label="Output Image"),
 ]
+interface_image = gr.Interface(
+    fn=show_preds_image,
+    inputs=inputs_image,
+    outputs=outputs_image,
+    title="Pothole detector app",
+    examples=path,
+    cache_examples=False,
+)
+
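The image tab can be smoke-tested on its own before the tabbed app at the bottom of the file is assembled (hypothetical snippet, not part of the commit):

if __name__ == '__main__':
    # Launch only the image interface while iterating locally.
    interface_image.launch()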
+def show_preds_video(video_path):
+    cap = cv2.VideoCapture(video_path)
+    while(cap.isOpened()):
+        ret, frame = cap.read()
+        if ret:
+            frame_copy = frame.copy()
+            outputs = model.predict(source=frame)
+            results = outputs[0].cpu().numpy()
+            for i, det in enumerate(results.boxes.xyxy):
+                cv2.rectangle(
+                    frame_copy,
+                    (int(det[0]), int(det[1])),
+                    (int(det[2]), int(det[3])),
+                    color=(0, 0, 255),
+                    thickness=2,
+                    lineType=cv2.LINE_AA
+                )
+            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)

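Because show_preds_video yields frames, Gradio treats it as a streaming generator, which is why .queue() is called at the end of the file. As committed, though, the loop has no exit path: once cap.read() stops returning frames, ret stays False and the while loop spins without ever releasing the capture. A sketch with an explicit stop, reusing the plot() shortcut mentioned above:

def show_preds_video(video_path):
    cap = cv2.VideoCapture(video_path)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break  # end of stream: stop instead of spinning forever
            annotated = model.predict(source=frame)[0].plot()
            yield cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    finally:
        cap.release()  # free the decoder handle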
+inputs_video = [
+    gr.components.Video(type="filepath", label="Input Video"),

+]
+outputs_video = [
+    gr.components.Image(type="numpy", label="Output Image"),
+]
+interface_video = gr.Interface(
+    fn=show_preds_video,
+    inputs=inputs_video,
+    outputs=outputs_video,
+    title="Pothole detector",
+    examples=video_path,
+    cache_examples=False,
 )
+
+gr.TabbedInterface(
+    [interface_image, interface_video],
+    tab_names=['Image inference', 'Video inference']
+).queue().launch()
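gr.TabbedInterface takes the list of interfaces plus matching tab names; .queue() is what allows the frames yielded by the video tab to stream to the client, and .launch() starts the server. The same wiring with explicit keyword arguments, handy if more tabs are added later (sketch):

demo = gr.TabbedInterface(
    interface_list=[interface_image, interface_video],
    tab_names=['Image inference', 'Video inference'],
)
demo.queue()   # needed for generator (streaming) outputs
demo.launch()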