Continue the rework... now mostly passable
app.py CHANGED
@@ -1,5 +1,4 @@
-import
-
+import torch
 import gradio as gr
 import cv2
 import numpy as np
@@ -9,25 +8,26 @@ from sahi.utils.yolov5 import download_yolov5s6_model
 from sahi import AutoDetectionModel
 from sahi.predict import get_sliced_prediction, visualize_object_predictions
 
+# Autodetect GPU
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+else:
+    device = torch.device("cpu")
 
+# Load the model
 yolov5_model_path = "best.pt"
 download_yolov5s6_model(destination_path=yolov5_model_path)
 detection_model = AutoDetectionModel.from_pretrained(
     model_type="yolov5",
     model_path=yolov5_model_path,
     confidence_threshold=0.01,
-    device=
+    device=device,
 )
 
-EXAMPLES = [
-    ["test1.jpg"],
-    ["test2.jpg"],
-    ["test3.jpg"],
-    ["test4.jpg"],
-]
 
 
-def do_detection(image_path, hide_labels=False):
+def do_detection(image_path, hide_labels, confidence_scores):
 
+    # Obtain detection results
     result = get_sliced_prediction(
         image_path,
         detection_model,
@@ -37,44 +37,33 @@ def do_detection(image_path, hide_labels=False):
         overlap_width_ratio=0.12,
     )
 
-    count = 0
-
+    # Filter detections according to the slider and count the number of classes
+    # for visualization
+    predictions = []
+    class_counts = {}
     for i in result.object_prediction_list:
-        count += 1
         score = i.score
         value = score.value
         category = i.category
         category_name = category.name
         if value > confidence_scores[category_name]:
-
-
-
-
-
-
-
+            predictions.append(i)
+            if i.category.name not in class_counts:
+                class_counts[i.category.name] = 1
+            else:
+                class_counts[i.category.name] += 1
+
+    # Draw the boxes and labels on top of the image
+    img_rgb = visualize_object_predictions(
+        image_path,
+        object_prediction_list=predictions,
         text_size=1,
         text_th=1,
         hide_labels=hide_labels,
         rect_th=3,
-
-        file_name="result",
-        export_format="png",
-    )
-    image2 = cv2.imread("/home/ubuntu/Receptacle_Detection_Demo/result.png")
-    img_rgb = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
-
-    class_counts = {}
-
-    predictions = new_list
-    for i in predictions:
-        category = i.category
-        category_name = category.name
-        if category_name not in class_counts:
-            class_counts[category_name] = 1
-        else:
-            class_counts[category_name] += 1
+    )["image"]
 
+    # Construct a legend
     legend_text = "Symbols Counted:"
     for class_name, count in class_counts.items():
         legend_text += f" {class_name}: {count} |"
@@ -111,43 +100,31 @@ def do_detection(image_path, hide_labels=False):
 
     img_rgb[legend_y:, legend_x:, :] = legend_bg
 
-    result_image_path = "/home/ubuntu/Receptacle_Detection_Demo/result_with_legend.png"
-    cv2.imwrite(result_image_path, img_rgb)
-
     return (
-
+        img_rgb,
         result.to_coco_predictions(),
     )
 
 
-def update_single(val):
-    confidence_scores["Singleplex - Standard"] = val
-    return "updated!"
-
-
-def update_duplex(val):
-    confidence_scores["Duplex - Standard"] = val
-    return "updated!"
-
-
-def update_triplex(val):
-    confidence_scores["Triplex - Standard"] = val
-    return "updated!"
-
-
-def update_quadruplex(val):
-    confidence_scores["Quadruplex - Standard"] = val
-    return "updated!"
-
-
-def update_gfci(val):
-    confidence_scores["Duplex - GFCI"] = val
-    return "updated!"
-
-
-def update_gfciwp(val):
-    confidence_scores["Duplex - Weatherproof-GFCI"] = val
-    return "updated!"
+def call_func(
+    image_path,
+    hide_labels,
+    singleplex_value,
+    duplex_value,
+    triplex_value,
+    quadruplex_value,
+    gfci_value,
+    gfci_wp_value,
+):
+    confidence_scores = {
+        "Singleplex - Standard": singleplex_value,
+        "Duplex - Standard": duplex_value,
+        "Triplex - Standard": triplex_value,
+        "Quadruplex - Standard": quadruplex_value,
+        "Duplex - GFCI": gfci_value,
+        "Duplex - Weatherproof-GFCI": gfci_wp_value,
+    }
+    return do_detection(image_path, hide_labels, confidence_scores)
 
 
 demo = gr.Blocks()
@@ -167,7 +144,12 @@ with gr.Blocks(theme=theme) as demo:
         interactive=True,
     )
    examples = gr.Examples(
-        examples=EXAMPLES,
+        examples=[
+            ["test1.jpg"],
+            ["test2.jpg"],
+            ["test3.jpg"],
+            ["test4.jpg"],
+        ],
        inputs=[input_image],
        examples_per_page=4,
        label="Examples to use.",
@@ -175,42 +157,42 @@ with gr.Blocks(theme=theme) as demo:
 
    hide_labels = gr.Checkbox(label="Hide labels")
    with gr.Accordion("Visualization Confidence Thresholds", open=False):
-        filter_name1 = gr.Slider(
+        singleplex_slider = gr.Slider(
            minimum=0.1,
            maximum=1,
            value=0.53,
            interactive=True,
            label="Singleplex",
        )
-        filter_name2 = gr.Slider(
+        duplex_slider = gr.Slider(
            minimum=0.1,
            maximum=1,
            value=0.66,
            interactive=True,
            label="Duplex",
        )
-        filter_name3 = gr.Slider(
+        triplex_slider = gr.Slider(
            minimum=0.1,
            maximum=1,
            value=0.65,
            interactive=True,
            label="Triplex",
        )
-        filter_name4 = gr.Slider(
+        quadruplex_slider = gr.Slider(
            minimum=0.1,
            maximum=1,
            value=0.63,
            interactive=True,
            label="Quadruplex",
        )
-        filter_name5 = gr.Slider(
+        gfci_slider = gr.Slider(
            minimum=0.1,
            maximum=1,
            value=0.31,
            interactive=True,
            label="GFCI",
        )
-        filter_name6 = gr.Slider(
+        gfci_wp_slider = gr.Slider(
            minimum=0.1,
            maximum=1,
            value=0.33,
@@ -218,25 +200,19 @@ with gr.Blocks(theme=theme) as demo:
            label="GFCI/WP",
        )
 
-        filter_name1.change(fn=update_single, inputs=filter_name1)
-        filter_name2.change(fn=update_duplex, inputs=filter_name2)
-        filter_name3.change(fn=update_triplex, inputs=filter_name3)
-        filter_name4.change(fn=update_quadruplex, inputs=filter_name4)
-        filter_name5.change(fn=update_gfci, inputs=filter_name5)
-        filter_name6.change(fn=update_gfciwp, inputs=filter_name6)
-    confidence_scores = {
-        "Triplex - Standard": filter_name3.value,
-        "Duplex - Standard": filter_name2.value,
-        "Singleplex - Standard": filter_name1.value,
-        "Duplex - GFCI": filter_name5.value,
-        "Duplex - Weatherproof-GFCI": filter_name6.value,
-        "Quadruplex - Standard": filter_name4.value,
-    }
-
    results_button = gr.Button("Submit")
    results_button.click(
-
-        inputs=[
+        call_func,
+        inputs=[
+            input_image,
+            hide_labels,
+            singleplex_slider,
+            duplex_slider,
+            triplex_slider,
+            quadruplex_slider,
+            gfci_slider,
+            gfci_wp_slider,
+        ],
        outputs=[
            gr.Image(type="numpy", label="Output Image"),
            gr.Json(),
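
Because the thresholds now travel with each Submit click through call_func instead of living in a module-level dict mutated by per-slider callbacks, the whole prediction path can be exercised without the UI. A minimal local smoke test, a sketch only: it assumes best.pt and test1.jpg sit next to app.py, that input_image is a default gr.Image (which hands do_detection a numpy RGB array rather than a file path), and smoke_test.png is an illustrative output name, not something the Space writes.

    # Sketch: run this in place of the demo launch while iterating locally.
    # gr.Image defaults to type="numpy", so feed do_detection an RGB array.
    image = cv2.cvtColor(cv2.imread("test1.jpg"), cv2.COLOR_BGR2RGB)

    # Same values as the slider defaults in the UI above.
    img_rgb, coco_preds = call_func(
        image,  # "image_path" is really the gr.Image payload
        False,  # hide_labels
        0.53,   # Singleplex
        0.66,   # Duplex
        0.65,   # Triplex
        0.63,   # Quadruplex
        0.31,   # GFCI
        0.33,   # GFCI/WP
    )

    # coco_preds reflects everything above the model's 0.01 threshold,
    # not just the boxes that passed the sliders and were drawn.
    print(f"{len(coco_preds)} raw detections")
    # visualize_object_predictions returns RGB; cv2.imwrite expects BGR.
    cv2.imwrite("smoke_test.png", cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR))

Routing the thresholds through the .click(...) inputs also means two sessions moving sliders at the same time no longer stomp on each other's settings, which the old shared confidence_scores dict allowed.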
|