mbar0075 committed on
Commit
267b424
·
1 Parent(s): 7428106
Files changed (2) hide show
  1. app.py +70 -39
  2. requirements.txt +1 -1
app.py CHANGED
@@ -6,11 +6,42 @@ import supervision as sv
6
  from inference import get_model
7
 
8
  MARKDOWN = """
9
- <h1 style='text-align: center'>Detect Something 💫</h1>
10
- Welcome to Segment Something! You're on-the-go demo for instance segmentation. 🚀
11
-
12
- <h2 style='text-align: center'>Matthias Bartolo</h2>
13
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  Powered by Roboflow [Inference](https://github.com/roboflow/inference) and
15
  [Supervision](https://github.com/roboflow/supervision). 🔥
16
  """
@@ -21,9 +52,9 @@ IMAGE_EXAMPLES = [
21
  ['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 0.3, 0.3, 0.1],
22
  ]
23
 
24
- YOLO_V8N_MODEL = get_model(model_id="yolov8n-640")
25
- YOLO_V8S_MODEL = get_model(model_id="yolov8s-640")
26
- YOLO_V8M_MODEL = get_model(model_id="yolov8m-640")
27
 
28
  LABEL_ANNOTATORS = sv.LabelAnnotator(text_color=sv.Color.black())
29
  BOUNDING_BOX_ANNOTATORS = sv.BoundingBoxAnnotator()
@@ -72,11 +103,11 @@ def process_image(
72
  iou_threshold: float
73
  ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
74
  yolo_v8_annotated_image = detect_and_annotate(
75
- YOLO_V8N_MODEL, input_image, yolo_v8_confidence_threshold, iou_threshold)
76
  yolo_v9_annotated_image = detect_and_annotate(
77
- YOLO_V8S_MODEL, input_image, yolo_v9_confidence_threshold, iou_threshold)
78
  yolo_10_annotated_image = detect_and_annotate(
79
- YOLO_V8M_MODEL, input_image, yolo_v10_confidence_threshold, iou_threshold)
80
 
81
  return (
82
  yolo_v8_annotated_image,
@@ -85,12 +116,12 @@ def process_image(
85
  )
86
 
87
 
88
- yolo_v8N_confidence_threshold_component = gr.Slider(
89
  minimum=0,
90
  maximum=1.0,
91
  value=0.3,
92
  step=0.01,
93
- label="YOLOv8N Confidence Threshold",
94
  info=(
95
  "The confidence threshold for the YOLO model. Lower the threshold to "
96
  "reduce false negatives, enhancing the model's sensitivity to detect "
@@ -98,12 +129,12 @@ yolo_v8N_confidence_threshold_component = gr.Slider(
98
  "positives, preventing the model from identifying objects it shouldn't."
99
  ))
100
 
101
- yolo_v8S_confidence_threshold_component = gr.Slider(
102
  minimum=0,
103
  maximum=1.0,
104
  value=0.3,
105
  step=0.01,
106
- label="YOLOv8S Confidence Threshold",
107
  info=(
108
  "The confidence threshold for the YOLO model. Lower the threshold to "
109
  "reduce false negatives, enhancing the model's sensitivity to detect "
@@ -111,12 +142,12 @@ yolo_v8S_confidence_threshold_component = gr.Slider(
111
  "positives, preventing the model from identifying objects it shouldn't."
112
  ))
113
 
114
- yolo_v8M_confidence_threshold_component = gr.Slider(
115
  minimum=0,
116
  maximum=1.0,
117
  value=0.3,
118
  step=0.01,
119
- label="YOLOv8M Confidence Threshold",
120
  info=(
121
  "The confidence threshold for the YOLO model. Lower the threshold to "
122
  "reduce false negatives, enhancing the model's sensitivity to detect "
@@ -143,27 +174,27 @@ with gr.Blocks() as demo:
143
  gr.Markdown(MARKDOWN)
144
  with gr.Accordion("Configuration", open=False):
145
  with gr.Row():
146
- yolo_v8N_confidence_threshold_component.render()
147
- yolo_v8S_confidence_threshold_component.render()
148
- yolo_v8M_confidence_threshold_component.render()
149
  iou_threshold_component.render()
150
  with gr.Row():
151
  input_image_component = gr.Image(
152
  type='pil',
153
  label='Input'
154
  )
155
- yolo_v8n_output_image_component = gr.Image(
156
  type='pil',
157
- label='YOLOv8N'
158
  )
159
  with gr.Row():
160
- yolo_v8s_output_image_component = gr.Image(
161
  type='pil',
162
- label='YOLOv8S'
163
  )
164
- yolo_v8m_output_image_component = gr.Image(
165
  type='pil',
166
- label='YOLOv8M'
167
  )
168
  submit_button_component = gr.Button(
169
  value='Submit',
@@ -175,15 +206,15 @@ with gr.Blocks() as demo:
175
  examples=IMAGE_EXAMPLES,
176
  inputs=[
177
  input_image_component,
178
- yolo_v8N_confidence_threshold_component,
179
- yolo_v8S_confidence_threshold_component,
180
- yolo_v8M_confidence_threshold_component,
181
  iou_threshold_component
182
  ],
183
  outputs=[
184
- yolo_v8n_output_image_component,
185
- yolo_v8s_output_image_component,
186
- yolo_v8m_output_image_component
187
  ]
188
  )
189
 
@@ -191,16 +222,16 @@ with gr.Blocks() as demo:
191
  fn=process_image,
192
  inputs=[
193
  input_image_component,
194
- yolo_v8N_confidence_threshold_component,
195
- yolo_v8S_confidence_threshold_component,
196
- yolo_v8M_confidence_threshold_component,
197
  iou_threshold_component
198
  ],
199
  outputs=[
200
- yolo_v8n_output_image_component,
201
- yolo_v8s_output_image_component,
202
- yolo_v8m_output_image_component
203
  ]
204
  )
205
 
206
- demo.launch(debug=False, show_error=True, max_threads=1)
 
6
  from inference import get_model
7
 
8
  MARKDOWN = """
9
+ <h1 style='text-align: center'>YOLO-ARENA 🏟️</h1>
10
+ Welcome to YOLO-Arena! This demo showcases the performance of various YOLO models
11
+ pre-trained on the COCO dataset.
12
+ - **YOLOv8**
13
+ <div style="display: flex; align-items: center;">
14
+ <a href="https://github.com/ultralytics/ultralytics" style="margin-right: 10px;">
15
+ <img src="https://badges.aleen42.com/src/github.svg">
16
+ </a>
17
+ <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
18
+ <img src="https://colab.research.google.com/assets/colab-badge.svg">
19
+ </a>
20
+ </div>
21
+ - **YOLOv9**
22
+ <div style="display: flex; align-items: center;">
23
+ <a href="https://github.com/WongKinYiu/yolov9" style="margin-right: 10px;">
24
+ <img src="https://badges.aleen42.com/src/github.svg">
25
+ </a>
26
+ <a href="https://arxiv.org/abs/2402.13616" style="margin-right: 10px;">
27
+ <img src="https://img.shields.io/badge/arXiv-2402.13616-b31b1b.svg">
28
+ </a>
29
+ <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov9-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
30
+ <img src="https://colab.research.google.com/assets/colab-badge.svg">
31
+ </a>
32
+ </div>
33
+ - **YOLOv10**
34
+ <div style="display: flex; align-items: center;">
35
+ <a href="https://github.com/THU-MIG/yolov10" style="margin-right: 10px;">
36
+ <img src="https://badges.aleen42.com/src/github.svg">
37
+ </a>
38
+ <a href="https://arxiv.org/abs/2405.14458" style="margin-right: 10px;">
39
+ <img src="https://img.shields.io/badge/arXiv-2405.14458-b31b1b.svg">
40
+ </a>
41
+ <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov10-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
42
+ <img src="https://colab.research.google.com/assets/colab-badge.svg">
43
+ </a>
44
+ </div>
45
  Powered by Roboflow [Inference](https://github.com/roboflow/inference) and
46
  [Supervision](https://github.com/roboflow/supervision). 🔥
47
  """
 
52
  ['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 0.3, 0.3, 0.1],
53
  ]
54
 
55
+ YOLO_V8_MODEL = get_model(model_id="coco/8")
56
+ YOLO_V9_MODEL = get_model(model_id="coco/17")
57
+ YOLO_V10_MODEL = get_model(model_id="coco/22")
58
 
59
  LABEL_ANNOTATORS = sv.LabelAnnotator(text_color=sv.Color.black())
60
  BOUNDING_BOX_ANNOTATORS = sv.BoundingBoxAnnotator()
 
103
  iou_threshold: float
104
  ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
105
  yolo_v8_annotated_image = detect_and_annotate(
106
+ YOLO_V8_MODEL, input_image, yolo_v8_confidence_threshold, iou_threshold)
107
  yolo_v9_annotated_image = detect_and_annotate(
108
+ YOLO_V9_MODEL, input_image, yolo_v9_confidence_threshold, iou_threshold)
109
  yolo_10_annotated_image = detect_and_annotate(
110
+ YOLO_V10_MODEL, input_image, yolo_v10_confidence_threshold, iou_threshold)
111
 
112
  return (
113
  yolo_v8_annotated_image,
 
116
  )
117
 
118
 
119
+ yolo_v8_confidence_threshold_component = gr.Slider(
120
  minimum=0,
121
  maximum=1.0,
122
  value=0.3,
123
  step=0.01,
124
+ label="YOLOv8 Confidence Threshold",
125
  info=(
126
  "The confidence threshold for the YOLO model. Lower the threshold to "
127
  "reduce false negatives, enhancing the model's sensitivity to detect "
 
129
  "positives, preventing the model from identifying objects it shouldn't."
130
  ))
131
 
132
+ yolo_v9_confidence_threshold_component = gr.Slider(
133
  minimum=0,
134
  maximum=1.0,
135
  value=0.3,
136
  step=0.01,
137
+ label="YOLOv9 Confidence Threshold",
138
  info=(
139
  "The confidence threshold for the YOLO model. Lower the threshold to "
140
  "reduce false negatives, enhancing the model's sensitivity to detect "
 
142
  "positives, preventing the model from identifying objects it shouldn't."
143
  ))
144
 
145
+ yolo_v10_confidence_threshold_component = gr.Slider(
146
  minimum=0,
147
  maximum=1.0,
148
  value=0.3,
149
  step=0.01,
150
+ label="YOLOv10 Confidence Threshold",
151
  info=(
152
  "The confidence threshold for the YOLO model. Lower the threshold to "
153
  "reduce false negatives, enhancing the model's sensitivity to detect "
 
174
  gr.Markdown(MARKDOWN)
175
  with gr.Accordion("Configuration", open=False):
176
  with gr.Row():
177
+ yolo_v8_confidence_threshold_component.render()
178
+ yolo_v9_confidence_threshold_component.render()
179
+ yolo_v10_confidence_threshold_component.render()
180
  iou_threshold_component.render()
181
  with gr.Row():
182
  input_image_component = gr.Image(
183
  type='pil',
184
  label='Input'
185
  )
186
+ yolo_v8_output_image_component = gr.Image(
187
  type='pil',
188
+ label='YOLOv8'
189
  )
190
  with gr.Row():
191
+ yolo_v9_output_image_component = gr.Image(
192
  type='pil',
193
+ label='YOLOv9'
194
  )
195
+ yolo_v10_output_image_component = gr.Image(
196
  type='pil',
197
+ label='YOLOv10'
198
  )
199
  submit_button_component = gr.Button(
200
  value='Submit',
 
206
  examples=IMAGE_EXAMPLES,
207
  inputs=[
208
  input_image_component,
209
+ yolo_v8_confidence_threshold_component,
210
+ yolo_v9_confidence_threshold_component,
211
+ yolo_v10_confidence_threshold_component,
212
  iou_threshold_component
213
  ],
214
  outputs=[
215
+ yolo_v8_output_image_component,
216
+ yolo_v9_output_image_component,
217
+ yolo_v10_output_image_component
218
  ]
219
  )
220
 
 
222
  fn=process_image,
223
  inputs=[
224
  input_image_component,
225
+ yolo_v8_confidence_threshold_component,
226
+ yolo_v9_confidence_threshold_component,
227
+ yolo_v10_confidence_threshold_component,
228
  iou_threshold_component
229
  ],
230
  outputs=[
231
+ yolo_v8_output_image_component,
232
+ yolo_v9_output_image_component,
233
+ yolo_v10_output_image_component
234
  ]
235
  )
236
 
237
+ demo.launch(debug=False, show_error=True, max_threads=1)
requirements.txt CHANGED
@@ -1,5 +1,5 @@
1
  setuptools<70.0.0
2
  awscli==1.29.54
3
  gradio==4.19.2
4
- inference==0.11.2
5
  supervision==0.20.0
 
1
  setuptools<70.0.0
2
  awscli==1.29.54
3
  gradio==4.19.2
4
+ inference==0.13.0
5
  supervision==0.20.0