Spaces: foduucom/object_detection (runtime error)
Commit 271251d · Parent(s): f4ffaec
Update app.py
app.py CHANGED
@@ -4,9 +4,6 @@ from sahi.prediction import ObjectPrediction
 from sahi.utils.cv import visualize_object_predictions, read_image
 from ultralyticsplus import YOLO, render_result
 
-# Images
-torch.hub.download_url_to_file('https://huggingface.co/spaces/foduucom/object_detection/samples/1.jpeg', '1.jpeg')
-torch.hub.download_url_to_file('https://huggingface.co/spaces/foduucom/object_detection/samples/2.jpg', '2.JPG')
 
 def yolov8_inference(
     image: gr.inputs.Image = None,
@@ -30,7 +27,8 @@ def yolov8_inference(
     model.overrides['conf'] = conf_threshold
     model.overrides['iou']= iou_threshold
     model.overrides['agnostic_nms'] = False  # NMS class-agnostic
-
+    # Correct line of code
+    model.overrides['max_det'] = 999
     image = read_image(image)
     results = model.predict(image)
     render = render_result(model=model, image=image, result=results[0])
@@ -39,9 +37,8 @@ def yolov8_inference(
 
 
 inputs = [
-
-    gr.
-    default="foduucom/object_detection", label="Model"),
+    # Images
+    gr.Examples(['samples/1.jpeg', 'samples/2.JPG'],),
     gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
@@ -62,4 +59,4 @@ demo_app = gr.Interface(
     cache_examples=True,
     theme='huggingface',
 )
-demo_app.launch(debug=True, enable_queue=True)
+demo_app.launch(debug=True, enable_queue=True)
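For reference, a minimal standalone sketch of the inference settings this commit introduces, with the Gradio UI stripped away. The model id (foduucom/object_detection) and the sample image path come from the diff above; the threshold values mirror the slider defaults and are assumptions, not part of the commit itself.

# Hypothetical standalone sketch (not part of the commit): same override pattern
# as in yolov8_inference(), using the model id and sample image seen in the diff.
from ultralyticsplus import YOLO, render_result

model = YOLO('foduucom/object_detection')   # model id from the removed Textbox default; assumed loadable here
model.overrides['conf'] = 0.25              # confidence threshold (slider default)
model.overrides['iou'] = 0.45               # IoU threshold for NMS (slider default)
model.overrides['agnostic_nms'] = False     # class-aware NMS, as in the app
model.overrides['max_det'] = 999            # the new override: cap on detections per image

results = model.predict('samples/1.jpeg')   # sample image referenced by the new gr.Examples entry
render = render_result(model=model, image='samples/1.jpeg', result=results[0])
render.show()                               # render_result returns a PIL image

Raising max_det to 999 simply lifts the per-image cap on boxes kept after NMS (Ultralytics defaults to 300), so dense scenes are not silently truncated.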