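# Gradio demo: YOLOv8 object detection with the predictions rendered via sahi.
# The ONNX weights are downloaded from the Hugging Face Hub at inference time.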
import gradio as gr
import torch
from sahi.prediction import ObjectPrediction
from sahi.utils.cv import visualize_object_predictions, read_image
# from ultralyticsplus import YOLO
from ultralytics import YOLO

# Example images: download a sample image for the Gradio examples section,
# falling back to a sahi test image if the primary URL is unavailable.
try:
    torch.hub.download_url_to_file("https://image.jimcdn.com/app/cms/image/transf/none/path/sb7e051baffe289da/image/i98db96643a3b080e/version/1416825261/image.jpg", "mg.jpg")
except Exception:
    torch.hub.download_url_to_file("https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg", "mg.jpg")
# torch.hub.download_url_to_file("https://ikiwiki.iki.fi/_media/jot-email-1612-fi-iki.png", "fi.jpg")
# torch.hub.download_url_to_file("https://www.geekculture.com/joyoftech/joyimages/1612.gif", "en.jpg")
# torch.hub.download_url_to_file('https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg', 'highway1.jpg')
# torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')

def yolov8_inference(
    image: str = None,
    model_path: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """
    YOLOv8 inference function
    Args:
        image: Path to the input image
        model_path: Hugging Face repo id of the YOLOv8 model
        image_size: Inference image size (pixels)
        conf_threshold: Confidence threshold
        iou_threshold: IOU threshold
    Returns:
        Rendered image with the predictions drawn on it
    """
    # Load the exported ONNX weights directly from the Hugging Face Hub
    model = YOLO("https://huggingface.co/" + model_path + "/resolve/main/train/weights/best.onnx", task="detect")
    # Thresholds and image size must be passed to predict(); assigning them as
    # model attributes has no effect on inference
    results = model.predict(image, imgsz=image_size, conf=conf_threshold, iou=iou_threshold)
    # Convert each detected box into a sahi ObjectPrediction so the detections
    # can be drawn with visualize_object_predictions()
    object_prediction_list = []
    for result in results:
        for box in result:
            xyxy = [int(x) for x in box.boxes.xyxy[0]]
            conf = float(box.boxes.conf[0])
            cls = int(box.boxes.cls[0])
            label = box.names[cls]
            object_prediction_list.append(
                ObjectPrediction(
                    bbox=xyxy,
                    category_id=cls,
                    score=conf,
                    category_name=label,
                )
            )

    # Draw the predictions on the input image and return the rendered array
    image = read_image(image)
    output_image = visualize_object_predictions(image=image, object_prediction_list=object_prediction_list)
    return output_image["image"]

inputs = [
    gr.inputs.Image(type="filepath", label="Input Image"),
    # gr.inputs.Dropdown(["kadirnar/yolov8n-v8.0", "kadirnar/yolov8m-v8.0", "kadirnar/yolov8l-v8.0", "kadirnar/yolov8x-v8.0", "kadirnar/yolov8x6-v8.0"], 
    #                    default="kadirnar/yolov8m-v8.0", label="Model"),
    gr.inputs.Dropdown(["jongkook90/yolov8_comicbook"], default="jongkook90/yolov8_comicbook", label="Model"),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]
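# The input components above are passed, in order, as the arguments of yolov8_inference.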

outputs = gr.outputs.Image(type="numpy", label="Output Image")
title = "Ultralytics YOLOv8: State-of-the-Art YOLO Models"

examples = [
        ['mg.jpg', 'jongkook90/yolov8_comicbook', 640, 0.25, 0.45],
        #['fi.jpg', 'jongkook90/yolov8_comicbook', 640, 0.25, 0.45],
        #['en.jpg', 'jongkook90/yolov8_comicbook', 640, 0.25, 0.45],
        ] 
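# The example row above can also be used to smoke-test the function directly
# (assuming mg.jpg was downloaded successfully):
#   yolov8_inference("mg.jpg", "jongkook90/yolov8_comicbook", 640, 0.25, 0.45)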
demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)