import gradio as gr
import yolov7

# Example images (optional): fetch once with torch.hub if they are not
# already bundled with the Space.
# import torch
# torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
# torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')


def yolov7_inference(
    image,
    model_path: str = "alshimaa/model_baseline",
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """
    YOLOv7 inference function
    Args:
        image: Input image
        model_path: Path to the model
        image_size: Image size
        conf_threshold: Confidence threshold
        iou_threshold: IOU threshold
    Returns:
        Rendered image
    """

    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    return results.render()[0]
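
# A minimal sketch of calling the function outside Gradio (local smoke test);
# 'image1.jpg' is one of the example images listed below:
#
#   from PIL import Image
#   preview = yolov7_inference(Image.open("image1.jpg"), "alshimaa/model_yolo7")
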
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Dropdown(
        choices=[
            "alshimaa/model_baseline",
            "alshimaa/model_yolo7",
        ],
        default="alshimaa/model_baseline",
        label="Model",
    ),
    # The sliders must stay enabled so each 5-value example row below matches
    # the number of input components (required for cache_examples=True).
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

# The function returns a numpy array from render(), so the output type must be "numpy".
outputs = gr.outputs.Image(type="numpy", label="Output Image")
title = "Smart Environmental Eye (SEE)"

examples = [
    ["image1.jpg", "alshimaa/model_yolo7", 640, 0.25, 0.45],
    ["image2.jpg", "alshimaa/model_yolo7", 640, 0.25, 0.45],
    ["image3.jpg", "alshimaa/model_yolo7", 640, 0.25, 0.45],
]
demo_app = gr.Interface(
    fn=yolov7_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)

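# enable_queue=True routes requests through Gradio's queue so that slow CPU
# inference calls are not dropped by request timeouts.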
demo_app.launch(debug=True, enable_queue=True)