File size: 1,804 Bytes
961927f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
626c0bb
961927f
 
 
0de3299
 
739d458
 
0de3299
739d458
0de3299
961927f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334ccb5
961927f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import gradio as gr
import numpy as np
import os

from hugsvision.inference.TorchVisionClassifierInference import TorchVisionClassifierInference

models_name = [
    "VGG16",
    "ShuffleNetV2",
    "mobilenet_v2"
]

colname = "mobilenet_v2"

radio = gr.inputs.Radio(models_name, default="mobilenet_v2", type="value", label=colname)
print(radio.label)

def predict_image(image):
    image = np.array(image) / 255
    image = np.expand_dims(image, axis=0)

    classifier = TorchVisionClassifierInference(
        model_path = "./models/" + colname + "/best_model.pth",
    )

    pred = classifier.predict(img=image)

    label2id = json.load(open("./models/" + colname + "/best_model.pth"))["label2id"].keys()
    # vec = [100.0 if a.lower() == pred.lower() else 0.00 for a in label2id]
    acc = dict((label2id[i], "%.2f" % 100.0 if label2id[i].lower() == pred.lower() else 0.0) for i in range(len(label2id)))
    
    return acc
    # return pred

# open categories.txt in read mode
categories = open("categories.txt", "r")
labels = categories.readline().split(";")

image = gr.inputs.Image(shape=(300, 300), label="Upload Your Image Here")
label = gr.outputs.Label(num_top_classes=len(labels))

samples = ['./samples/basking.jpg', './samples/blacktip.jpg']
# , './samples/blacktip.jpg', './samples/blue.jpg', './samples/bull.jpg', './samples/hammerhead.jpg',
#         './samples/lemon.jpg', './samples/mako.jpg', './samples/nurse.jpg', './samples/sand tiger.jpg', './samples/thresher.jpg', 
#         './samples/tigre.jpg', './samples/whale.jpg', './samples/white.jpg', './samples/whitetip.jpg']
        
interface = gr.Interface(
    fn=predict_image, 
    inputs=[image, radio], 
    outputs=label, 
    capture_session=True, 
    allow_flagging=False, 
    # examples=samples
)
interface.launch()