import gradio as gr
import numpy as np
from PIL import Image
import os
import json
from hugsvision.inference.TorchVisionClassifierInference import TorchVisionClassifierInference
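# Available fine-tuned checkpoints; each is assumed to live in its own sub-folder under ./models/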
models_name = [
    "VGG16",
    "ShuffleNetV2",
    "mobilenet_v2",
]
colname = "mobilenet_v2"
radio = gr.inputs.Radio(models_name, default="mobilenet_v2", type="value", label=colname)
print(radio.label)
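# Inference callback: converts the uploaded array to a PIL image, runs the classifier,
# and returns a {label: score} dict for the Label output.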
def predict_image(image, model_name):
    # Convert the incoming NumPy array to an RGB PIL image
    image = Image.fromarray(np.uint8(image)).convert('RGB')

    print("======================")
    print(type(image))
    print(type(model_name))
    print(image)
    print(model_name)
    print("======================")

    # Load the checkpoint matching the model selected in the radio input
    # (assumes one checkpoint folder per model name under ./models/)
    classifier = TorchVisionClassifierInference(
        model_path="./models/" + model_name,
    )

    pred = classifier.predict_image(img=image)
    print(pred)

    # Map every label to 0% and the predicted label to 100%
    acc = {label: 0.0 for label in labels}
    acc[pred] = 100.0
    print(acc)
    return acc
# Read the class labels from categories.txt (a single semicolon-separated line)
with open("categories.txt", "r") as categories:
    labels = categories.readline().strip().split(";")
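# Gradio components: a 300x300 image input and a label output showing every class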
image = gr.inputs.Image(shape=(300, 300), label="Upload Your Image Here")
print(image)
label = gr.outputs.Label(num_top_classes=len(labels))
samples = ['./samples/basking.jpg', './samples/blacktip.jpg']
# , './samples/blacktip.jpg', './samples/blue.jpg', './samples/bull.jpg', './samples/hammerhead.jpg',
# './samples/lemon.jpg', './samples/mako.jpg', './samples/nurse.jpg', './samples/sand tiger.jpg', './samples/thresher.jpg',
# './samples/tigre.jpg', './samples/whale.jpg', './samples/white.jpg', './samples/whitetip.jpg']
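# Wire the prediction callback, inputs, and output into a Gradio interface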
interface = gr.Interface(
    fn=predict_image,
    inputs=[image, radio],
    outputs=label,
    capture_session=True,
    allow_flagging=False,
    # examples=samples
)
interface.launch()