import os

import gradio as gr
import numpy as np
from PIL import Image

from hugsvision.inference.TorchVisionClassifierInference import TorchVisionClassifierInference

# Models available under ./models/
models_name = ["VGG16", "ShuffleNetV2", "mobilenet_v2"]
default_model = "mobilenet_v2"

# Load the class labels: categories.txt holds a single ';'-separated line
with open("categories.txt", "r") as categories:
    labels = categories.readline().strip().split(";")


def predict_image(image, model_name):
    # Gradio passes the image as a numpy array; convert it to a PIL RGB image
    image = Image.fromarray(np.uint8(image)).convert("RGB")

    # Load the classifier matching the model selected in the radio input
    classifier = TorchVisionClassifierInference(
        model_path=os.path.join("./models", model_name),
    )

    pred = classifier.predict_image(img=image)

    # Return a {label: confidence} mapping, as expected by the Label output
    acc = {label: 0.0 for label in labels}
    acc[pred] = 100.0
    return acc


image = gr.inputs.Image(shape=(300, 300), label="Upload Your Image Here")
radio = gr.inputs.Radio(models_name, default=default_model, type="value", label="Model")
label = gr.outputs.Label(num_top_classes=len(labels))

samples = ["./samples/basking.jpg", "./samples/blacktip.jpg"]

interface = gr.Interface(
    fn=predict_image,
    inputs=[image, radio],
    outputs=label,
    capture_session=True,
    allow_flagging=False,
    # examples=samples
)

interface.launch()