import gradio as gr
import os
import torch
import json
import yoloxdetect2.helpers as yoloxdetect

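# Load the YOLOX-S detector. The commented line below loads local weights from
# ./dataset; the active line fetches the kadirnar/yolox_s-v0.1.1 checkpoint from
# the Hugging Face Hub (hf_model=True) and runs it on CPU.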
#model = yoloxdetect.YoloxDetector2('./dataset/yolox_s.pth', 'configs.yolox_s', device="cpu", hf_model=True)
model = yoloxdetect.YoloxDetector2('kadirnar/yolox_s-v0.1.1', 'configs.yolox_s', device="cpu", hf_model=True)

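# Inference resolution passed to model.predict() for every request.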
image_size = 640

def yolox_inference(image_path=None):
    """
    Run YOLOX inference on a single image.

    Args:
        image_path: Input image supplied by the Gradio Image component.

    Returns:
        A JSON string with one entry per detection (class id, name, score, box).
    """

    pred2 = None
    if image_path is not None:
        print(image_path)
        model.torchyolo = True
        pred2 = model.predict(image_path=image_path, image_size=image_size)

    # JSON payload returned to the UI; detections are appended under "tensorflow".
    tensor = {"tensorflow": []}

    if pred2 is not None:
        # pred2 unpacks as: [0] boxes, [1] scores, [2] class indices, [3] class names
        for i, element in enumerate(pred2[0]):
            obj = {}
            itemclass = round(pred2[2][i].item())
            obj["classe"] = itemclass
            obj["nome"] = pred2[3][itemclass]
            obj["score"] = pred2[1][i].item()
            obj["x"] = element[0].item()
            obj["y"] = element[1].item()
            obj["w"] = element[2].item()
            obj["h"] = element[3].item()
            tensor["tensorflow"].append(obj)

    return json.dumps(tensor)

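# Gradio components: a single PIL image in, and the JSON string returned by
# yolox_inference shown as plain text (see outputs=["text"] below).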
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
]

title = "SIMULADOR PARA RECONHECIMENTO DE IMAGEM"

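# Sample images offered as one-click examples in the UI.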
examples = [
    ["small-vehicles1.jpeg"],
    ["zidane.jpg"],
    ["dog.jpg"],
]

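# live=True re-runs inference whenever the input changes, and cache_examples=True
# pre-computes results for the example images at startup.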
demo_app = gr.Interface(
    fn=yolox_inference,
    inputs=inputs,
    outputs=["text"],
    title=title,
    examples=examples,
    cache_examples=True,
    live=True,
)
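# Serve on a fixed LAN address/port; the commented variant uses the default host.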
demo_app.launch(debug=True, server_name="192.168.0.153", server_port=8080, enable_queue=True)
#demo_app.launch(debug=True, server_port=8083, enable_queue=True)