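# Simple OCR demo: EasyOCR reads text from an uploaded image, the detected
# regions are outlined on the image, and the result is served through a
# Gradio interface.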
import pandas as pd
from PIL import Image, ImageDraw
import gradio as gr
import easyocr

def draw_boxes(image, bounds, color='red', width=2):
    # Outline each detected text region on the image.
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        # Each EasyOCR result starts with the four corner points of the box.
        p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image

def inference(img, lang):
    # Build a reader for the selected languages and run OCR on the uploaded file.
    # `img` is a file path because the input component uses type="filepath".
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img)
    im = Image.open(img)
    draw_boxes(im, bounds)
    im.save('result.jpg')

    # Drop the bounding-box column, keeping only the recognized text and confidence.
    return ['result.jpg', pd.DataFrame(bounds).iloc[:, 1:]]

# EasyOCR language codes offered in the UI: English and Vietnamese.
choices = ["en", "vi"]

gr.Interface(
    inference,
    # type="filepath" hands inference a path string rather than an image array.
    inputs=[gr.Image(type="filepath", label='Input'),
            gr.CheckboxGroup(choices, type="value", label='language')],
    # Two outputs to match the two values returned by inference.
    outputs=[gr.Image(label='Output'),
             gr.Dataframe(headers=['text', 'confidence'])]
    ).launch(debug=True)
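# debug=True keeps the demo in the foreground and surfaces tracebacks in the
# console while testing.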