import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient

# Hub repo id only; the "models/" prefix is a gr.load() convention, not an InferenceClient one
client = InferenceClient("microsoft/trocr-base-handwritten")

# Classic 3x3 sepia transform matrix (rows map to output R, G, B channels)
SEPIA_FILTER = np.array([
    [0.393, 0.769, 0.189],
    [0.349, 0.686, 0.168],
    [0.272, 0.534, 0.131],
])

def sepia(input_img):
    """Apply the sepia filter to an RGB image (numpy array)."""
    sepia_img = input_img.dot(SEPIA_FILTER.T)
    # Normalize so the float image stays in Gradio's displayable [0, 1] range
    sepia_img /= sepia_img.max()
    return sepia_img
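# Quick sanity check (assumption: any HxWx3 array works, e.g. a uint8 photo):
# >>> img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
# >>> sepia(img).shape
# (4, 4, 3)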


## https://www.gradio.app/docs/gradio/blocks
## required positional arguments: 'inputs' and 'outputs'
def process_image(image):
    # Your image processing logic here
    return "Processed Text"

def additional_input(text):
    return f"Additional input received: {text}"

sepia_interface = gr.Interface(sepia, gr.Image(), "image")

def sepia_with_values(input_img):
    # The click handler below expects two outputs, but `sepia` returns only one;
    # this wrapper also returns the filter matrix for the "Sepia Values" box.
    return sepia(input_img), str(SEPIA_FILTER)

with gr.Blocks() as generated_output:
    #with gr.Row():
    #    sepia_interface
    #with gr.Column():
    #    gr.Textbox()
    with gr.Row():
        input_img = gr.Image(label="Input Image")
        clear_button = gr.Button("Clear")
        submit_button = gr.Button("Submit")
        # The output image is filled in by the Submit event, not at build time
        output_img = gr.Image(label="Output Image")
        sepia_values_text = gr.Textbox(label="Sepia Values")
    submit_button.click(sepia_with_values, inputs=input_img, outputs=[output_img, sepia_values_text])
    # Wire up the previously unused Clear button to reset all three components
    clear_button.click(lambda: (None, None, ""), inputs=None, outputs=[input_img, output_img, sepia_values_text])

#with gr.Blocks() as generated_output:
#    inp = gr.Interface(sepia, gr.Image(), "image")
#    out = gr.Textbox()


# NOTE: the second tab still shows the sepia Blocks demo; the TrOCR
# handwriting client above is not wired into the UI yet.
demoApp = gr.TabbedInterface([sepia_interface, generated_output], ["RGB Sepia Filter", "Handwritten to Text"])

if __name__ == "__main__":
    demoApp.launch()