Spaces:
Sleeping
Sleeping
File size: 1,174 Bytes
caae3e0 3afe20e caae3e0 2337a39 cc1887c ff3f385 778806f 3bfbec3 72d2777 3bfbec3 6bc3d42 eea229c 6bc3d42 eea229c b7b5d68 cc1887c caae3e0 0bb8b0b caae3e0 0bb8b0b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 |
import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient
# NOTE(review): this TrOCR inference client is created but never called anywhere
# in this file — presumably intended to back the "Handwritten to Text" tab;
# confirm before removing.
client = InferenceClient("models/microsoft/trocr-base-handwritten")
def sepia(input_img):
    """Apply a sepia tone to an RGB image and normalize it to [0, 1].

    Parameters:
        input_img: numpy array of shape (H, W, 3) — gr.Image supplies an
            RGB array (assumed numeric; TODO confirm dtype from gradio).

    Returns:
        numpy array of the same shape, sepia-transformed and scaled so the
        brightest value is 1.0 (unchanged if the image is all zeros).
    """
    # Classic sepia transform matrix: each output channel is a weighted
    # mix of the input R, G, B channels.
    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    sepia_img = input_img.dot(sepia_filter.T)
    # Guard against an all-black input: the original unconditionally divided
    # by max(), which is 0/0 -> NaN for a zero image.
    peak = sepia_img.max()
    if peak > 0:
        sepia_img /= peak
    return sepia_img
## https://www.gradio.app/docs/gradio/blocks
## required positional arguments: 'inputs' and 'outputs'
def process_image(image):
    """Placeholder processing hook: ignores *image*, returns a fixed string.

    TODO(review): wire real OCR logic in here (the InferenceClient defined
    at module level looks like the intended backend — confirm).
    """
    return "Processed Text"
def additional_input(text):
    """Echo helper: report back the text that was received."""
    message = "Additional input received: {}".format(text)
    return message
# Standalone sepia demo: image in, sepia image out. Used as the first tab
# of the TabbedInterface at the bottom of this file.
sepia_interface = gr.Interface(sepia, gr.Image(), "image")
# Second tab layout. NOTE(review): despite the "Handwritten to Text" tab label
# given to this Blocks at the bottom of the file, it currently renders the same
# sepia Interface plus an empty Textbox — the TrOCR client defined at the top
# is never invoked; confirm intended behavior.
with gr.Blocks() as generated_output:
    with gr.Column():
        gr.Interface(sepia, gr.Image(), "image")
        with gr.Row():
            gr.Textbox()
# Earlier draft of the same layout, kept commented out by the author:
#with gr.Blocks() as generated_output:
#    inp = gr.Interface(sepia, gr.Image(), "image")
#    out = gr.Textbox()
# Two-tab app: tab 1 is the sepia filter Interface, tab 2 is the Blocks layout
# built above (its label promises OCR, but no OCR call exists in this file).
demoApp = gr.TabbedInterface([sepia_interface, generated_output], ["RGB Sepia Filter", "Handwritten to Text"])

# Launch only when run as a script, not when imported.
if __name__ == "__main__":
    demoApp.launch()
|