Spaces:
Sleeping
Sleeping
File size: 1,827 Bytes
caae3e0 3afe20e caae3e0 2337a39 6bf7cb4 2337a39 cc1887c ff3f385 778806f 3bfbec3 72d2777 3bfbec3 6bc3d42 0814097 31ac16a 64e1a19 0814097 64e1a19 7add9c9 6f76811 b7b5d68 cc1887c caae3e0 64e1a19 0bb8b0b 64e1a19 caae3e0 6f76811 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient
# Handwriting-OCR inference client.
# NOTE(review): `client` is never referenced anywhere in the visible file —
# presumably intended for `process_image`; confirm or remove.
client = InferenceClient("models/microsoft/trocr-base-handwritten")
def sepia(input_img):
    """Apply a sepia tone to an RGB image.

    Args:
        input_img: numpy array of shape (H, W, 3) — the last axis is
            matrix-multiplied against the 3x3 sepia filter, so any
            leading shape works as long as the channel axis has size 3.

    Returns:
        A tuple ``(sepia_img, sepia_values)`` where ``sepia_img`` is the
        filtered image normalized to the [0, 1] range and ``sepia_values``
        is its ``repr`` string (shown in the UI textbox).
    """
    # Classic sepia transform: each output channel is a weighted
    # mix of the input R, G, B channels.
    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    sepia_img = input_img.dot(sepia_filter.T)
    # Normalize to [0, 1] for display. Guard against an all-black input:
    # the original unconditional `/= max()` produced 0/0 -> NaN there.
    peak = sepia_img.max()
    if peak > 0:
        sepia_img /= peak
    sepia_values = repr(sepia_img)
    return sepia_img, sepia_values
## https://www.gradio.app/docs/gradio/blocks
## required positional arguments: 'inputs' and 'outputs'
def process_image(image):
    """Placeholder OCR hook.

    Currently ignores ``image`` and returns a fixed string; real
    image-to-text logic is meant to be plugged in here.
    """
    return "Processed Text"
def additional_input(text):
    """Echo ``text`` back to the caller with a confirmation prefix."""
    return "Additional input received: {}".format(text)
# Standalone Interface wrapping `sepia` (image in, image out).
# NOTE(review): not wired into `demo` below — only referenced by the
# commented-out TabbedInterface; confirm whether it is still needed.
sepia_interface = gr.Interface(sepia, gr.Image(), "image")
# Experimental Blocks layout: a sepia Interface stacked in a column,
# with an extra textbox row underneath.
# NOTE(review): `generated_output` is never launched or composed into
# `demo`; it appears to be a leftover experiment — confirm before removing.
with gr.Blocks() as generated_output:
    with gr.Column():
        # Embedded Interface; "minimal" keeps the progress indicator small.
        gr.Interface(fn=sepia,
                     inputs=gr.Image(),
                     outputs="image",
                     show_progress="minimal")
    with gr.Row():
        # Unbound textbox — no event handler writes to it.
        gr.Textbox(label="text")
#gr.Interface(sepia,
# inputs = gr.Image(label="image"),
# outputs = gr.Textbox())
#with gr.Blocks() as generated_output:
# inp = gr.Interface(sepia, gr.Image(), "image")
# out = gr.Textbox()
#demo = gr.TabbedInterface([sepia_interface, generated_output], ["RGB Sepia Filter", "Handwritten to Text"])
# Main app: image input + submit button on one row, with the sepia result
# image and its repr() text as outputs.
with gr.Blocks() as demo:
    with gr.Row():
        input_img = gr.Image(label="Input Image")
        submit_button = gr.Button("Submit")
        output_img = gr.Image(label="Output Image")
        sepia_values_text = gr.Textbox(label="Sepia Values")
    # `sepia` returns (image, repr-string); map them to the two outputs.
    submit_button.click(sepia, inputs=input_img, outputs=[output_img, sepia_values_text])
# Launch only when run as a script (Spaces executes the module directly).
if __name__ == "__main__":
    demo.launch()
|