import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient

# Hosted TrOCR model for the handwriting-recognition tab.
# (Model ids are passed without a "models/" prefix.)
client = InferenceClient("microsoft/trocr-base-handwritten")

# Sepia transform matrix, shared by the Interface and Blocks callbacks below.
SEPIA_FILTER = np.array([
    [0.393, 0.769, 0.189],
    [0.349, 0.686, 0.168],
    [0.272, 0.534, 0.131],
])


def sepia(input_img):
    """Apply the sepia transform to an RGB image."""
    sepia_img = input_img.dot(SEPIA_FILTER.T)
    sepia_img /= sepia_img.max()  # scale into [0, 1] so Gradio can display it
    return sepia_img


def sepia_with_values(input_img):
    """Blocks callback: return the filtered image plus the filter matrix as text."""
    return sepia(input_img), str(SEPIA_FILTER)


def process_image(image):
    # Placeholder for the TrOCR call; see the commented sketch at the end of
    # this file for one way to wire it to `client`.
    return "Processed Text"


def additional_input(text):
    # Currently unused helper, kept as a stub.
    return f"Additional input received: {text}"


## https://www.gradio.app/docs/gradio/blocks
## gr.Interface requires the positional arguments 'inputs' and 'outputs'.
sepia_interface = gr.Interface(sepia, gr.Image(), "image")

with gr.Blocks() as generated_output:
    with gr.Row():
        input_img = gr.Image(label="Input Image")
        output_img = gr.Image(label="Output Image")
    with gr.Row():
        clear_button = gr.Button("Clear")
        submit_button = gr.Button("Submit")
    sepia_values_text = gr.Textbox(label="Sepia Values")

    submit_button.click(
        sepia_with_values,
        inputs=input_img,
        outputs=[output_img, sepia_values_text],
    )
    clear_button.click(
        lambda: (None, None, ""),
        inputs=None,
        outputs=[input_img, output_img, sepia_values_text],
    )

# NOTE: the second tab currently shows the sepia Blocks demo; the sketch at
# the end of this file wires the TrOCR client into a real handwriting tab.
demoApp = gr.TabbedInterface(
    [sepia_interface, generated_output],
    ["RGB Sepia Filter", "Handwritten to Text"],
)

if __name__ == "__main__":
    demoApp.launch()
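
# --- Sketch: wiring the "Handwritten to Text" tab ---------------------------
# A minimal, unverified sketch of how `client` and `process_image` could be
# connected. Assumptions not confirmed by the original script: the
# huggingface_hub `InferenceClient.image_to_text` task method is used, it
# accepts a local file path (hence `gr.Image(type="filepath")`), and recent
# releases return an output object whose text is in `.generated_text`
# (older releases returned a plain string, which `getattr` falls back to).
#
# def process_image(image_path):
#     result = client.image_to_text(image_path)
#     return getattr(result, "generated_text", result)
#
# trocr_interface = gr.Interface(
#     process_image,
#     gr.Image(type="filepath", label="Handwritten Image"),
#     gr.Textbox(label="Recognized Text"),
# )
#
# demoApp = gr.TabbedInterface(
#     [sepia_interface, generated_output, trocr_interface],
#     ["RGB Sepia Filter", "Sepia Filter (Blocks)", "Handwritten to Text"],
# )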