"""Gradio demo app: an RGB sepia-filter tab and a handwritten-to-text tab.

NOTE(review): `client` is constructed but never called anywhere in this file —
presumably the handwritten-to-text tab was meant to use it for OCR; confirm
before removing or wiring it into `process_image`.
"""
import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient

# OCR inference client for the handwriting model (currently unused — see module note).
client = InferenceClient("models/microsoft/trocr-base-handwritten")


def sepia(input_img):
    """Apply a sepia tone to an RGB image.

    Parameters
    ----------
    input_img : numpy.ndarray
        H x W x 3 image array; any numeric dtype (Gradio supplies uint8).

    Returns
    -------
    numpy.ndarray
        Float image with the sepia transform applied, scaled so the
        maximum value is 1.0 (left at zero for an all-black input).
    """
    # Standard sepia weighting matrix (rows: output R, G, B channels).
    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    sepia_img = input_img.dot(sepia_filter.T)
    # Bug fix: guard the normalization — on an all-black image max() is 0
    # and the original `sepia_img /= sepia_img.max()` produced NaNs.
    peak = sepia_img.max()
    if peak > 0:
        sepia_img /= peak
    return sepia_img


sepia_interface = gr.Interface(sepia, gr.Image(), "image")


def process_image(image):
    """Placeholder OCR step for the handwritten-text tab.

    TODO: call `client` to transcribe `image`; currently returns a stub string.
    """
    # Your image processing logic here
    return "Processed Text"


def additional_input(text):
    """Echo the auxiliary text field back, prefixed for display."""
    return f"Additional input received: {text}"


# https://www.gradio.app/docs/gradio/blocks
with gr.Blocks() as generated_output:
    with gr.Row():
        with gr.Column():
            input_img = gr.Image(label="Input Image")
            clear_button = gr.Button("Clear")
            submit_button = gr.Button("Submit")
        with gr.Column():
            output_text = gr.Textbox(label="Output Text")
            additional_text = gr.Textbox(label="Additional Input")
    # Bug fix: the buttons previously had no event handlers, so this tab was
    # inert. Submit runs the (stub) processing; Clear resets all three fields.
    submit_button.click(process_image, inputs=input_img, outputs=output_text)
    clear_button.click(
        lambda: (None, "", ""),
        inputs=None,
        outputs=[input_img, output_text, additional_text],
    )

# Two tabs: the auto-generated sepia Interface and the hand-built Blocks layout.
demoApp = gr.TabbedInterface(
    [sepia_interface, generated_output],
    ["RGB Sepia Filter", "Handwritten to Text"],
)

if __name__ == "__main__":
    demoApp.launch()