import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient

# Inference client for the TrOCR handwritten-text model; currently unused,
# but kept for the planned "Handwritten to Text" tab (see sketch at the bottom).
client = InferenceClient("models/microsoft/trocr-base-handwritten")


def sepia(input_img):
    """Apply a sepia filter to an RGB image; return the image and its raw values."""
    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    sepia_img = input_img.dot(sepia_filter.T)
    # Normalize to [0, 1] so Gradio can render the float image.
    sepia_img /= sepia_img.max()
    sepia_values = repr(sepia_img)
    return sepia_img, sepia_values


## https://www.gradio.app/docs/gradio/blocks
## required positional arguments: 'inputs' and 'outputs'
def process_image(image):
    # Placeholder: image-processing logic goes here.
    return "Processed Text"


def additional_input(text):
    return f"Additional input received: {text}"


# sepia() returns two values, so the Interface needs two outputs.
sepia_interface = gr.Interface(sepia, gr.Image(), ["image", "text"])

with gr.Blocks() as generated_output:
    with gr.Column():
        gr.Interface(
            fn=sepia,
            inputs=gr.Image(),
            outputs=["image", "text"],
            show_progress="minimal",
        )
    with gr.Row():
        gr.Textbox(label="text")

# gr.Interface(sepia,
#              inputs=gr.Image(label="image"),
#              outputs=gr.Textbox())

# with gr.Blocks() as generated_output:
#     inp = gr.Interface(sepia, gr.Image(), "image")
#     out = gr.Textbox()

# demo = gr.TabbedInterface([sepia_interface, generated_output],
#                           ["RGB Sepia Filter", "Handwritten to Text"])

with gr.Blocks() as demo:
    with gr.Row():
        input_img = gr.Image(label="Input Image")
        submit_button = gr.Button("Submit")
        output_img = gr.Image(label="Output Image")
        sepia_values_text = gr.Textbox(label="Sepia Values")

    submit_button.click(
        sepia,
        inputs=input_img,
        outputs=[output_img, sepia_values_text],
    )

if __name__ == "__main__":
    demo.launch()
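
# --- Sketch: wiring `client` into the commented-out "Handwritten to Text" tab ---
# A minimal, untested sketch of how the unused InferenceClient above could back
# the second tab of the TabbedInterface. `handwritten_to_text` and `ocr_interface`
# are hypothetical names introduced here; the sketch assumes the image-to-text
# task is served for this model on the Inference API, and that newer
# huggingface_hub versions return an object with `.generated_text` (older
# versions return a plain string). Uncomment and adapt to use.
#
# def handwritten_to_text(image_path):
#     # gr.Image(type="filepath") hands us a file path, which image_to_text accepts.
#     result = client.image_to_text(image_path)
#     return getattr(result, "generated_text", result)
#
# ocr_interface = gr.Interface(
#     handwritten_to_text,
#     gr.Image(type="filepath", label="Handwritten Image"),
#     gr.Textbox(label="Recognized Text"),
# )
#
# demo = gr.TabbedInterface([sepia_interface, ocr_interface],
#                           ["RGB Sepia Filter", "Handwritten to Text"])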