from ui import *
from adjust import *
from set_up import prepare_input
import gradio as gr

# Gradio front-end: upload a video, transcribe it (Whisper + Huggingface),
# then optionally review per-speaker audio snippets, relabel speakers, and
# re-run the adjustment pass.
with gr.Blocks() as demo:
    # Interface-language selector; changing it re-labels the listed widgets.
    ui_lang_radio.render()
    ui_lang_radio.change(change_lang, inputs=ui_lang_radio,
                         outputs=comp_to_update)

    top_markdown.render()
    with gr.Column():
        with gr.Row():
            with gr.Column():
                input_video.render()
                # On upload, derive the clip duration to seed the time bounds.
                input_video.change(get_duration, input_video,
                                   [start_time, end_time])
                # with gr.Row():
                #     start_time.render()
                #     end_time.render()
            with gr.Column():
                lang_radio.render()
                model_dropdown.render()
                summary_checkbox.render()
                start_button.render()

        bottom_markdown.render()
        with gr.Row(equal_height=False):
            with gr.Column():
                output_video.render()
                output_file.render()
            with gr.Column():
                output_transcribe.render()
                output_summary.render()

        middle_markdown.render()
        with gr.Row(equal_height=False):
            adjust_audio.render()
            adjust_speaker.render()
        with gr.Row():
            prev_button.render()
            next_button.render()
        adjust_button.render()

        # --- Event wiring (registration order/placement does not affect layout) ---

        # Kick off transcription; results land in the three output widgets.
        start_button.click(
            prepare_input,
            [input_video, start_time, end_time,
             lang_radio, model_dropdown, summary_checkbox],
            [output_video, output_transcribe, output_file],
        )

        # NOTE(review): this listens on output_file but feeds output_video as
        # the input — looks deliberate, but confirm against prepare_output.
        output_file.change(
            prepare_output,
            inputs=output_video,
            outputs=[adjust_speaker, adjust_audio,
                     prev_button, next_button, adjust_button],
        )

        # Step through speaker snippets one at a time.
        prev_button.click(get_speakers_previous, inputs=[adjust_speaker],
                          outputs=[adjust_speaker, adjust_audio])
        next_button.click(get_speakers_next, inputs=[adjust_speaker],
                          outputs=[adjust_speaker, adjust_audio])

        # Re-run processing with the user-corrected speaker labels.
        adjust_button.click(
            start_adjust,
            inputs=[input_video, adjust_speaker, start_time, end_time],
            outputs=[output_video, output_transcribe, output_file],
        )

    with gr.Accordion("Copyright"):
        gr.Markdown("Created with OpenAI Whisper and Huggingface")

if __name__ == "__main__":
    demo.queue().launch()