# Gradio app entry point (web-scrape viewer header removed — the original
# lines were Hugging Face Space banner text, commit hashes, and a
# line-number gutter, not part of the source file).
from ui import *
from adjust import *
from set_up import prepare_input, prepare_video_subtitle
import gradio as gr
# Top-level Gradio UI layout. All components (input_video, start_time, ...)
# and callbacks (change_lang, get_video_from_url, prepare_output,
# get_speakers_previous/next, start_adjust*) come from the star-imported
# `ui` and `adjust` modules; prepare_input / prepare_video_subtitle come
# from `set_up`.
# NOTE(review): indentation was stripped in this copy; it is reconstructed
# here from the nesting of the `with` context managers — confirm against
# the original file.
with gr.Blocks() as demo:
    # UI-language selector; switching it updates the labels of every
    # component listed in comp_to_update.
    ui_lang_radio.render()
    ui_lang_radio.change(change_lang, inputs=ui_lang_radio,
                         outputs=comp_to_update)
    top_markdown.render()
    with gr.Column():
        with gr.Row():
            # Left column: obtain the source video, either downloaded
            # from a URL or uploaded directly.
            with gr.Column():
                input_url.render()
                url_download_button.render()
                url_download_button.click(
                    get_video_from_url, input_url, input_video)
                input_video.render()
                # input_video.clear(clear_duration, outputs=[
                #     start_time, end_time])
                # input_video_button.render()
                # input_video_button.click(get_duration, input_video, [
                #     start_time, end_time])
            # Right column: transcription options and the start trigger.
            with gr.Column():
                with gr.Row():
                    start_time.render()
                    end_time.render()
                lang_radio.render()
                model_dropdown.render()
                # summary_checkbox.render()
                start_button.render()
                # Pipeline: transcribe -> prepare the speaker-adjustment
                # widgets -> burn subtitles into the video. Each
                # .success() step runs only if the previous one finished
                # without raising.
                (
                    start_button
                    .click(prepare_input, [input_video, start_time, end_time, lang_radio, model_dropdown], [output_transcribe, output_file])
                    .success(prepare_output, inputs=input_video, outputs=[adjust_speaker, adjust_audio, prev_button, next_button, adjust_button])
                    .success(prepare_video_subtitle, inputs=[input_video, start_time, end_time], outputs=output_video)
                )
    # Results section: subtitled video and downloadable files on the
    # left, raw transcription text on the right.
    bottom_markdown.render()
    with gr.Row(equal_height=False):
        with gr.Column():
            output_video.render()
            output_file.render()
        with gr.Column():
            output_transcribe.render()
            # output_summary.render()
    # Speaker-adjustment section: step through detected speaker snippets,
    # relabel them, then re-run subtitle generation with the new names.
    middle_markdown.render()
    with gr.Row(equal_height=False):
        adjust_audio.render()
        adjust_speaker.render()
    with gr.Row():
        prev_button.render()
        next_button.render()
    prev_button.click(get_speakers_previous, inputs=[adjust_speaker], outputs=[
        adjust_speaker, adjust_audio])
    next_button.click(get_speakers_next, inputs=[adjust_speaker], outputs=[
        adjust_speaker, adjust_audio])
    adjust_button.render()
    (
        adjust_button
        .click(start_adjust, inputs=[input_video, adjust_speaker], outputs=[output_transcribe, output_file])
        .success(start_adjust_subtitle, inputs=[input_video, start_time, end_time], outputs=output_video)
    )
    with gr.Accordion("Copyright"):
        gr.Markdown("Created with OpenAI Whisper and Huggingface")
if __name__ == "__main__":
    # Enable request queuing (2 concurrent workers, at most 2 queued
    # requests) before serving; show_error surfaces exceptions in the UI.
    # NOTE(review): `batch` is not a documented Blocks.queue() parameter
    # in current Gradio releases — verify against the pinned version
    # (unsupported kwargs raise TypeError at startup).
    queued_app = demo.queue(concurrency_count=2, max_size=2, batch=True)
    queued_app.launch(share=False, show_error=True)