import gradio as gr

# Wildcard imports provide the pre-declared Gradio components (inputs, buttons,
# markdown blocks) and the callbacks used to wire them together below.
from ui import *
from adjust import *
from set_up import prepare_input, prepare_video_subtitle

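# Build the Gradio interface. Components are declared in the imported modules and
# rendered explicitly here so the layout is controlled from this file.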
with gr.Blocks() as demo:
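    # UI language selector; changing it relabels the components in comp_to_update.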
    ui_lang_radio.render()
    ui_lang_radio.change(change_lang, inputs=ui_lang_radio,
                         outputs=comp_to_update)

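    # Input area: video from a URL or a local upload, clip start/end times,
    # transcription language, and model selection.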
    top_markdown.render()
    with gr.Column():
        with gr.Row():
            with gr.Column():
                input_url.render()
                url_download_button.render()
                url_download_button.click(
                    get_video_from_url, input_url, input_video)
                input_video.render()
                input_video.change(get_duration, input_video,
                                   [start_time, end_time])
            with gr.Column():
                with gr.Row():
                    start_time.render()
                    end_time.render()
                lang_radio.render()
                model_dropdown.render()
                # summary_checkbox.render()
        start_button.render()
        start_button.click(prepare_input,
                           [input_video, start_time, end_time, lang_radio,
                            model_dropdown],
                           [output_transcribe, output_file])

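    # Output area: a new subtitle file re-renders the output video, which in turn
    # updates the speaker-adjustment controls below.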
    bottom_markdown.render()
    with gr.Row(equal_height=False):
        with gr.Column():
            output_video.render()
            output_file.render()
            output_file.change(prepare_video_subtitle,
                               inputs=[input_video, start_time, end_time],
                               outputs=output_video)
            output_video.change(prepare_output, inputs=output_video,
                                outputs=[adjust_speaker, adjust_audio, prev_button,
                                         next_button, adjust_button])

        with gr.Column():
            output_transcribe.render()
            # output_summary.render()

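    # Speaker adjustment area: step through segments with the prev/next buttons
    # and edit speaker labels before re-running the adjustment.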
    middle_markdown.render()
    with gr.Row(equal_height=False):
        adjust_audio.render()
        adjust_speaker.render()
    with gr.Row():
        prev_button.render()
        next_button.render()
        prev_button.click(get_speakers_previous, inputs=[adjust_speaker],
                          outputs=[adjust_speaker, adjust_audio])
        next_button.click(get_speakers_next, inputs=[adjust_speaker],
                          outputs=[adjust_speaker, adjust_audio])

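    # Apply the corrected speaker labels and regenerate the video, transcript, and file.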
    adjust_button.render()
    adjust_button.click(start_adjust,
                        inputs=[input_video, adjust_speaker, start_time, end_time],
                        outputs=[output_video, output_transcribe, output_file])

    with gr.Accordion("Copyright"):
        gr.Markdown("Created with OpenAI Whisper and Huggingface")

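# Enable the request queue (useful for long-running transcription jobs) and launch
# without a public share link; errors are surfaced in the UI.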
if __name__ == "__main__":
    demo.queue().launch(share=False, show_error=True)