chompionsawelo committed on
Commit 3273372 · 1 Parent(s): 35ce1cb

order change

Files changed (2)
  1. app.py +6 -3
  2. set_up.py +8 -6
app.py CHANGED
@@ -1,6 +1,6 @@
 from ui import *
 from adjust import *
-from set_up import prepare_input
+from set_up import prepare_input, prepare_video_subtitle
 import gradio as gr
 
 with gr.Blocks() as demo:
@@ -30,15 +30,18 @@ with gr.Blocks() as demo:
     start_button.click(prepare_input,
                        [input_video, start_time, end_time, lang_radio,
                         model_dropdown],
-                       [output_video, output_transcribe, output_file])
+                       [output_transcribe, output_file])
 
     bottom_markdown.render()
     with gr.Row(equal_height=False):
         with gr.Column():
             output_video.render()
             output_file.render()
-            output_file.change(prepare_output, inputs=output_video, outputs=[
+            output_file.change(prepare_video_subtitle, inputs=[
+                input_video, start_time, end_time], outputs=output_video)
+            output_video.change(prepare_output, inputs=output_video, outputs=[
                 adjust_speaker, adjust_audio, prev_button, next_button, adjust_button])
+
         with gr.Column():
             output_transcribe.render()
             # output_summary.render()
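
Taken together, the app.py changes chain the Gradio events: start_button.click now returns only the transcript and the downloadable files, output_file.change burns the subtitle into the video, and output_video.change wires up the adjustment controls. One effect of the split is that the transcript outputs can update before the subtitled video is produced, since the burning step runs as a chained event. Below is a minimal, self-contained sketch of that chaining pattern; the handler bodies and component labels are illustrative placeholders, not this repository's code.

import gradio as gr

def transcribe(video_path):
    # Placeholder transcription step: write a dummy transcript file and
    # return (transcript text, path of the file offered for download).
    text = "dummy transcript for " + str(video_path)
    with open("transcript.txt", "w") as f:
        f.write(text)
    return text, "transcript.txt"

def burn_subtitles(video_path):
    # Placeholder for the subtitle-burning step; it passes the original
    # clip through so the chain stays runnable.
    return video_path

with gr.Blocks() as demo:
    input_video = gr.Video(label="Input")
    start_button = gr.Button("Start")
    output_transcribe = gr.Textbox(label="Transcript")
    output_file = gr.File(label="Downloads")
    output_video = gr.Video(label="Subtitled clip")

    # Step 1: the click handler fills only the transcript and file outputs.
    start_button.click(transcribe, inputs=input_video,
                       outputs=[output_transcribe, output_file])
    # Step 2: once the file output changes, produce the subtitled video.
    output_file.change(burn_subtitles, inputs=input_video, outputs=output_video)
    # Step 3: further listeners (e.g. speaker adjustment) can chain off
    # output_video.change(...) in the same way.

demo.launch()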
set_up.py CHANGED
@@ -9,6 +9,14 @@ import os
 import utils
 
 
+def prepare_video_subtitle(input_file, start_time, end_time, progress=gr.Progress()):
+    # Add subtitle to video
+    progress(0.8, desc=ui_lang["progress_add_subtitle"])
+    add_subtitle_to_video(input_file, base_subtitle_file,
+                          video_subtitle_file, start_time, end_time)
+    return video_subtitle_file
+
+
 def prepare_input(input_file, start_time, end_time, lang, model_size, progress=gr.Progress()):
     gr.Info(ui_lang["progress_starting_process"])
 
@@ -45,17 +53,11 @@ def prepare_input(input_file, start_time, end_time, lang, model_size, progress=g
     progress(0.6, desc=ui_lang["progress_transcribing_audio"])
     start_transcribe(input_file, lang, model_size, progress)
 
-    # Add subtitle to video
-    progress(0.8, desc=ui_lang["progress_add_subtitle"])
-    add_subtitle_to_video(input_file, base_subtitle_file,
-                          video_subtitle_file, start_time, end_time)
-
     # Return video file link, transcribe string, transcribe.txt, subtitle.txt
     transcribe_txt_list, _ = utils.read_transcribe_subtitle_file(
         input_file, False)
     transcribe_txt = "\n".join(transcribe_txt_list)
     return [
-        video_subtitle_file,
        transcribe_txt,
        [transcribe_file, subtitle_file]
     ]
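
The commit only relocates the call to add_subtitle_to_video into the new prepare_video_subtitle wrapper; the implementation of add_subtitle_to_video is not part of this diff. For context, burning a subtitle file into a trimmed clip is typically done with ffmpeg's subtitles filter. The sketch below is a hypothetical stand-in under that assumption; the function name, signature, and the subprocess/ffmpeg invocation are illustrative, not the repository's add_subtitle_to_video.

import subprocess

def add_subtitle_to_video_sketch(video_path, subtitle_path, output_path,
                                 start_time, end_time):
    # Hypothetical illustration only. Trims the clip to [start_time, end_time]
    # and hard-burns subtitle_path into the frames (requires ffmpeg built
    # with libass for the subtitles filter).
    subprocess.run([
        "ffmpeg", "-y",
        "-i", video_path,
        "-ss", str(start_time),               # clip start
        "-to", str(end_time),                 # clip end
        "-vf", f"subtitles={subtitle_path}",  # burn the subtitles into the video
        output_path,
    ], check=True)
    return output_path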