cocktailpeanut commited on
Commit
ff1c7b7
·
1 Parent(s): d9bc8b5
Files changed (1) hide show
  1. app_locally.py +15 -11
app_locally.py CHANGED
@@ -14,7 +14,8 @@ ckpt_converter = 'checkpoints/converter'
14
  tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
15
  tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')
16
 
17
- def predict(prompt, style, audio_file_pth, mic_file_path, use_mic, language):
 
18
  # initialize an empty info
19
  text_hint = ''
20
 
@@ -70,7 +71,8 @@ def predict(prompt, style, audio_file_pth, mic_file_path, use_mic, language):
70
 
71
  speed = 1.0
72
 
73
- tts_model.tts_to_file(prompt, speaker_id, src_path, speaker=style, language=language)
 
74
 
75
  save_path = f'{output_dir}/output.wav'
76
  # Run the tone color converter
@@ -129,13 +131,13 @@ with gr.Blocks(analytics_enabled=False) as demo:
129
  info="One or two sentences at a time is better. Up to 200 text characters.",
130
  value="He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered, flour-fattened sauce.",
131
  )
132
- style_gr = gr.Dropdown(
133
- label="Style",
134
- info="Select a style of output audio for the synthesised speech. (Chinese only support 'default' now)",
135
- choices=['default', 'whispering', 'cheerful', 'terrified', 'angry', 'sad', 'friendly'],
136
- max_choices=1,
137
- value="default",
138
- )
139
  ref_gr = gr.Audio(
140
  label="Reference Audio",
141
  info="Click on the ✎ button to upload your own target speaker audio",
@@ -165,11 +167,13 @@ with gr.Blocks(analytics_enabled=False) as demo:
165
 
166
  gr.Examples(examples,
167
  label="Examples",
168
- inputs=[input_text_gr, style_gr, ref_gr, mic_gr, use_mic_gr, language],
 
169
  outputs=[out_text_gr, audio_gr, ref_audio_gr],
170
  fn=predict,
171
  cache_examples=False,)
172
- tts_button.click(predict, [input_text_gr, style_gr, ref_gr, mic_gr, use_mic_gr, language], outputs=[out_text_gr, audio_gr, ref_audio_gr])
 
173
 
174
  demo.queue()
175
  demo.launch(debug=True, show_api=True)
 
14
  tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
15
  tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')
16
 
17
+ #def predict(prompt, style, audio_file_pth, mic_file_path, use_mic, language):
18
+ def predict(prompt, audio_file_pth, mic_file_path, use_mic, language):
19
  # initialize an empty info
20
  text_hint = ''
21
 
 
71
 
72
  speed = 1.0
73
 
74
+ #tts_model.tts_to_file(prompt, speaker_id, src_path, speaker=style, language=language)
75
+ tts_model.tts_to_file(prompt, speaker_id, src_path, language=language)
76
 
77
  save_path = f'{output_dir}/output.wav'
78
  # Run the tone color converter
 
131
  info="One or two sentences at a time is better. Up to 200 text characters.",
132
  value="He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered, flour-fattened sauce.",
133
  )
134
+ #style_gr = gr.Dropdown(
135
+ # label="Style",
136
+ # info="Select a style of output audio for the synthesised speech. (Chinese only support 'default' now)",
137
+ # choices=['default', 'whispering', 'cheerful', 'terrified', 'angry', 'sad', 'friendly'],
138
+ # max_choices=1,
139
+ # value="default",
140
+ #)
141
  ref_gr = gr.Audio(
142
  label="Reference Audio",
143
  info="Click on the ✎ button to upload your own target speaker audio",
 
167
 
168
  gr.Examples(examples,
169
  label="Examples",
170
+ #inputs=[input_text_gr, style_gr, ref_gr, mic_gr, use_mic_gr, language],
171
+ inputs=[input_text_gr, ref_gr, mic_gr, use_mic_gr, language],
172
  outputs=[out_text_gr, audio_gr, ref_audio_gr],
173
  fn=predict,
174
  cache_examples=False,)
175
+ #tts_button.click(predict, [input_text_gr, style_gr, ref_gr, mic_gr, use_mic_gr, language], outputs=[out_text_gr, audio_gr, ref_audio_gr])
176
+ tts_button.click(predict, [input_text_gr, ref_gr, mic_gr, use_mic_gr, language], outputs=[out_text_gr, audio_gr, ref_audio_gr])
177
 
178
  demo.queue()
179
  demo.launch(debug=True, show_api=True)