Fabrice-TIERCELIN committed on
Commit e0c7db5 · verified · 1 Parent(s): 28ff5b8

Gender option

Files changed (1)
  1. app.py +25 -17
app.py CHANGED
@@ -29,20 +29,7 @@ else:
     tts = TTS(model_name, gpu=torch.cuda.is_available())
     tts.to(device_type)
 
-def predict(prompt, language, audio_file_pth, mic_file_path, use_mic):
-    if use_mic:
-        if mic_file_path is None:
-            gr.Warning("Please record your voice with Microphone, or uncheck Use Microphone to use reference audios")
-            return (
-                None,
-                None,
-                None,
-            )
-        else:
-            speaker_wav = mic_file_path
-    else:
-        speaker_wav = audio_file_pth
-
+def predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic):
     if len(prompt) < 2:
         gr.Warning("Please give a longer prompt text")
         return (
@@ -56,7 +43,27 @@ def predict(prompt, language, audio_file_pth, mic_file_path, use_mic):
             None,
             None,
             None,
-            )
+        )
+
+    if use_mic:
+        if mic_file_path is None:
+            gr.Warning("Please record your voice with Microphone, or uncheck Use Microphone to use reference audios")
+            return (
+                None,
+                None,
+                None,
+            )
+        else:
+            speaker_wav = mic_file_path
+    else:
+        speaker_wav = audio_file_pth
+
+    if speaker_wav is None:
+        if gender == "male":
+            speaker_wav = "examples/male.wav"
+        else:
+            speaker_wav = "examples/female.wav"
+
     try:
         if language == "fr":
             if m.find("your") != -1:
@@ -136,11 +143,12 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
             max_choices=1,
             value="en",
         )
+        gender = gr.Radio(["female", "male"], label="Gender", info="Gender of the voice")
         audio_file_pth = gr.Audio(
             label="Reference Audio",
             #info="Click on the ✎ button to upload your own target speaker audio",
             type="filepath",
-            value="examples/female.wav",
+            value=None,
         )
         mic_file_path = gr.Audio(sources=["microphone"],
                                  type="filepath",
@@ -159,7 +167,7 @@ Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, wh
         information = gr.HTML()
 
         submit.click(predict, inputs = [
-            prompt, language, audio_file_pth, mic_file_path, use_mic
+            prompt, language, gender, audio_file_pth, mic_file_path, use_mic
         ], outputs = [
             waveform_visual,
             synthesised_audio,
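
In short, the commit threads a new gender input from the UI into predict: the microphone recording still wins when Use Microphone is checked, an uploaded reference is used otherwise, and only when both are missing does the gender radio pick one of the two bundled example clips. A minimal standalone sketch of that fallback, assuming the same examples/*.wav files ship with the app; pick_speaker_wav is a hypothetical helper named only for illustration, the commit keeps the equivalent code inline in predict():

# Minimal sketch of the speaker-selection logic this commit adds to predict().
# pick_speaker_wav is a hypothetical helper; the commit inlines this logic.

def pick_speaker_wav(gender, audio_file_pth, mic_file_path, use_mic):
    """Return the reference wav path, falling back to a bundled clip by gender."""
    if use_mic:
        if mic_file_path is None:
            return None  # the app warns the user and aborts in this case
        speaker_wav = mic_file_path
    else:
        speaker_wav = audio_file_pth

    # New behaviour: with no reference audio at all, choose a default voice by gender.
    if speaker_wav is None:
        speaker_wav = "examples/male.wav" if gender == "male" else "examples/female.wav"
    return speaker_wav


if __name__ == "__main__":
    # No microphone, no upload: the gender radio decides the default voice.
    print(pick_speaker_wav("male", None, None, False))    # examples/male.wav
    print(pick_speaker_wav("female", None, None, False))  # examples/female.wav
    # An uploaded reference still takes precedence over the gender default.
    print(pick_speaker_wav("male", "my_voice.wav", None, False))  # my_voice.wav

Clearing the default of the Reference Audio component (value=None instead of examples/female.wav) is what makes this fallback reachable: the gender default only applies when the Audio input actually returns None.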