Commit 2bb432c
Parent(s): 9976fb9
Update app.py

app.py CHANGED
@@ -7,12 +7,13 @@ import gradio as gr
 from TTS.api import TTS

 model_names = TTS().list_models()
-m = model_names[
+m = model_names[1]
 #for model in model_names:
 #    if model.find("/fr/") != -1:
 #        m = model
 #        break
 print(model_names)
+"""
 print(os.listdir("/home/user/.local/lib/python3.10/site-packages/TTS/"))
 print(os.listdir("/home/user/.local/lib/python3.10/site-packages/TTS/utils"))
 old = open("/home/user/.local/lib/python3.10/site-packages/TTS/utils/io.py", "r").read()
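
The hard-coded index in m = model_names[1] is fragile if the ordering returned by list_models() ever changes. The commented-out loop above hints at a substring search instead; a minimal sketch of that idea, with a fallback to the fixed index, follows (the pick_model helper is hypothetical and not part of this commit):

from TTS.api import TTS

model_names = TTS().list_models()

def pick_model(names, needle="/fr/", fallback_index=1):
    # Hypothetical helper: return the first model name containing `needle`,
    # falling back to a fixed index when nothing matches.
    for name in names:
        if name.find(needle) != -1:
            return name
    return names[fallback_index]

m = pick_model(model_names)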
@@ -27,6 +28,7 @@ try:
     print(open("/home/user/.local/lib/python3.10/site-packages/TTS/utils/io.py", "r").read())
 except:
     print("mauvais fichier")
+"""
 tts = TTS(m, gpu=False).to("cpu")
 #tts.to("cuda") # cuda only

@@ -57,11 +59,17 @@ def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
             None,
             None,
         )
-    try:
+    try:
+        if language == "fr":
+            if m.find("your") != -1:
+                language = "fr-fr"
+            if m.find("/fr/") != -1:
+                language = None
         tts.tts_to_file(
             text=prompt,
             file_path="output.wav",
-            speaker_wav=speaker_wav
+            speaker_wav=speaker_wav,
+            language=language
         )
         """
         tts.tts_to_file(
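
Read on its own, the new predict() body amounts to the standalone sketch below: the UI language code "fr" is remapped before synthesis ("your_tts"-style multilingual models expect "fr-fr", while a dedicated /fr/ model takes no language argument at all), and speaker_wav plus language are now passed to tts_to_file. The model name, prompt, and file paths here are placeholders, not the values used in the Space:

from TTS.api import TTS

m = "tts_models/multilingual/multi-dataset/your_tts"   # placeholder model id
prompt = "Bonjour tout le monde."                       # placeholder text
speaker_wav = "speaker.wav"                             # placeholder reference recording
language = "fr"                                         # language code coming from the UI

tts = TTS(m, gpu=False).to("cpu")

# Remap the language code the same way the commit does inside predict().
if language == "fr":
    if m.find("your") != -1:
        language = "fr-fr"
    if m.find("/fr/") != -1:
        language = None

tts.tts_to_file(
    text=prompt,
    file_path="output.wav",
    speaker_wav=speaker_wav,
    language=language,
)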
@@ -103,7 +111,7 @@ XTTS is built on previous research, like Tortoise, with additional architectural
 <br/>
 This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
 <br/>
-Leave a star on the Github <a href="https://github.com/coqui-ai/TTS"
+Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
 <br/>
 <p>For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings.
 <br/>
@@ -262,7 +270,7 @@ gr.Interface(
             info="Notice: Microphone input may not work properly under traffic",),
         gr.Checkbox(
             label="Agree",
-            value=
+            value=True,
             info="I agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml",
         ),
     ],
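
The final hunk repairs a keyword argument that was left incomplete: value= had no value, and the new revision pre-checks the license-consent box. In isolation the repaired component looks roughly like this (sketch only; the surrounding gr.Interface inputs list is unchanged by the commit):

import gradio as gr

# The "Agree" checkbox, now pre-checked so the CPML terms are accepted by default.
agree_checkbox = gr.Checkbox(
    label="Agree",
    value=True,
    info="I agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml",
)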