Commit 0f62a14 · Parent: 1f983bd
Update app.py
app.py CHANGED
@@ -13,30 +13,15 @@ print(model_names)
 tts = TTS(m, gpu=False)
 tts.to("cpu") # no GPU or Amd
 #tts.to("cuda") # cuda only
-br_ = """
-<p onload="alert('a');">test0000099000999</p>
-<script>
-var par = document.createElement("p");
-var text = document.createTextNode("fhsgdjrs hgrtsfya");
-par.appendChild(text);
-document.body.appendChild(par);
-</script>
-"""
 
-br__ = """
-var par = document.createElement("p");
-var text = document.createTextNode("fhsgdjrs hgrtsfya");
-par.appendChild(text);
-document.body.appendChild(par);
-"""
 
 def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree, request: gr.Request):
+    exec("")
+    # Due to abuses from somes user, and French Rights...
     try:
         if request:
-            print(request)
-            print(request.headers)
+            print("Request obj:", request)
             print("= = = = = = = = = = = =")
-            print()
             print("Request headers dictionary:", request.headers)
             print("IP address:", request.client.host)
             print("Query parameters:", dict(request.query_params))
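Note on the hunk above: the new print calls work because Gradio injects the incoming request into any handler that declares a gr.Request-typed parameter. A minimal standalone sketch of that pattern, separate from this Space's code and assuming only the documented gr.Request attributes (headers, client.host, query_params); the echo function and component choices are illustrative:

import gradio as gr

# Gradio fills `request` automatically because of the type hint;
# no extra input component is wired to it.
def echo(text: str, request: gr.Request):
    if request:
        print("Request headers dictionary:", request.headers)
        print("IP address:", request.client.host)
        print("Query parameters:", dict(request.query_params))
    return text

gr.Interface(fn=echo, inputs=gr.Textbox(label="Text Prompt"), outputs="text").launch()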
@@ -117,7 +102,6 @@ This is the same model that powers our creator application <a href="https://coqu
 <br/>
 Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
 <br/>
-{br_}
 <p>For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings.
 <br/>
 <a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
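Note on the hunk above: dropping the {br_} placeholder keeps the script payload out of the description HTML. As a general illustration only (not something this commit does), Python's stdlib html.escape renders such markup inert whenever untrusted text has to appear inside a page:

import html

# The removed payload, reproduced purely as test input.
payload = """<p onload="alert('a');">test0000099000999</p><script>alert(1)</script>"""

# Escaped output (&lt;p onload=...) displays as literal text instead of executing.
print(html.escape(payload))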
@@ -233,7 +217,6 @@ examples = [
 
 gr.Interface(
     fn=predict,
-    _js=br__,
     inputs=[
         gr.Textbox(
             label="Text Prompt",
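Note on the hunk above: the commit also drops the _js keyword argument, which appears to have been used to inject the br__ script into the interface. For comparison, a hedged sketch of the documented route in more recent Gradio releases (an assumption about the installed version), where gr.Blocks accepts a js function string executed once in the browser on load; the console message is illustrative:

import gradio as gr

# js= takes a JavaScript function (as a string) run client-side when the app loads.
onload_js = """
() => { console.log("app loaded"); }
"""

with gr.Blocks(js=onload_js) as demo:
    gr.Textbox(label="Text Prompt")

demo.launch()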