Update app.py
app.py CHANGED
@@ -28,6 +28,8 @@ def predict(prompt, upload_reference_audio, microphone_reference_audio, referenc
         audio_file_pth = upload_reference_audio
     elif reference_audio_type == 'microphone':
         audio_file_pth = microphone_reference_audio
+    torch.set_default_device('cuda')
+    tts = TTS('tts_models/multilingual/multi-dataset/your_tts').to('cuda')
     tts.tts_to_file(
         text=prompt,
         file_path=output_file_path,
@@ -40,14 +42,8 @@ def main():
 def main():
     if torch.cuda.is_available():
         device = "cuda"
-        print("cuda available")
     else:
         device = "cpu"
-    print('device is', device)
-    torch.set_default_device(device)
-    torch.set_default_device(device)
-    tts = TTS('tts_models/multilingual/multi-dataset/your_tts').to(device)
-    tts.to(device)
     kd_talker = Inferencer()
 
     with gr.Blocks(analytics_enabled=False) as interface:
@@ -120,6 +116,4 @@ def main():
 if __name__ == "__main__":
     os.environ["GRADIO_SERVER_PORT"] = "7860"
     demo = main()
-
-    demo.launch(server_name="0.0.0.0")
-
+    demo.launch(server_name="0.0.0.0")
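For reference, a minimal, self-contained sketch of how the modified part of predict() might read after this change. The function signature, the enclosing 'upload' branch, and the speaker_wav/language keyword arguments are assumptions not shown in the hunks; the device setup and model-loading lines come from the diff, and the calls follow the public Coqui TTS and PyTorch APIs.

# Sketch of the updated predict() path, assuming the Coqui TTS API.
# Lines not present in the diff (signature, 'upload' test, speaker_wav/language)
# are illustrative assumptions only.
import torch
from TTS.api import TTS


def predict(prompt, upload_reference_audio, microphone_reference_audio,
            reference_audio_type, output_file_path="output.wav"):
    # Pick the reference audio source; only the assignments appear as
    # context lines in the hunk, the 'upload' condition is assumed.
    if reference_audio_type == 'upload':
        audio_file_pth = upload_reference_audio
    elif reference_audio_type == 'microphone':
        audio_file_pth = microphone_reference_audio

    # Added in this commit: device setup and model loading now happen
    # inside predict() rather than in main().
    torch.set_default_device('cuda')
    tts = TTS('tts_models/multilingual/multi-dataset/your_tts').to('cuda')

    # speaker_wav and language are assumed keyword arguments; the diff only
    # shows text= and file_path= for this call.
    tts.tts_to_file(
        text=prompt,
        file_path=output_file_path,
        speaker_wav=audio_file_pth,
        language='en',
    )
    return output_file_path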