Update app.py
app.py
CHANGED
@@ -28,6 +28,7 @@ tts.to(device)
 
 result_dir = "results"
 
+@spaces.GPU
 def predict(prompt, upload_reference_audio, microphone_reference_audio, reference_audio_type):
     global result_dir
     output_file_path = os.path.join(result_dir, 'output.wav')
@@ -43,7 +44,7 @@ def predict(prompt, upload_reference_audio, microphone_reference_audio, referenc
     )
     return gr.Audio(value=output_file_path, type='filepath')
 
-
+@spaces.GPU
 def main():
     if torch.cuda.is_available():
         device = "cuda"