Update nemo_align.py
nemo_align.py  CHANGED  (+1, -3)
@@ -442,13 +442,11 @@ def align_tdt_to_ctc_timestamps(tdt_txt, model, audio_filepath):
         raise ValueError("Currently supporting hybrid models")

     if torch.cuda.is_available():
-        enable = True
         viterbi_device = torch.device('cuda')
     else:
-        enable = False
         viterbi_device = torch.device('cpu')

-    with torch.cuda.amp.autocast(enabled=
+    with torch.cuda.amp.autocast(enabled=False, dtype=torch.bfloat16):
         with torch.inference_mode():
             hypotheses = model.transcribe([audio_filepath], return_hypotheses=True, batch_size=1)

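For context, a minimal sketch of how the updated block runs after this change, assuming a NeMo hybrid TDT-CTC ASR model; the checkpoint name and audio path are illustrative stand-ins, not values taken from this Space:

import torch
import nemo.collections.asr as nemo_asr

# Illustrative checkpoint name; the Space loads its own model elsewhere in nemo_align.py.
model = nemo_asr.models.ASRModel.from_pretrained("nvidia/parakeet-tdt_ctc-1.1b")

# Mirrors the diff: CUDA availability now only selects the Viterbi device;
# the removed `enable` flag no longer gates autocast.
viterbi_device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

audio_filepath = "sample.wav"  # hypothetical input file

# After this commit autocast is hard-disabled (enabled=False), so transcription
# runs in the model's native precision; inference_mode skips autograd tracking.
with torch.cuda.amp.autocast(enabled=False, dtype=torch.bfloat16):
    with torch.inference_mode():
        hypotheses = model.transcribe([audio_filepath], return_hypotheses=True, batch_size=1)

# `hypotheses` then feeds the alignment step that runs on `viterbi_device`.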