Update app.py
app.py CHANGED
@@ -5,19 +5,20 @@ import torch
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 asr = pipeline(model="asif00/whisper-bangla")
-asr.to(device=device)
+# asr.to(device=device)
 ser = pipeline("text2text-generation", model="asif00/mbart_bn_error_correction")
 ser.to(device=device)
 
 @spaces.GPU
 def transcribe(audio):
     text = asr(audio)["text"]
+    print(text)
     return text
 
 
 @spaces.GPU
 def correction(text):
-    corrected_text = ser(text)
+    corrected_text = ser(text)["generated_text"]
     print(corrected_text)
     return corrected_text
 
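For reference, below is a minimal sketch of how the updated module might look in full. It assumes the imports that sit above this hunk (import spaces, torch, and transformers' pipeline) and omits the Gradio UI wiring below it. Two details depart from the hunk and are assumptions, not the commit itself: a transformers text2text-generation pipeline normally returns a list of dicts, so the sketch indexes the first result with [0]["generated_text"]; and ser.to(device=device) is commented out as well, since pipeline objects are placed on a device via the pipeline(..., device=...) argument rather than .to().

# Minimal sketch of the updated module (assumed imports above the hunk;
# the Gradio UI wiring below the hunk is omitted).
import spaces
import torch
from transformers import pipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

asr = pipeline(model="asif00/whisper-bangla")
# asr.to(device=device)  # Pipeline objects have no .to(); pass device= to pipeline() instead
ser = pipeline("text2text-generation", model="asif00/mbart_bn_error_correction")
# ser.to(device=device)  # commented out here for the same reason (departs from the hunk)


@spaces.GPU
def transcribe(audio):
    # ASR pipelines return a dict with a "text" field for a single input
    text = asr(audio)["text"]
    print(text)
    return text


@spaces.GPU
def correction(text):
    # text2text-generation pipelines return a list of dicts, so the first
    # result is indexed here (assumption; the hunk indexes the dict directly)
    corrected_text = ser(text)[0]["generated_text"]
    print(corrected_text)
    return corrected_text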