Noumida committed
Commit eba970d · verified · 1 Parent(s): 3e32781

Update app.py

Files changed (1)
  1. app.py +47 -41
app.py CHANGED
@@ -1,52 +1,58 @@
+from __future__ import annotations
 import os
+import gradio as gr
 import torch
 import torchaudio
-import gradio as gr
+import spaces
 import nemo.collections.asr as nemo_asr
 
-# Select device
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-# Load CTC and RNNT models from AI4Bharat
-asr_ctc = nemo_asr.models.EncDecCTCModelBPE.from_pretrained("ai4bharat/indicwhisper-ctc-indic").to(device)
-asr_rnnt = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("ai4bharat/indicwhisper-rnnt-indic").to(device)
-
-# All 22 scheduled Indian languages
-language_options = [
-    "Assamese", "Bengali", "Bodo", "Dogri", "Gujarati", "Hindi",
-    "Kannada", "Kashmiri", "Konkani", "Maithili", "Malayalam",
-    "Manipuri", "Marathi", "Nepali", "Odia", "Punjabi", "Sanskrit",
-    "Santali", "Sindhi", "Tamil", "Telugu", "Urdu"
-]
-
-# CTC ASR function
-def run_asr_ctc(audio_path, source_lang):
-    asr_ctc.change_vocabulary(language=source_lang)
-    return asr_ctc.transcribe(paths2audio_files=[audio_path])[0]
-
-# RNNT ASR function
-def run_asr_rnnt(audio_path, source_lang):
-    asr_rnnt.change_vocabulary(language=source_lang)
-    return asr_rnnt.transcribe(paths2audio_files=[audio_path])[0]
-
-# Gradio UI
+LANGUAGE_NAME_TO_CODE = {
+    "Assamese": "as", "Bengali": "bn", "Bodo": "br", "Dogri": "doi",
+    "Gujarati": "gu", "Hindi": "hi", "Kannada": "kn", "Kashmiri": "ks",
+    "Konkani": "kok", "Maithili": "mai", "Malayalam": "ml", "Manipuri": "mni",
+    "Marathi": "mr", "Nepali": "ne", "Odia": "or", "Punjabi": "pa",
+    "Sanskrit": "sa", "Santali": "sat", "Sindhi": "sd", "Tamil": "ta",
+    "Telugu": "te", "Urdu": "ur"
+}
+
+DESCRIPTION = """IndicConformer: Dual-Decoder ASR for Indian Languages"""
+
+device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+model = nemo_asr.models.EncDecCTCModel.from_pretrained("ai4bharat/IndicConformer").to(device)
+model.eval()
+
+@spaces.GPU
+def transcribe_ctc_and_rnnt(audio_path, language_name):
+    lang_id = LANGUAGE_NAME_TO_CODE[language_name]
+    waveform, sample_rate = torchaudio.load(audio_path)
+    waveform = waveform.mean(dim=0, keepdim=True) if waveform.shape[0] > 1 else waveform
+    waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
+    waveform_np = waveform.squeeze().numpy()
+
+    model.cur_decoder = "ctc"
+    ctc = model.transcribe([waveform_np], batch_size=1, language_id=lang_id)[0][0]
+
+    model.cur_decoder = "rnnt"
+    rnnt = model.transcribe([waveform_np], batch_size=1, language_id=lang_id)[0][0]
+
+    return ctc, rnnt
+
 with gr.Blocks() as demo:
-    gr.Markdown("## AI4Bharat Indic ASR (CTC & RNNT)")
-
-    with gr.Tab("CTC Transcription"):
-        with gr.Row():
-            input_audio = gr.Audio(type="filepath", label="Upload Audio")
-            source_lang = gr.Dropdown(choices=language_options, label="Language", value="Hindi")
-        output_text_ctc = gr.Textbox(label="CTC Transcription Output")
-        ctc_button = gr.Button("Transcribe (CTC)")
-        ctc_button.click(run_asr_ctc, inputs=[input_audio, source_lang], outputs=output_text_ctc)
-
-    with gr.Tab("RNNT Transcription"):
-        with gr.Row():
-            input_audio_rnnt = gr.Audio(type="filepath", label="Upload Audio")
-            source_lang_rnnt = gr.Dropdown(choices=language_options, label="Language", value="Hindi")
-        output_text_rnnt = gr.Textbox(label="RNNT Transcription Output")
-        rnnt_button = gr.Button("Transcribe (RNNT)")
-        rnnt_button.click(run_asr_rnnt, inputs=[input_audio_rnnt, source_lang_rnnt], outputs=output_text_rnnt)
-
-demo.launch()
+    gr.Markdown(DESCRIPTION)
+    with gr.Row():
+        with gr.Column():
+            audio = gr.Audio(label="Upload or record audio", type="filepath")
+            lang = gr.Dropdown(
+                label="Select language",
+                choices=LANGUAGE_NAME_TO_CODE.keys(),
+                value="Hindi"
+            )
+            transcribe_btn = gr.Button("Transcribe (CTC + RNNT)")
+        with gr.Column():
+            ctc_output = gr.Textbox(label="CTC Transcription")
+            rnnt_output = gr.Textbox(label="RNNT Transcription")
+
+    transcribe_btn.click(fn=transcribe_ctc_and_rnnt, inputs=[audio, lang], outputs=[ctc_output, rnnt_output])
+
+if __name__ == "__main__":
+    demo.queue().launch()
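The new dual-decoder path can also be exercised outside the Space UI by calling transcribe_ctc_and_rnnt directly. The sketch below is not part of this commit: the file name "sample_hi.wav" is a placeholder, it assumes the ai4bharat/IndicConformer checkpoint downloads at import time, and outside a ZeroGPU Space the spaces.GPU decorator should simply have no effect.

# Illustrative local smoke test (not part of the commit).
# "sample_hi.wav" is a placeholder; any WAV works, since app.py downmixes to mono and resamples to 16 kHz.
from app import transcribe_ctc_and_rnnt

ctc_text, rnnt_text = transcribe_ctc_and_rnnt("sample_hi.wav", "Hindi")
print("CTC :", ctc_text)
print("RNNT:", rnnt_text)

Compared with the previous version, which kept separate CTC and RNNT checkpoints in memory, the rewrite loads a single IndicConformer model and switches decoders via model.cur_decoder, so both transcripts come from one set of encoder weights.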