gdnartea committed on
Commit 3e0dbc5 · verified · 1 Parent(s): 39ee0b2

Update app.py

Files changed (1):
  1. app.py +9 -2
app.py CHANGED
@@ -1,9 +1,16 @@
  import gradio as gr
  from transformers import AutoTokenizer, AutoModelForCausalLM, Speech2TextProcessor, Speech2TextForConditionalGeneration, VitsProcessor, VitsForConditionalGeneration
+ from nemo.collections.asr.models import EncDecMultiTaskModel

- # Load the ASR model and processor
+ # Load the ASR model and processor (TODO: fix the processor handling first)
  asr_processor = Speech2TextProcessor.from_pretrained("/path/to/canary/processor")
- asr_model = Speech2TextForConditionalGeneration.from_pretrained("/path/to/canary/model")
+ asr_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')
+
+ # Update the decoding params
+ decode_cfg = asr_model.cfg.decoding
+ decode_cfg.beam.beam_size = 1
+ asr_model.change_decoding_strategy(decode_cfg)
+

  # Load the text processing model and tokenizer
  proc_tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
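
For context, a minimal sketch of how the Canary model loaded in this commit and the Phi-3 tokenizer might be wired together in the Gradio app. The transcribe() call follows the usage shown on the nvidia/canary-1b model card; the transcribe_and_respond helper, the proc_model load, the generation settings, and the Gradio interface are illustrative assumptions, not part of this commit.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from nemo.collections.asr.models import EncDecMultiTaskModel

# Load the ASR model and switch to greedy decoding, as in this commit
asr_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')
decode_cfg = asr_model.cfg.decoding
decode_cfg.beam.beam_size = 1
asr_model.change_decoding_strategy(decode_cfg)

# Load the text processing model and tokenizer
proc_tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
# Assumed counterpart to the tokenizer; not loaded in this commit
proc_model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct")

def transcribe_and_respond(audio_path):
    # Transcribe the uploaded audio file (NeMo models accept a list of file paths)
    result = asr_model.transcribe([audio_path])[0]
    # Depending on the NeMo version, transcribe() returns strings or Hypothesis objects
    transcript = result if isinstance(result, str) else result.text

    # Feed the transcript to Phi-3 and return its continuation
    inputs = proc_tokenizer(transcript, return_tensors="pt")
    output_ids = proc_model.generate(**inputs, max_new_tokens=128)
    return proc_tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Minimal Gradio wiring: audio file in, generated text out
demo = gr.Interface(fn=transcribe_and_respond,
                    inputs=gr.Audio(type="filepath"),
                    outputs="text")

if __name__ == "__main__":
    demo.launch()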