Update app.py
app.py CHANGED
@@ -41,6 +41,8 @@ frame_asr = FrameBatchMultiTaskAED(
 
 amp_dtype = torch.float16
 
+llm_pipeline = transformers.pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B", model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto")
+
 def convert_audio(audio_filepath, tmpdir, utt_id):
     """
     Convert all files to monochannel 16 kHz wav files.
@@ -222,12 +224,6 @@ def on_src_or_tgt_lang_change(src_lang_value, tgt_lang_value, pnc_value):
     )
     return src_lang, tgt_lang, pnc
 
-llm_pipeline = transformers.pipeline(
-    "text-generation",
-    model="meta-llama/Meta-Llama-3-8B",
-    model_kwargs={"torch_dtype": torch.bfloat16},
-    device_map="auto"
-)
 
 with gr.Blocks(
     title="NeMo Canary Model",
@@ -271,6 +267,7 @@ with gr.Blocks(
             )
 
         with gr.Column():
+
             gr.HTML("<p>Run the model.</p>")
 
             go_button = gr.Button(
@@ -313,7 +310,5 @@ with gr.Blocks(
     )
 
 
-
-
 demo.queue()
 demo.launch(share=True)
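For context: the relocated call builds a Hugging Face transformers text-generation pipeline for Meta-Llama-3-8B, loading the weights in bfloat16 and letting device_map="auto" place them on the available devices. The commit itself never shows the pipeline being invoked, so the snippet below is only a minimal usage sketch; the prompt text and generation parameters (max_new_tokens, do_sample) are illustrative assumptions, not taken from app.py.

import torch
import transformers

# Same construction as the line added at new line 44 of app.py.
llm_pipeline = transformers.pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3-8B",           # gated checkpoint; requires Hugging Face access
    model_kwargs={"torch_dtype": torch.bfloat16}, # load weights in bfloat16 to reduce memory use
    device_map="auto",                            # let accelerate spread layers across GPUs/CPU
)

# Text-generation pipelines return a list of dicts whose "generated_text"
# field holds the prompt plus its continuation (this is the base model,
# not the instruct-tuned variant).
outputs = llm_pipeline(
    "Summarize the following transcript in one sentence:\n...",  # illustrative prompt, not from app.py
    max_new_tokens=64,                                           # assumed generation settings
    do_sample=False,
)
print(outputs[0]["generated_text"])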