Update app.py
app.py CHANGED
@@ -14,13 +14,23 @@ from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchMultiTask
 from nemo.collections.asr.parts.utils.transcribe_utils import get_buffered_pred_feat_multitaskAED
 
 
+model_id = "meta-llama/Meta-Llama-3-8B"
 
+llama_pipe = pipeline(
+    "text-generation",
+    model=model_id,
+    model_kwargs={"torch_dtype": torch.bfloat16},
+    device_map="auto",
+)
+
+
+'''
 torch.random.manual_seed(0)
 proc_model_name = "microsoft/Phi-3-mini-4k-instruct-gguf"
 proc_model = AutoModelForCausalLM.from_pretrained(proc_model_name)
 proc_model.to("cpu")
 proc_tokenizer = AutoTokenizer.from_pretrained(proc_model_name)
-
+'''
 
 
 
@@ -113,7 +123,8 @@ def generate_response(user_input):
 
 def CanaryPhi(audio_filepath):
     user_input = transcribe(audio_filepath)
-    response = generate_response(user_input)
+    #response = generate_response(user_input)
+    response = llama_pipe(user_input)
     return response
 
 
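For reference, a minimal usage sketch of the pipeline this commit introduces. The model id and keyword arguments are taken from the diff above; the prompt, the max_new_tokens value, and the unwrapping of the result are illustrative assumptions, since the transformers text-generation pipeline returns a list of dicts with a "generated_text" key rather than a plain string (the new CanaryPhi returns that raw pipeline output as-is).

import torch
from transformers import pipeline

# Rebuild the pipeline as in the diff above (the Meta-Llama-3-8B weights are
# gated on the Hub, so this assumes access has already been granted).
llama_pipe = pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3-8B",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# The text-generation pipeline returns a list of dicts, so a caller of
# CanaryPhi would typically unwrap "generated_text" before displaying it.
outputs = llama_pipe("Hello, how are you today?", max_new_tokens=64)
response_text = outputs[0]["generated_text"]
print(response_text)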