Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ llama_pipe = pipeline(
     model_kwargs={"torch_dtype": torch.bfloat16},
     device_map="auto",
 )
-
+
 
 
 torch.random.manual_seed(0)
@@ -34,7 +34,7 @@ proc_tokenizer = AutoTokenizer.from_pretrained(proc_model_name)
 
 
 
-
+
 torch.random.manual_seed(0)
 proc_model_name = "microsoft/Phi-3-mini-4k-instruct-gguf"
 proc_model = AutoModelForCausalLM.from_pretrained(proc_model_name)
@@ -42,7 +42,11 @@ proc_model.to("cpu")
 proc_tokenizer = AutoTokenizer.from_pretrained(proc_model_name)
 '''
 
-
+torch.random.manual_seed(0)
+proc_model_name = "microsoft/Phi-3-mini-4k-instruct-onnx"
+proc_model = AutoModelForCausalLM.from_pretrained(proc_model_name)
+proc_model.to("cpu")
+proc_tokenizer = AutoTokenizer.from_pretrained(proc_model_name)
 
 
 SAMPLE_RATE = 16000  # Hz
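For context, the sketch below shows a standalone version of the processor-model setup the added lines point at. It is not taken from the Space itself: it swaps in the standard PyTorch checkpoint microsoft/Phi-3-mini-4k-instruct (an assumption), since the -gguf and -onnx repositories referenced in the diff ship GGUF/ONNX weights rather than a regular transformers checkpoint, so a plain AutoModelForCausalLM.from_pretrained call may not load them directly.

# Minimal sketch, assuming the PyTorch checkpoint "microsoft/Phi-3-mini-4k-instruct"
# (not the -gguf/-onnx repos from the diff) and a recent transformers release that
# supports the Phi-3 architecture natively (older releases may need trust_remote_code=True).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.random.manual_seed(0)

proc_model_name = "microsoft/Phi-3-mini-4k-instruct"  # assumption: standard PyTorch weights
proc_model = AutoModelForCausalLM.from_pretrained(
    proc_model_name,
    torch_dtype=torch.float32,  # CPU-friendly dtype
)
proc_model.to("cpu")
proc_tokenizer = AutoTokenizer.from_pretrained(proc_model_name)

# Quick smoke test: generate a few tokens on CPU.
inputs = proc_tokenizer("Hello, Phi-3!", return_tensors="pt")
with torch.no_grad():
    out = proc_model.generate(**inputs, max_new_tokens=8)
print(proc_tokenizer.decode(out[0], skip_special_tokens=True))

Keeping the processor model on CPU mirrors the proc_model.to("cpu") line in the diff, which leaves accelerator memory free for the pipeline model loaded with device_map="auto".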