Update app.py
app.py
CHANGED
@@ -251,14 +251,12 @@ def process_audio_from_video(video_path):
 
 
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-
-
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
-
+tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.1-GPTQ")
+model = AutoModelForCausalLM.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.1-GPTQ")
 
 
 def transcribe_and_predict_video(video, chat_history=[]):
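The added lines load a GPTQ-quantized Mistral-7B-Instruct checkpoint in place of the earlier model_name-based loading, dropping the torch_dtype=torch.float16 and device_map="auto" arguments. As a sketch only (not part of this commit), the snippet below shows one way the loaded tokenizer and model could be used once transcribe_and_predict_video has a transcript string; reply_to_transcript, the [INST] prompt construction, and the generation settings are illustrative assumptions, since the body of that function is not shown in this hunk. Note that running a GPTQ checkpoint through transformers typically needs the GPTQ backend packages (optimum plus auto-gptq or gptqmodel) and a CUDA device, so keeping device_map="auto" is usually still worthwhile even though the explicit float16 dtype is no longer needed for already-quantized weights.

# Sketch (not from the commit): using the GPTQ model to answer a transcript.
# reply_to_transcript and the prompt format are illustrative assumptions.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "TheBloke/Mistral-7B-Instruct-v0.1-GPTQ"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" places the quantized weights on the GPU when one is
# available; GPTQ checkpoints generally need a CUDA backend to run.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

def reply_to_transcript(transcript: str) -> str:
    # Mistral-Instruct models expect the [INST] ... [/INST] prompt format.
    prompt = f"[INST] {transcript} [/INST]"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)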