Update src/streamlit_app.py
src/streamlit_app.py  CHANGED  (+9 -4)
@@ -4,10 +4,15 @@ from transformers import WhisperProcessor, WhisperForConditionalGeneration
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import streamlit as st
 
-
-
-whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
-whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
+@st.cache_resource
+def load_models():
+    whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
+    whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
+    text_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
+    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+    return whisper_processor, whisper_model, text_model, tokenizer
+
+whisper_processor, whisper_model, text_model, tokenizer = load_models()
 
 def transcribe(audio_path):
     waveform, sample_rate = torchaudio.load(audio_path)
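
For context: st.cache_resource is Streamlit's cache for unserializable global resources such as ML models. The decorated load_models() body runs once per server process, and later script reruns reuse the same returned objects instead of downloading and reloading the Whisper and BERT weights on every widget interaction. A minimal, self-contained sketch of the same pattern (the load_whisper name and the st.write call are illustrative, not part of this app):

import streamlit as st
from transformers import WhisperProcessor, WhisperForConditionalGeneration

@st.cache_resource  # body runs once per process; later reruns get the cached objects
def load_whisper():
    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
    return processor, model

processor, model = load_whisper()
st.write("Whisper loaded; reruns reuse the cached model.")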