mojad121 committed on
Commit a4a299e · verified · 1 Parent(s): 3931f52

Update src/streamlit_app.py

Files changed (1): src/streamlit_app.py (+9 -4)
src/streamlit_app.py CHANGED
@@ -4,10 +4,15 @@ from transformers import WhisperProcessor, WhisperForConditionalGeneration
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import streamlit as st
 
-text_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
-tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
-whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
-whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
+@st.cache_resource
+def load_models():
+    whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
+    whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
+    text_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
+    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+    return whisper_processor, whisper_model, text_model, tokenizer
+
+whisper_processor, whisper_model, text_model, tokenizer = load_models()
 
 def transcribe(audio_path):
     waveform, sample_rate = torchaudio.load(audio_path)
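Note (not part of the commit): the change above moves the heavyweight from_pretrained calls behind Streamlit's st.cache_resource decorator. Streamlit re-executes the whole script on every user interaction, so module-level loads would reload all four models on each rerun; a cache_resource-decorated function runs once per server process and later reruns reuse the returned objects. A minimal, self-contained sketch of the pattern, using a placeholder resource instead of the real models:

import streamlit as st

@st.cache_resource
def load_expensive_resource():
    # The body runs only the first time the function is called; subsequent
    # script reruns receive the same cached object instead of rebuilding it.
    print("loading resource ...")  # appears once in the server log, not on every rerun
    return {"status": "loaded"}    # stand-in for a loaded model/processor

resource = load_expensive_resource()
st.write(resource["status"])

st.cache_resource is intended for non-serializable global objects such as ML models or database connections, whereas st.cache_data is meant for serializable data that Streamlit copies on each access.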