|
import os |
|
import torch |
|
import torchaudio |
|
import tempfile |
|
from transformers import WhisperProcessor, WhisperForConditionalGeneration |
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
import streamlit as st |
|
|
|
# Model cache location (container path).
CACHE_DIR = "/app/cache"

# NOTE(review): TRANSFORMERS_CACHE is read when `transformers` is imported,
# which already happened above, so setting the env var here may be too late
# to take effect. We keep it for any late readers, but pass cache_dir=
# explicitly to every from_pretrained call so caching actually works.
os.environ["TRANSFORMERS_CACHE"] = CACHE_DIR
os.makedirs(CACHE_DIR, exist_ok=True)

# Whisper tiny: speech-to-text model + its feature-extractor/tokenizer pair.
whisper_processor = WhisperProcessor.from_pretrained(
    "openai/whisper-tiny", cache_dir=CACHE_DIR
)
whisper_model = WhisperForConditionalGeneration.from_pretrained(
    "openai/whisper-tiny", cache_dir=CACHE_DIR
)

# BERT sequence classifier + matching tokenizer for text feature extraction.
text_model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", cache_dir=CACHE_DIR
)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir=CACHE_DIR)

# Inference-only app: eval mode disables dropout and other train-time behavior.
whisper_model.eval()
text_model.eval()
|
|
|
def transcribe(audio_bytes):
    """Transcribe raw audio bytes to text with the Whisper tiny model.

    Parameters
    ----------
    audio_bytes : bytes
        Raw contents of an audio file in any format torchaudio can decode.

    Returns
    -------
    str
        The decoded transcription.
    """
    # Whisper's feature extractor requires mono audio at exactly 16 kHz.
    target_rate = 16000

    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        tmp.write(audio_bytes)
        tmp_path = tmp.name
    try:
        waveform, sample_rate = torchaudio.load(tmp_path)
    finally:
        # Always clean up; the original leaked the temp file on decode errors.
        os.remove(tmp_path)

    # Downmix multi-channel audio to mono; a bare squeeze() would leave a
    # 2-D array for stereo input and break the feature extractor.
    if waveform.dim() > 1 and waveform.size(0) > 1:
        waveform = waveform.mean(dim=0, keepdim=True)

    # The processor does NOT resample — it only builds features — so we must
    # resample ourselves or non-16kHz files produce garbage transcriptions.
    if sample_rate != target_rate:
        waveform = torchaudio.functional.resample(waveform, sample_rate, target_rate)

    input_features = whisper_processor(
        waveform.squeeze(0).numpy(),
        sampling_rate=target_rate,
        return_tensors="pt",
    ).input_features

    # Inference only: skip autograd bookkeeping during generation.
    with torch.no_grad():
        predicted_ids = whisper_model.generate(input_features)

    transcription = whisper_processor.batch_decode(
        predicted_ids, skip_special_tokens=True
    )[0]
    return transcription
|
|
|
def extract_text_features(text):
    """Return the argmax class index for *text* from the BERT classifier.

    NOTE(review): ``bert-base-uncased`` loaded via
    AutoModelForSequenceClassification has a randomly initialized
    classification head unless a fine-tuned checkpoint is intended —
    the returned index is effectively arbitrary; confirm the checkpoint.

    Parameters
    ----------
    text : str
        Input text; truncated to the model's maximum sequence length.

    Returns
    -------
    int
        Index of the highest-scoring class.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Inference only: no gradients needed.
    with torch.no_grad():
        outputs = text_model(**inputs)
    return outputs.logits.argmax(dim=1).item()
|
|
|
def predict |
|
|