import os
import tempfile

# Point the Hugging Face cache at a writable location *before* importing
# transformers: the library reads TRANSFORMERS_CACHE at import time, so
# setting it after the import has no effect.
os.environ["TRANSFORMERS_CACHE"] = "/app/cache"
os.makedirs("/app/cache", exist_ok=True)

import torch
import torchaudio
import streamlit as st
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Load Whisper (speech-to-text) and BERT (text classification) once at startup.
whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
# NOTE: bert-base-uncased ships no fine-tuned classification head, so the head
# below is randomly initialized; its outputs are meaningless until the model is
# fine-tuned or replaced with a fine-tuned checkpoint.
text_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
def transcribe(audio_bytes):
    """Run Whisper speech-to-text on raw WAV bytes."""
    # torchaudio needs a file path, so spill the bytes to a temp file first.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        tmp.write(audio_bytes)
        tmp_path = tmp.name
    waveform, sample_rate = torchaudio.load(tmp_path)
    os.remove(tmp_path)
    # Whisper's feature extractor expects 16 kHz mono audio; downmix and
    # resample if the upload differs.
    if waveform.size(0) > 1:
        waveform = waveform.mean(dim=0, keepdim=True)
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
    input_features = whisper_processor(
        waveform.squeeze().numpy(), sampling_rate=16000, return_tensors="pt"
    ).input_features
    with torch.no_grad():
        predicted_ids = whisper_model.generate(input_features)
    return whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
def extract_text_features(text):
    """Return the argmax class index from the BERT classification head."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = text_model(**inputs)
    return outputs.logits.argmax(dim=1).item()
# NOTE: the original file is truncated at this point; the body below is a
# minimal reconstruction that simply chains the two helpers above.
def predict(audio_bytes):
    """Transcribe audio, then classify the resulting text."""
    transcription = transcribe(audio_bytes)
    label = extract_text_features(transcription)
    return transcription, label
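# --- Illustrative usage (not part of the original, truncated file) ---
# A minimal Streamlit front end wiring the helpers together: upload a WAV
# file, transcribe it, and display the (untrained) BERT class index. Widget
# labels and layout here are assumptions, not the original UI.
st.title("Audio transcription demo")
uploaded = st.file_uploader("Upload a WAV file", type=["wav"])
if uploaded is not None:
    transcription, label = predict(uploaded.read())
    st.write("Transcription:", transcription)
    st.write("Predicted class index:", label)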