import os

import soundfile as sf
import torch
from pydub import AudioSegment


def preprocess_audio(input_audio_path, output_audio_path):
    """
    Converts audio to 16 kHz mono WAV format.
    """
    audio = AudioSegment.from_file(input_audio_path)
    audio = audio.set_frame_rate(16000).set_channels(1)
    audio.export(output_audio_path, format="wav")
    return output_audio_path


def split_audio(audio_path, chunk_length_ms=30000):
    """
    Splits audio into chunks of the specified length.
    """
    audio = AudioSegment.from_file(audio_path)
    return [audio[i : i + chunk_length_ms] for i in range(0, len(audio), chunk_length_ms)]


def transcribe_chunk(audio_chunk, chunk_index, whisper_models):
    """
    Transcribes a single audio chunk using the pre-loaded Whisper model.
    """
    # Write the chunk to a temporary WAV file so soundfile can read it as a float array.
    temp_path = f"temp_chunk_{chunk_index}.wav"
    audio_chunk.export(temp_path, format="wav")
    audio, _ = sf.read(temp_path)
    # Convert the waveform to input features and decode with Whisper.
    inputs = whisper_models["processor"](audio, sampling_rate=16000, return_tensors="pt")
    input_features = inputs.input_features.to(whisper_models["device"])
    predicted_ids = whisper_models["model"].generate(input_features)
    transcription = whisper_models["processor"].batch_decode(predicted_ids, skip_special_tokens=True)[0]
    os.remove(temp_path)
    return transcription


def speech_to_text_long(audio_path, whisper_models):
    """
    Transcribes a long audio file by splitting it into 30-second chunks.
    """
    # Normalize the input to 16 kHz mono WAV, chunk it, transcribe each chunk, then clean up.
    processed_audio_path = "processed_audio.wav"
    preprocess_audio(audio_path, processed_audio_path)
    chunks = split_audio(processed_audio_path)
    transcriptions = [transcribe_chunk(chunk, idx, whisper_models) for idx, chunk in enumerate(chunks)]
    os.remove(processed_audio_path)
    return " ".join(transcriptions)