import os

import soundfile as sf
import torch
from pydub import AudioSegment


def preprocess_audio(input_audio_path, output_audio_path):
    """Converts audio to 16 kHz mono WAV, the format Whisper expects."""
    audio = AudioSegment.from_file(input_audio_path)
    audio = audio.set_frame_rate(16000).set_channels(1)
    audio.export(output_audio_path, format="wav")
    return output_audio_path


def split_audio(audio_path, chunk_length_ms=30000):
    """Splits audio into chunks of the specified length (default 30 s, Whisper's native window)."""
    audio = AudioSegment.from_file(audio_path)
    return [audio[i : i + chunk_length_ms] for i in range(0, len(audio), chunk_length_ms)]


def transcribe_chunk(audio_chunk, chunk_index, whisper_models):
    """Transcribes a single audio chunk using the pre-loaded Whisper model."""
    # Round-trip through a temporary WAV file so soundfile can read the raw samples.
    temp_path = f"temp_chunk_{chunk_index}.wav"
    audio_chunk.export(temp_path, format="wav")
    try:
        audio, _ = sf.read(temp_path)
        inputs = whisper_models["processor"](audio, sampling_rate=16000, return_tensors="pt")
        input_features = inputs.input_features.to(whisper_models["device"])
        # Inference only: disable gradient tracking to save memory.
        with torch.no_grad():
            predicted_ids = whisper_models["model"].generate(input_features)
        transcription = whisper_models["processor"].batch_decode(
            predicted_ids, skip_special_tokens=True
        )[0]
    finally:
        # Remove the temporary file even if transcription fails.
        os.remove(temp_path)
    return transcription


def speech_to_text_long(audio_path, whisper_models):
    """Transcribes a long audio file by splitting it into chunks and joining the results."""
    processed_audio_path = "processed_audio.wav"
    preprocess_audio(audio_path, processed_audio_path)
    chunks = split_audio(processed_audio_path)
    transcriptions = [
        transcribe_chunk(chunk, idx, whisper_models) for idx, chunk in enumerate(chunks)
    ]
    os.remove(processed_audio_path)
    return " ".join(transcriptions)
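

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how the `whisper_models` dict consumed above might be
# assembled, using the Hugging Face `transformers` Whisper classes. The
# checkpoint name "openai/whisper-small" and the input file "lecture.mp3" are
# assumptions chosen for illustration; any Whisper checkpoint and audio file
# readable by pydub/ffmpeg should work the same way.

from transformers import WhisperForConditionalGeneration, WhisperProcessor


def load_whisper_models(model_name="openai/whisper-small"):
    """Builds the dict expected by transcribe_chunk/speech_to_text_long."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    processor = WhisperProcessor.from_pretrained(model_name)
    model = WhisperForConditionalGeneration.from_pretrained(model_name).to(device)
    model.eval()  # inference mode; pairs with torch.no_grad() in transcribe_chunk
    return {"processor": processor, "model": model, "device": device}


if __name__ == "__main__":
    whisper_models = load_whisper_models()
    text = speech_to_text_long("lecture.mp3", whisper_models)  # hypothetical input file
    print(text)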