# Speech-to-text transcription script (Wav2Vec2 CTC inference).
import torch
import soundfile as sf

from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
# Load the processor and model once at import time.
# NOTE(review): the original checkpoint "openbmb/MiniCPM-o-2_6" is an omni
# multimodal model, not a Wav2Vec2 architecture — Wav2Vec2Processor /
# Wav2Vec2ForCTC cannot load it. Use the canonical English CTC checkpoint.
MODEL_NAME = "facebook/wav2vec2-base-960h"
processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_NAME)
# Inference only: disable dropout etc.
model.eval()
def transcribe_audio(file_path: str) -> str:
    """Transcribe a speech audio file to text with the module-level Wav2Vec2 model.

    Args:
        file_path: Path to an audio file readable by ``soundfile``
            (WAV/FLAC/OGG; MP3 with libsndfile >= 1.1).

    Returns:
        The decoded transcription for the first (only) item in the batch.

    Raises:
        ValueError: If the file's sample rate is not 16 kHz, which the
            wav2vec2 feature extractor expects.
    """
    # Load audio as a float array plus its native sample rate.
    audio_input, sample_rate = sf.read(file_path)

    # Downmix multi-channel (stereo) audio to mono — the model expects 1-D input.
    if audio_input.ndim > 1:
        audio_input = audio_input.mean(axis=1)

    # wav2vec2 checkpoints are trained on 16 kHz audio; fail loudly instead of
    # producing silently garbage transcriptions on mismatched rates.
    if sample_rate != 16000:
        raise ValueError(
            f"Expected 16000 Hz audio, got {sample_rate} Hz; resample the file first."
        )

    # Convert the raw waveform into normalized model input tensors.
    input_values = processor(
        audio_input, sampling_rate=sample_rate, return_tensors="pt"
    ).input_values

    # Forward pass without gradient tracking (inference only).
    with torch.no_grad():
        logits = model(input_values).logits

    # Greedy CTC decode: most likely token at each frame, then collapse to text.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)
    return transcription[0]
if __name__ == "__main__":
    # Demo entry point: transcribe a sample file and print the result.
    # NOTE(review): soundfile can only read MP3 with libsndfile >= 1.1;
    # use WAV/FLAC if this fails on older systems.
    audio_file_path = "CAR0005.mp3"
    transcription = transcribe_audio(audio_file_path)
    print("Transcription:", transcription)