# asr-pyctcdecode / app.py
# Author: Vaibhav Srivastav
# Adds pyctcdecode beam-search decoding to a Wav2Vec2 ASR Gradio demo.
# Commit: 0448aa2
import nltk
import librosa
import torch
import gradio as gr
from pyctcdecode import build_ctcdecoder
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
# Download the Punkt sentence tokenizer models used by
# nltk.sent_tokenize in fix_transcription_casing below.
nltk.download("punkt")
# Load the pretrained Wav2Vec2 CTC model and its processor
# (feature extractor + tokenizer) from the Hugging Face Hub.
# NOTE(review): downloads at import time — first startup needs network.
model_name = "facebook/wav2vec2-base-960h"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)
def load_data(input_file):
    """Load an audio file and return a mono waveform at 16 kHz.

    Parameters
    ----------
    input_file : str
        Path to the audio file to read (Gradio passes a filepath).

    Returns
    -------
    numpy.ndarray
        1-D float waveform resampled to 16 kHz, ready for the
        Wav2Vec2 processor.
    """
    # Let librosa do the resampling and downmixing in one step:
    # sr=16000 resamples to the model's expected rate, and mono=True
    # averages the channels (the original summed them, which can clip,
    # and indexed stereo as speech[:, 0] although librosa returns
    # channels on the FIRST axis). This also fixes the positional
    # librosa.resample(speech, sample_rate, 16000) call, which breaks
    # on librosa >= 0.10 where orig_sr/target_sr are keyword-only.
    speech, _sample_rate = librosa.load(input_file, sr=16000, mono=True)
    return speech
def fix_transcription_casing(input_sentence):
    """Capitalize the first letter of each sentence in *input_sentence*.

    Parameters
    ----------
    input_sentence : str
        Text to re-case (the ASR output is lowercased by the caller).

    Returns
    -------
    str
        The sentences re-joined with single spaces, each starting with
        an uppercase letter.
    """
    sentences = nltk.sent_tokenize(input_sentence)
    # s[:1].upper() + s[1:] uppercases exactly the first character and
    # is safe on an empty string — the original
    # s.replace(s[0], s[0].capitalize(), 1) raised IndexError on "".
    return " ".join(s[:1].upper() + s[1:] for s in sentences)
def predict_and_decode(input_file):
    """Transcribe an audio file with Wav2Vec2 + pyctcdecode beam search.

    Parameters
    ----------
    input_file : str
        Path to the recorded audio file (from the Gradio microphone).

    Returns
    -------
    str
        The transcription, lowercased and then re-cased with
        sentence-initial capitals.
    """
    speech = load_data(input_file)

    # Convert the waveform into model input features.
    input_values = processor(speech, return_tensors="pt", sampling_rate=16000).input_values

    # Inference only — disable autograd to avoid building a graph.
    with torch.no_grad():
        logits = model(input_values).logits

    # Build the beam-search decoder over the model's CTC vocabulary.
    vocab_list = list(processor.tokenizer.get_vocab().keys())
    decoder = build_ctcdecoder(vocab_list)

    # pyctcdecode expects a 2-D numpy array of shape (time, vocab);
    # the model returns a torch tensor with a leading batch dim, so
    # drop the batch axis and convert. Passing the raw tensor (as the
    # original did) fails inside decoder.decode.
    pred = decoder.decode(logits[0].numpy())

    # The CTC vocabulary is all upper case; lowercase the output and
    # restore sentence-initial capitalization.
    transcribed_text = fix_transcription_casing(pred.lower())
    return transcribed_text
# Build and launch the Gradio demo around predict_and_decode.
# NOTE(review): this uses the legacy pre-3.x Gradio component API
# (gr.inputs / gr.outputs, source=/optional= kwargs) — it matches the
# Gradio version this Space was written against; newer Gradio removed
# these names, so upgrading the dependency requires rewriting this call.
gr.Interface(predict_and_decode,
inputs = gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker"),
outputs = gr.outputs.Textbox(label="Output Text"),
title="ASR using Wav2Vec 2.0 & pyctcdecode",
description = "Wav2Vec2 in-action",
layout = "horizontal",
examples = [["test.wav"]], theme="huggingface").launch()