"""Gradio ASR demo.

Transcribes recorded/uploaded audio with Wav2Vec2 or HuBERT and shows the
difference between greedy CTC decoding and beam-search CTC decoding
(via pyctcdecode) side by side.
"""
import nltk
import librosa
import torch
import gradio as gr
from pyctcdecode import build_ctcdecoder
from transformers import Wav2Vec2Processor, AutoModelForCTC

# Sentence tokenizer models needed by fix_transcription_casing().
nltk.download("punkt")

# Load both checkpoints once at startup so each request doesn't pay the
# (multi-second) model-loading cost.
wav2vec2_processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
wav2vec2_model = AutoModelForCTC.from_pretrained("facebook/wav2vec2-base-960h")
hubert_processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft")
hubert_model = AutoModelForCTC.from_pretrained("facebook/hubert-large-ls960-ft")


def return_processor_and_model(model_name):
    """Return the (processor, model) pair for a supported checkpoint name.

    Returns None for unrecognised names (callers that unpack the result
    will then fail loudly rather than silently using the wrong model).
    """
    if model_name == "facebook/wav2vec2-base-960h":
        return wav2vec2_processor, wav2vec2_model
    elif model_name == "facebook/hubert-large-ls960-ft":
        return hubert_processor, hubert_model
    else:
        return None


def load_and_fix_data(input_file):
    """Load an audio file as a mono 16 kHz float waveform."""
    speech, sample_rate = librosa.load(input_file)
    # Down-mix to 1-D by summing the channels.
    # NOTE(review): librosa.load defaults to mono=True, so this branch is
    # normally dead; kept as a safety net for multi-channel arrays.
    if len(speech.shape) > 1:
        speech = speech[:, 0] + speech[:, 1]
    if sample_rate != 16000:
        # orig_sr / target_sr must be keyword arguments: librosa removed
        # the positional form in 0.10 (deprecated since 0.9).
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)
    return speech


def fix_transcription_casing(input_sentence):
    """Capitalize the first character of every sentence in the text."""
    sentences = nltk.sent_tokenize(input_sentence)
    # Slice instead of str.replace(s[0], ..., 1): replace is indirect and
    # the original form raised IndexError on an empty sentence string.
    return " ".join(s[0].upper() + s[1:] if s else s for s in sentences)


def predict_and_ctc_decode(input_file, model_name):
    """Transcribe audio using beam-search CTC decoding (pyctcdecode)."""
    processor, model = return_processor_and_model(model_name)
    speech = load_and_fix_data(input_file)
    input_values = processor(
        speech, return_tensors="pt", sampling_rate=16000
    ).input_values
    # Inference only: no_grad avoids building the autograd graph and makes
    # .numpy() legal without an explicit detach().
    with torch.no_grad():
        logits = model(input_values).logits.cpu().numpy()[0]
    # pyctcdecode expects the model's token vocabulary in index order;
    # the tokenizer's vocab dict preserves that insertion order.
    vocab_list = list(processor.tokenizer.get_vocab().keys())
    decoder = build_ctcdecoder(vocab_list)
    pred = decoder.decode(logits)
    transcribed_text = fix_transcription_casing(pred.lower())
    return transcribed_text


def predict_and_greedy_decode(input_file, model_name):
    """Transcribe audio using greedy (argmax) CTC decoding."""
    processor, model = return_processor_and_model(model_name)
    speech = load_and_fix_data(input_file)
    input_values = processor(
        speech, return_tensors="pt", sampling_rate=16000
    ).input_values
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    pred = processor.batch_decode(predicted_ids)
    transcribed_text = fix_transcription_casing(pred[0].lower())
    return transcribed_text


def return_all_predictions(input_file, model_name):
    """Run both decoding strategies and return their transcripts."""
    return (
        predict_and_ctc_decode(input_file, model_name),
        predict_and_greedy_decode(input_file, model_name),
    )


gr.Interface(
    return_all_predictions,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio"),
        gr.inputs.Dropdown(
            ["facebook/wav2vec2-base-960h", "facebook/hubert-large-ls960-ft"],
            label="Model Name",
        ),
    ],
    outputs=[
        gr.outputs.Textbox(label="Beam CTC decoding"),
        gr.outputs.Textbox(label="Greedy decoding"),
    ],
    title="ASR using Wav2Vec2/ Hubert & pyctcdecode",
    description="Comparing greedy decoder with beam search CTC decoder (https://distill.pub/2017/ctc/), record/ drop your audio!",
    layout="horizontal",
    examples=[
        ["test1.wav", "facebook/wav2vec2-base-960h"],
        ["test2.wav", "facebook/hubert-large-ls960-ft"],
    ],
    theme="huggingface",
).launch()