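"""Gradio demo: English speech recognition with NVIDIA NeMo's Conformer-Transducer Large model."""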
import os
import tempfile
import time
import uuid

import gradio as gr
import librosa
import nemo.collections.asr as nemo_asr
import soundfile
import torch
SAMPLE_RATE = 16000  # NeMo ASR models expect 16 kHz audio

# Load the pretrained Conformer-Transducer model and freeze it for inference
model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("stt_en_conformer_transducer_large")
model.change_decoding_strategy(None)  # None restores the model's default decoding config
model.eval()
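# Assumption: the Space runs CPU inference; on a GPU host the model could be
# moved with model.cuda() before calling model.transcribe().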
def process_audio_file(file):
    # Load at the native sampling rate; the librosa default would resample to 22,050 Hz
    data, sr = librosa.load(file, sr=None)

    if sr != SAMPLE_RATE:
        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)

    # Downmix to a single (mono) channel
    data = librosa.to_mono(data)
    return data
def transcribe(Audio, state=""):
# Grant additional context
time.sleep(1)
if state is None:
state = ""
audio_data = process_audio_file(Audio)
with tempfile.TemporaryDirectory() as tmpdir:
audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav')
soundfile.write(audio_path, audio_data, SAMPLE_RATE)
transcriptions = model.transcribe([audio_path])
# if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
if type(transcriptions) == tuple and len(transcriptions) == 2:
transcriptions = transcriptions[0]
transcriptions = transcriptions[0]
state = state + transcriptions + " "
return state, state
iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath"),
        "state",
    ],
    outputs=[
        "textbox",
        "state",
    ],
    layout="horizontal",
    theme="huggingface",
    title="NeMo Streaming Conformer Transducer Large - English",
    description="Demo for English speech recognition using Conformer Transducers",
    allow_flagging="never",
    live=True,
)
iface.launch()
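# Note (assumption): this script targets the legacy Gradio 2.x API (gr.inputs.Audio,
# plus the layout= and theme= Interface kwargs). Newer Gradio releases replace
# gr.inputs.Audio with gr.Audio and drop those kwargs, so running this as written
# likely requires a pinned older gradio version in requirements.txt.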