# import gradio as gr
# import torch
# from transformers import pipeline, AutoTokenizer
# from nemo.collections.asr.models import EncDecMultiTaskModel

# # load model
# canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')

# # update decode params
# decode_cfg = canary_model.cfg.decoding
# decode_cfg.beam.beam_size = 1
# canary_model.change_decoding_strategy(decode_cfg)

# pipe = pipeline(
#     "automatic-speech-recognition", 
#     model="nvidia/canary-1b"
# )

# # pipe = pipeline(
# #     "text-generation", 
# #     model="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF", 
# #     model_kwargs={"torch_dtype": torch.bfloat16}, 
# #     device_map="auto"
# # )

# gr.Interface.from_pipeline(pipe,
#                            title="ASR",
#                            description="Using pipeline with Canary-1B",
#                            ).launch(inbrowser=True)

import gradio as gr
import json
import librosa
import os
import soundfile as sf
import tempfile
import uuid

import torch

from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchMultiTaskAED
from nemo.collections.asr.parts.utils.transcribe_utils import get_buffered_pred_feat_multitaskAED
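
# assumed dependencies (not pinned in the original file):
#   pip install gradio librosa soundfile torch nemo_toolkit[asr]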

SAMPLE_RATE = 16000 # Hz
MAX_AUDIO_MINUTES = 180 # won't try to transcribe if longer than this

model = ASRModel.from_pretrained("nvidia/canary-1b")
model.eval()

# make sure beam size is always 1 for consistency
model.change_decoding_strategy(None)
decoding_cfg = model.cfg.decoding
decoding_cfg.beam.beam_size = 1
model.change_decoding_strategy(decoding_cfg)

# setup for buffered inference
model.cfg.preprocessor.dither = 0.0
model.cfg.preprocessor.pad_to = 0

feature_stride = model.cfg.preprocessor['window_stride']
model_stride_in_secs = feature_stride * 8 # FastConformer subsamples by a factor of 8
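
# FrameBatchMultiTaskAED performs buffered inference: long audio is processed in
# fixed 40 s chunks (frame_len) within a 40 s buffer (total_buffer), 16 chunks
# per batch, so recordings longer than a single model pass can be transcribed.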

frame_asr = FrameBatchMultiTaskAED(
	asr_model=model,
	frame_len=40.0,
	total_buffer=40.0,
	batch_size=16,
)

amp_dtype = torch.float16

def convert_audio(audio_filepath, tmpdir, utt_id):
	"""
	Convert all files to monochannel 16 kHz wav files.
	Do not convert and raise error if audio too long.
	Returns output filename and duration.
	"""

	data, sr = librosa.load(audio_filepath, sr=None, mono=True)

	duration = librosa.get_duration(y=data, sr=sr)

	if duration / 60.0 > MAX_AUDIO_MINUTES:
		raise gr.Error(
			f"This demo can transcribe up to {MAX_AUDIO_MINUTES} minutes of audio. "
			"If you wish, you may trim the audio using the Audio viewer in Step 1 "
			"(click on the scissors icon to start trimming audio)."
		)

	if sr != SAMPLE_RATE:
		data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)

	out_filename = os.path.join(tmpdir, utt_id + '.wav')

	# save output audio
	sf.write(out_filename, data, SAMPLE_RATE)

	return out_filename, duration


def transcribe(audio_filepath, src_lang, tgt_lang, pnc):

	if audio_filepath is None:
		raise gr.Error("Please provide some input audio: either upload an audio file or use the microphone")
	
	utt_id = uuid.uuid4()
	with tempfile.TemporaryDirectory() as tmpdir:
		converted_audio_filepath, duration = convert_audio(audio_filepath, tmpdir, str(utt_id))

		# map src_lang and tgt_lang from long versions to short
		LANG_LONG_TO_LANG_SHORT = {
			"English": "en",
			"Spanish": "es",
			"French": "fr",
			"German": "de",
		}
		if src_lang not in LANG_LONG_TO_LANG_SHORT:
			raise ValueError(f"src_lang must be one of {list(LANG_LONG_TO_LANG_SHORT)}")
		src_lang = LANG_LONG_TO_LANG_SHORT[src_lang]

		if tgt_lang not in LANG_LONG_TO_LANG_SHORT:
			raise ValueError(f"tgt_lang must be one of {list(LANG_LONG_TO_LANG_SHORT)}")
		tgt_lang = LANG_LONG_TO_LANG_SHORT[tgt_lang]
		

		# infer taskname from src_lang and tgt_lang
		if src_lang == tgt_lang:
			taskname = "asr"
		else:
			taskname = "s2t_translation"

		# update pnc variable to be "yes" or "no"
		pnc = "yes" if pnc else "no"

		# make manifest file and save
		manifest_data = {
			"audio_filepath": converted_audio_filepath,
			"source_lang": src_lang,
			"target_lang": tgt_lang,
			"taskname": taskname,
			"pnc": pnc,
			"answer": "predict",
			"duration": str(duration),
		}

		manifest_filepath = os.path.join(tmpdir, f'{utt_id}.json')

		with open(manifest_filepath, 'w') as fout:
			line = json.dumps(manifest_data)
			fout.write(line + '\n')
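
		# the manifest now contains one JSON line per utterance, e.g.
		# (illustrative values):
		# {"audio_filepath": "/tmp/.../<uuid>.wav", "source_lang": "en",
		#  "target_lang": "de", "taskname": "s2t_translation", "pnc": "yes",
		#  "answer": "predict", "duration": "12.3"}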

		# call transcribe, passing in manifest filepath
		if duration < 40: # short enough for a single 40-second pass
			output_text = model.transcribe(manifest_filepath)[0]
		else: # do buffered inference
			# use autocast only on CUDA (addresses the original TODO about
			# machines without CUDA; a disabled autocast is a no-op)
			with torch.autocast(device_type=model.device.type, dtype=amp_dtype, enabled=(model.device.type == "cuda")):
				with torch.no_grad():
					hyps = get_buffered_pred_feat_multitaskAED(
						frame_asr,
						model.cfg.preprocessor,
						model_stride_in_secs,
						model.device,
						manifest=manifest_filepath,
						filepaths=None,
					)

					output_text = hyps[0].text

	return output_text
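

# NOTE: on_src_or_tgt_lang_change is referenced by the .change() callbacks below
# but was never defined in this file; this is a minimal sketch of the missing
# callback. It assumes the intent is to keep the dropdowns on language pairs
# Canary-1b supports (same-language ASR in en/es/fr/de, plus translation only
# to or from English); the original callback's exact behavior is unknown.
def on_src_or_tgt_lang_change(src_lang_value, tgt_lang_value, pnc_value):
	"""
	If the two languages differ and neither is English, the pair is not a
	supported translation direction, so fall back to same-language ASR by
	setting tgt_lang to match src_lang.
	Returns the (possibly adjusted) values for src_lang, tgt_lang and pnc.
	"""
	if src_lang_value != tgt_lang_value and "English" not in (src_lang_value, tgt_lang_value):
		tgt_lang_value = src_lang_value
	return src_lang_value, tgt_lang_value, pnc_value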

with gr.Blocks(
	title="NeMo Canary Model",
	css="""
		textarea { font-size: 18px;}
		#model_output_text_box span {
			font-size: 18px;
			font-weight: bold;
		}
	""",
	theme=gr.themes.Default(text_size=gr.themes.sizes.text_lg) # make text slightly bigger (default is text_md )
) as demo:

	gr.HTML("<h1 style='text-align: center'>NeMo Canary model: Transcribe & Translate audio</h1>")

	with gr.Row():
		with gr.Column():
			gr.HTML(
				"<p><b>Step 1:</b> Upload an audio file or record with your microphone.</p>"

				"<p style='color: #A0A0A0;'>This demo supports audio files up to 10 mins long. "
				"You can transcribe longer files locally with this NeMo "
				"<a href='https://github.com/NVIDIA/NeMo/blob/main/examples/asr/speech_multitask/speech_to_text_aed_chunked_infer.py'>script</a>.</p>"
			)

			audio_file = gr.Audio(sources=["microphone", "upload"], type="filepath")

			gr.HTML("<p><b>Step 2:</b> Choose the input and output language.</p>")

			src_lang = gr.Dropdown(
				choices=["English", "Spanish", "French", "German"],
				value="English",
				label="Input audio is spoken in:"
			)

			with gr.Column():
				tgt_lang = gr.Dropdown(
					choices=["English", "Spanish", "French", "German"],
					value="English",
					label="Transcribe in language:"
				)
				pnc = gr.Checkbox(
					value=True,
					label="Punctuation & Capitalization in transcript?",
				)

		with gr.Column():

			gr.HTML("<p><b>Step 3:</b> Run the model.</p>")

			go_button = gr.Button(
				value="Run model",
				variant="primary", # make "primary" so it stands out (default is "secondary")
			)

			model_output_text_box = gr.Textbox(
				label="Model Output",
				elem_id="model_output_text_box",
			)

	with gr.Row():

		gr.HTML(
			"<p style='text-align: center'>"
				"🐤 <a href='https://huggingface.co/nvidia/canary-1b' target='_blank'>Canary model</a> | "
				"🧑‍💻 <a href='https://github.com/NVIDIA/NeMo' target='_blank'>NeMo Repository</a>"
			"</p>"
		)

	go_button.click(
		fn=transcribe,
		inputs=[audio_file, src_lang, tgt_lang, pnc],
		outputs=[model_output_text_box],
	)

	# call on_src_or_tgt_lang_change whenever src_lang or tgt_lang dropdown menus are changed
	src_lang.change(
		fn=on_src_or_tgt_lang_change,
		inputs=[src_lang, tgt_lang, pnc],
		outputs=[src_lang, tgt_lang, pnc],
	)
	tgt_lang.change(
		fn=on_src_or_tgt_lang_change,
		inputs=[src_lang, tgt_lang, pnc],
		outputs=[src_lang, tgt_lang, pnc],
	)


demo.queue()
demo.launch(share=True)