Ahsen Khaliq committed
Commit a5805fe · 1 Parent(s): a2676df

Update app.py

Files changed (1)
  1. app.py +7 -16
app.py CHANGED
@@ -1,27 +1,18 @@
-import os
-os.system('pip freeze')
-os.system('pip install transformers --upgrade')
-os.system('pip freeze')
 import soundfile as sf
 import gradio as gr
 import torch
-from transformers import Speech2Text2Processor, SpeechEncoderDecoder
+from transformers import pipeline
+
+asr = pipeline("automatic-speech-recognition", model="facebook/s2t-wav2vec2-large-en-de", feature_extractor="facebook/s2t-wav2vec2-large-en-de")
 
-model = SpeechEncoderDecoder.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
-processor = Speech2Text2Processor.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
-def map_to_array(file):
-    speech, _ = sf.read(file)
-    return speech
 
 def inference(audio):
-    inputs = processor(map_to_array(audio.name), sampling_rate=16_000, return_tensors="pt")
-    generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"])
-    transcription = processor.batch_decode(generated_ids)
-    return transcription[0]
+    translation_de = asr(audio.name)
+    return translation_de[0]
+
 inputs = gr.inputs.Audio(label="Input Audio", type="file")
 outputs = gr.outputs.Textbox(label="Output Text")
 title = "Robust wav2vec 2.0"
 description = "Gradio demo for Robust wav2vec 2.0. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below. Currently supports .wav and .flac files"
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.01027' target='_blank'>Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training</a> | <a href='https://github.com/pytorch/fairseq' target='_blank'>Github Repo</a></p>"
-examples=[['poem.wav']]
-gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
+gr.Interface(inference, inputs, outputs, title=title, description=description, article=article).launch()
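
For reference, a minimal sketch of calling the same pipeline outside Gradio. Assumptions: a recent transformers release, ffmpeg available so the pipeline can decode a file path, and poem.wav (the example file from the previous revision) standing in for any 16 kHz .wav or .flac input.

from transformers import pipeline

# Same English-to-German speech translation checkpoint as app.py.
asr = pipeline(
    "automatic-speech-recognition",
    model="facebook/s2t-wav2vec2-large-en-de",
    feature_extractor="facebook/s2t-wav2vec2-large-en-de",
)

# Passing a file path makes the pipeline decode the audio via ffmpeg;
# in recent transformers releases a single input returns a dict with a "text" key.
result = asr("poem.wav")
print(result["text"])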