anzorq committed on
Commit 6f0ed3e · verified · 1 Parent(s): 3012ae0

Add audio quality improvements for YouTube audio.

Files changed (1)
  1. app.py +41 -11
app.py CHANGED
@@ -7,6 +7,10 @@ from transformers import AutoModelForCTC, Wav2Vec2BertProcessor
 from pytube import YouTube
 from transformers import pipeline
 import re
+from pydub import AudioSegment
+from scipy.io import wavfile
+from scipy.signal import wiener
+import numpy as np
 
 # pipe = pipeline(model="anzorq/w2v-bert-2.0-kbd", device=0) # old model
 pipe = pipeline(model="anzorq/w2v-bert-2.0-kbd-v2", device=0) # new model with a new tokenizer
@@ -24,25 +28,50 @@ reverse_pattern = re.compile('|'.join(re.escape(key) for key in reverse_replacem
 def replace_symbols_back(text):
     return reverse_pattern.sub(lambda match: reverse_replacements[match.group(0)], text)
 
+def normalize_audio(audio_path):
+    audio = AudioSegment.from_file(audio_path, format="mp4")
+    normalized_audio = audio.normalize()
+    normalized_audio.export(audio_path, format="mp4")
+
+def apply_wiener_filter(audio_path):
+    sample_rate, audio_data = wavfile.read(audio_path)
+    filtered_audio = wiener(audio_data)
+    wavfile.write(audio_path, sample_rate, filtered_audio.astype(np.int16))
+
+def resample_audio(audio_path, target_sample_rate=16000):
+    audio, sample_rate = torchaudio.load(audio_path)
+    resampled_audio = torchaudio.transforms.Resample(sample_rate, target_sample_rate)(audio)
+    torchaudio.save(audio_path, resampled_audio, target_sample_rate)
+
 @spaces.GPU
 def transcribe_speech(audio, progress=gr.Progress()):
     if audio is None: # Handle the NoneType error for microphone input
         return "No audio received."
-
+
     progress(0.5, desc="Transcribing audio...")
     transcription = pipe(audio, chunk_length_s=10)['text']
-
+
     return replace_symbols_back(transcription)
 
-def transcribe_from_youtube(url, progress=gr.Progress()):
+def transcribe_from_youtube(url, apply_improvements, progress=gr.Progress()):
     progress(0, "Downloading YouTube audio...")
-    # Download audio from YouTube using pytube
     audio_path = YouTube(url).streams.filter(only_audio=True)[0].download(filename="tmp.mp4")
 
+    if apply_improvements:
+        progress(0.2, "Normalizing audio...")
+        normalize_audio(audio_path)
+
+        progress(0.4, "Applying Wiener filter...")
+        apply_wiener_filter(audio_path)
+
+        progress(0.6, "Resampling audio...")
+        resample_audio(audio_path)
+
+    progress(0.8, "Transcribing audio...")
     transcription = transcribe_speech(audio_path)
 
     os.remove(audio_path)
-
+
     return transcription
 
 def populate_metadata(url):
@@ -62,27 +91,28 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         </div>
         """
     )
-
+
     with gr.Tab("Microphone Input"):
         gr.Markdown("## Transcribe speech from microphone")
         mic_audio = gr.Audio(sources=['microphone','upload'], type="filepath", label="Record or upload an audio")
         transcribe_button = gr.Button("Transcribe")
         transcription_output = gr.Textbox(label="Transcription")
-
+
         transcribe_button.click(fn=transcribe_speech, inputs=mic_audio, outputs=transcription_output)
 
     with gr.Tab("YouTube URL"):
         gr.Markdown("## Transcribe speech from YouTube video")
        youtube_url = gr.Textbox(label="Enter YouTube video URL")
-
+        apply_improvements = gr.Checkbox(label="Apply Audio Improvements", value=True)
+
         with gr.Row():
             img = gr.Image(label="Thumbnail", height=240, width=240, scale=1)
             title = gr.Label(label="Video Title", scale=2)
-
+
         transcribe_button = gr.Button("Transcribe")
         transcription_output = gr.Textbox(label="Transcription", placeholder="Transcription Output", lines=10)
-
-        transcribe_button.click(fn=transcribe_from_youtube, inputs=youtube_url, outputs=transcription_output)
+
+        transcribe_button.click(fn=transcribe_from_youtube, inputs=[youtube_url, apply_improvements], outputs=transcription_output)
         youtube_url.change(populate_metadata, inputs=[youtube_url], outputs=[img, title])
 
 demo.launch()
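
For reference, here is a minimal standalone sketch of the preprocessing chain this commit adds (loudness normalization, Wiener filtering, resampling to 16 kHz). It is an illustration, not part of the commit: the function name preprocess_for_asr and the wav_path argument are hypothetical, it assumes ffmpeg is available for pydub, it assumes torchaudio is imported elsewhere in app.py (resample_audio calls torchaudio but this diff does not add the import), and it exports an intermediate WAV file because scipy.io.wavfile can only parse WAV data, whereas the helpers in the diff operate on the downloaded tmp.mp4 in place.

from pydub import AudioSegment
from scipy.io import wavfile
from scipy.signal import wiener
import numpy as np
import torchaudio

def preprocess_for_asr(src_path, wav_path="tmp.wav", target_sample_rate=16000):
    # 1. Loudness-normalize with pydub and write an intermediate WAV file
    #    (illustrative assumption; the commit re-exports the mp4 in place).
    audio = AudioSegment.from_file(src_path)
    audio.normalize().export(wav_path, format="wav")

    # 2. Wiener-filter the PCM samples, mirroring apply_wiener_filter().
    sample_rate, audio_data = wavfile.read(wav_path)
    filtered = wiener(audio_data.astype(np.float64))
    wavfile.write(wav_path, sample_rate, filtered.astype(np.int16))

    # 3. Resample to the 16 kHz target used by resample_audio().
    waveform, sample_rate = torchaudio.load(wav_path)
    resampled = torchaudio.transforms.Resample(sample_rate, target_sample_rate)(waveform)
    torchaudio.save(wav_path, resampled, target_sample_rate)
    return wav_path

The returned path could then be passed to transcribe_speech() in place of the raw download.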