roychao19477 committed on
Commit a5096b9 · 1 Parent(s): 8c03ebe

Test on lengths

Files changed (1)
  1. app.py +0 -121
app.py CHANGED
@@ -237,124 +237,3 @@ iface = gr.Interface(
 iface.launch()
 
 
-
-ckpt = "ckpts/SEMamba_advanced.pth"
-cfg_f = "recipes/SEMamba_advanced.yaml"
-
-# load config
-with open(cfg_f, 'r') as f:
-    cfg = yaml.safe_load(f)
-
-
-# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-device = "cuda"
-model = SEMamba(cfg).to(device)
-#sdict = torch.load(ckpt, map_location=device)
-#model.load_state_dict(sdict["generator"])
-#model.eval()
-
-@spaces.GPU
-def enhance(filepath, model_name):
-    # Load model based on selection
-    ckpt_path = {
-        "VCTK-Demand": "ckpts/SEMamba_advanced.pth",
-        "VCTK+DNS": "ckpts/vd.pth"
-    }[model_name]
-
-    print("Loading:", ckpt_path)
-    model.load_state_dict(torch.load(ckpt_path, map_location=device)["generator"])
-    model.eval()
-    with torch.no_grad():
-        # load & resample
-        wav, orig_sr = librosa.load(filepath, sr=None)
-        noisy_wav = wav.copy()
-        if orig_sr != 16000:
-            wav = librosa.resample(wav, orig_sr=orig_sr, target_sr=16000)
-        x = torch.from_numpy(wav).float().to(device)
-        norm = torch.sqrt(len(x) / torch.sum(x ** 2))
-        #x = (x * norm).unsqueeze(0)
-        x = (x * norm)
-
-        # split into 4s segments (64000 samples)
-        segment_len = 4 * 16000
-        chunks = x.split(segment_len)
-        enhanced_chunks = []
-
-        for chunk in chunks:
-            if len(chunk) < segment_len:
-                #pad = torch.zeros(segment_len - len(chunk), device=chunk.device)
-                pad = (torch.randn(segment_len - len(chunk), device=chunk.device) * 1e-4)
-                chunk = torch.cat([chunk, pad])
-            chunk = chunk.unsqueeze(0)
-
-            amp, pha, _ = mag_phase_stft(chunk, 400, 100, 400, 0.3)
-            amp2, pha2, _ = model(amp, pha)
-            out = mag_phase_istft(amp2, pha2, 400, 100, 400, 0.3)
-            out = (out / norm).squeeze(0)
-            enhanced_chunks.append(out)
-
-        out = torch.cat(enhanced_chunks)[:len(x)].cpu().numpy()  # trim padding
-
-        # back to original rate
-        if orig_sr != 16000:
-            out = librosa.resample(out, orig_sr=16000, target_sr=orig_sr)
-
-        # Normalize
-        peak = np.max(np.abs(out))
-        if peak > 0.05:
-            out = out / peak * 0.85
-
-        # write file
-        sf.write("enhanced.wav", out, orig_sr)
-
-        # spectrograms
-        fig, axs = plt.subplots(1, 2, figsize=(16, 4))
-
-        # noisy
-        D_noisy = librosa.stft(noisy_wav, n_fft=512, hop_length=256)
-        S_noisy = librosa.amplitude_to_db(np.abs(D_noisy), ref=np.max)
-        librosa.display.specshow(S_noisy, sr=orig_sr, hop_length=256, x_axis="time", y_axis="hz", ax=axs[0], vmax=0)
-        axs[0].set_title("Noisy Spectrogram")
-
-        # enhanced
-        D_clean = librosa.stft(out, n_fft=512, hop_length=256)
-        S_clean = librosa.amplitude_to_db(np.abs(D_clean), ref=np.max)
-        librosa.display.specshow(S_clean, sr=orig_sr, hop_length=256, x_axis="time", y_axis="hz", ax=axs[1], vmax=0)
-        #librosa.display.specshow(S_clean, sr=16000, hop_length=512, x_axis="time", y_axis="hz", ax=axs[1], vmax=0)
-        axs[1].set_title("Enhanced Spectrogram")
-
-        plt.tight_layout()
-
-        return "enhanced.wav", fig
-
-#with gr.Blocks() as demo:
-#    gr.Markdown(ABOUT)
-#    input_audio = gr.Audio(label="Input Audio", type="filepath", interactive=True)
-#    enhance_btn = gr.Button("Enhance")
-#    output_audio = gr.Audio(label="Enhanced Audio", type="filepath")
-#    plot_output = gr.Plot(label="Spectrograms")
-#
-#    enhance_btn.click(fn=enhance, inputs=input_audio, outputs=[output_audio, plot_output])
-#
-#demo.queue().launch()
-
-with gr.Blocks() as demo:
-    gr.Markdown(ABOUT)
-    input_audio = gr.Audio(label="Input Audio", type="filepath", interactive=True)
-    model_choice = gr.Radio(
-        label="Choose Model (The use of VCTK+DNS is recommended)",
-        choices=["VCTK-Demand", "VCTK+DNS"],
-        value="VCTK-Demand"
-    )
-    enhance_btn = gr.Button("Enhance")
-    output_audio = gr.Audio(label="Enhanced Audio", type="filepath")
-    plot_output = gr.Plot(label="Spectrograms")
-
-    enhance_btn.click(
-        fn=enhance,
-        inputs=[input_audio, model_choice],
-        outputs=[output_audio, plot_output]
-    )
-    gr.Markdown("**Note**: The current models are trained on 16kHz audio. Therefore, any input audio not sampled at 16kHz will be automatically resampled before enhancement.")
-
-demo.queue().launch()
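For context on the "Test on lengths" experiment being removed: the deleted enhance() function handled arbitrary input lengths by processing the waveform in fixed 4-second windows at 16 kHz, padding the final window with very low-level noise, enhancing each window independently, and trimming the concatenated output back to the input length. A minimal standalone sketch of that chunking pattern follows; enhance_in_chunks and enhance_fn are illustrative names, not part of the repository.

    import torch

    def enhance_in_chunks(x, enhance_fn, segment_len=4 * 16000):
        # x: 1-D waveform tensor at 16 kHz
        # enhance_fn: callable mapping a (1, segment_len) batch to a (1, segment_len) output
        enhanced = []
        for chunk in x.split(segment_len):
            if len(chunk) < segment_len:
                # pad the final chunk with low-level noise, as the removed code did
                pad = torch.randn(segment_len - len(chunk), device=chunk.device) * 1e-4
                chunk = torch.cat([chunk, pad])
            enhanced.append(enhance_fn(chunk.unsqueeze(0)).squeeze(0))
        return torch.cat(enhanced)[:len(x)]  # trim the padding from the last window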
 