import os
import time
import json
import gradio as gr
import torch
import torchaudio
import numpy as np
from denoiser.demucs import Demucs
from pydub import AudioSegment
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Read the Hugging Face Hub access token from the environment
auth_token = os.getenv("HF_TOKEN")
# Load the private model from the Hugging Face Hub
# NOTE: the repo ships a denoiser; loading it with AutoModelForSequenceClassification
# and AutoTokenizer assumes it also provides a text-classification head and tokenizer.
model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
model = AutoModelForSequenceClassification.from_pretrained(model_id, token=auth_token)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
def transcribe(microphone, file_upload):
    # Prefer the live microphone recording; fall back to the uploaded file
    file = microphone if microphone is not None else file_upload
    # Build the Demucs denoiser and load its weights
    # (the checkpoint path below is a placeholder; point it at the real file)
    demucs_model = Demucs(hidden=64)
    state_dict = torch.load("path_to_model_checkpoint", map_location='cpu')
    demucs_model.load_state_dict(state_dict)
    demucs_model.eval()
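    # Denoise: run Demucs on the waveform and scale down only if the peak exceeds 1.0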
    x, sr = torchaudio.load(file)
    out = demucs_model(x[None])[0]
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)
    # Only the denoised output needs its bitrate reduced before speech recognition
    enhanced = AudioSegment.from_wav('enhanced.wav')
    enhanced.export('enhanced.wav', format="wav", bitrate="256k")
    # Assumes the Hub model is a text classifier. NOTE: this tokenizes the
    # literal filename string, not the audio content; a real pipeline would
    # feed features extracted from the enhanced audio instead.
    inputs = tokenizer("enhanced.wav", return_tensors="pt")
    outputs = model(**inputs)
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
    # Serialize the scores so they render in the Textbox output
    return "enhanced.wav", json.dumps(predictions.tolist())
demo = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath", label="Speech quality inspection: real-time microphone recording"),
        gr.Audio(type="filepath", label="Speech quality inspection: original audio file"),
    ],
    outputs=[
        gr.Audio(type="filepath", label="Output"),
        gr.Textbox(label="Model Predictions"),
    ],
title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>语音质检噪音去除 (语音增强):Meta Denoiser</a>",
description="为了提升语音识别的效果,可以在识别前先进行噪音去除",
allow_flagging="never",
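    # Sample clips bundled with the Space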
    examples=[
        ["exampleAudio/15s_2020-03-27_sep1.wav"],
        ["exampleAudio/13s_2020-03-27_sep2.wav"],
        ["exampleAudio/30s_2020-04-23_sep1.wav"],
        ["exampleAudio/15s_2020-04-23_sep2.wav"],
    ],
)
demo.launch(debug=True)
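# Run locally with `python app.py` (set HF_TOKEN in the environment first).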