import os
import json
import gradio as gr
import torch
import torchaudio
from denoiser.demucs import Demucs
from pydub import AudioSegment
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Hugging Face Hub access token (read from the HF_TOKEN environment variable)
auth_token = os.getenv("HF_TOKEN")
# Load the private model
model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
model = AutoModelForSequenceClassification.from_pretrained(model_id, token=auth_token)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
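# Note: this repo id names a Meta Denoiser speech-enhancement project, so treating it
# as a sequence-classification checkpoint is an assumption carried over from the
# original code; the load may fail if the repo lacks a compatible config.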
def transcribe(microphone, file_upload):
    # Prefer the live microphone recording when one is provided.
    # (Parameter order matches the order of the Interface inputs below.)
    file = microphone if microphone is not None else file_upload
    # Note: the Demucs weights are reloaded on every call; hoisting this to module
    # scope would avoid the repeated disk read.
    demucs_model = Demucs(hidden=64)
    state_dict = torch.load("path_to_model_checkpoint", map_location='cpu')  # make sure this points at the correct checkpoint file
    demucs_model.load_state_dict(state_dict)
    # Denoise: add a batch dimension, run Demucs, then peak-normalize to avoid clipping.
    x, sr = torchaudio.load(file)
    out = demucs_model(x[None])[0]
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)
    # Only the denoised audio needs its bitrate reduced before speech recognition.
    # (For PCM WAV the bitrate argument is effectively ignored by ffmpeg; a lossy
    # format such as MP3 would be needed for it to take effect.)
    enhanced = AudioSegment.from_wav('enhanced.wav')
    enhanced.export('enhanced.wav', format="wav", bitrate="256k")
    # The model is assumed to be a text classifier; note that the tokenizer receives
    # the literal file path string rather than a transcript of the audio, so the
    # prediction is a placeholder rather than a real quality score.
    inputs = tokenizer("enhanced.wav", return_tensors="pt")
    outputs = model(**inputs)
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
    # Serialize the probabilities so the Textbox output renders cleanly.
    return "enhanced.wav", json.dumps(predictions.tolist())
demo = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath", label="Speech quality inspection: live microphone recording"),
        gr.Audio(type="filepath", label="Speech quality inspection: original audio file"),
    ],
    outputs=[
        gr.Audio(type="filepath", label="Output"),
        gr.Textbox(label="Model Predictions")
    ],
    title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>Speech Quality Inspection, Noise Removal (Speech Enhancement): Meta Denoiser</a></p>",
    description="Removing noise before recognition can improve speech-recognition accuracy.",
    allow_flagging="never",
    # Each example must supply one value per input; the sample files feed the
    # original-audio input, with no microphone recording.
    examples=[
        [None, "exampleAudio/15s_2020-03-27_sep1.wav"],
        [None, "exampleAudio/13s_2020-03-27_sep2.wav"],
        [None, "exampleAudio/30s_2020-04-23_sep1.wav"],
        [None, "exampleAudio/15s_2020-04-23_sep2.wav"],
    ],
)
demo.launch(debug=True)
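
# To run locally, export the access token first (the model repo is private):
#   export HF_TOKEN=<your token>
#   python app.py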