Update app.py
app.py
CHANGED
@@ -1,38 +1,35 @@
 import os
 import time
 import json
-import random
 import gradio as gr
 import torch
 import torchaudio
 import numpy as np
-from scipy.io import wavfile
-import scipy.signal as sps
 from denoiser.demucs import Demucs
 from pydub import AudioSegment
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 
-#
-auth_token = os.getenv("
+# Set the Hugging Face Hub access token
+auth_token = os.getenv("HF_TOKEN")
 
-#
+# Load the private model
 model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
-model = AutoModelForSequenceClassification.from_pretrained(model_id,
-tokenizer = AutoTokenizer.from_pretrained(model_id,
+model = AutoModelForSequenceClassification.from_pretrained(model_id, use_auth_token=auth_token)
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=auth_token)
 
 def transcribe(file_upload, microphone):
     file = microphone if microphone is not None else file_upload
     demucs_model = Demucs(hidden=64)
-    state_dict = torch.load(
+    state_dict = torch.load("path_to_model_checkpoint", map_location='cpu')  # make sure to provide the correct model file path
     demucs_model.load_state_dict(state_dict)
     x, sr = torchaudio.load(file)
     out = demucs_model(x[None])[0]
     out = out / max(out.abs().max().item(), 1)
     torchaudio.save('enhanced.wav', out, sr)
-    enhanced = AudioSegment.from_wav('enhanced.wav')
+    enhanced = AudioSegment.from_wav('enhanced.wav')  # only the denoised audio needs its bitrate lowered before speech recognition
     enhanced.export('enhanced.wav', format="wav", bitrate="256k")
 
-    #
+    # Assumes the model is used for text classification
     inputs = tokenizer(enhanced, return_tensors="pt")
     outputs = model(**inputs)
     predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
@@ -42,17 +39,15 @@ def transcribe(file_upload, microphone):
 demo = gr.Interface(
     fn=transcribe,
     inputs=[
-        gr.Audio(type="filepath", label="
-        gr.Audio(type="filepath", label="
+        gr.Audio(type="filepath", label="Speech quality inspection: live microphone recording"),
+        gr.Audio(type="filepath", label="Speech quality inspection: original audio file"),
     ],
     outputs=[
         gr.Audio(type="filepath", label="Output"),
         gr.Textbox(label="Model Predictions")
     ],
-    title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'
-    description=
-        "To improve speech recognition results, noise removal can be performed before recognition"
-    ),
+    title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>Speech quality inspection noise removal (speech enhancement): Meta Denoiser</a>",
+    description="To improve speech recognition results, noise removal can be performed before recognition",
     allow_flagging="never",
     examples=[
         ["exampleAudio/15s_2020-03-27_sep1.wav"],
@@ -62,4 +57,4 @@ demo = gr.Interface(
     ],
 )
 
-demo.launch(debug=True)
+demo.launch(debug=True)
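The updated transcribe() loads the Demucs weights from a hard-coded placeholder path ("path_to_model_checkpoint"). A minimal sketch of how the checkpoint could instead be fetched from the Hugging Face Hub with the same token, assuming a hypothetical repo_id and filename (neither appears in this commit):

import os
import torch
from huggingface_hub import hf_hub_download
from denoiser.demucs import Demucs

auth_token = os.getenv("HF_TOKEN")

# Hypothetical checkpoint location; substitute the real repo_id and filename.
ckpt_path = hf_hub_download(
    repo_id="DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser",  # assumption
    filename="denoiser.th",  # assumption
    token=auth_token,
)

demucs_model = Demucs(hidden=64)
demucs_model.load_state_dict(torch.load(ckpt_path, map_location="cpu"))
demucs_model.eval()

Loading the denoiser once at module level, rather than inside every transcribe() call, would also avoid re-reading the checkpoint on each request.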
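Note that enhanced is a pydub AudioSegment while the tokenizer expects text, so the classification block at the end of transcribe() would fail as written. A minimal sketch of that block, reusing the model and tokenizer loaded at the top of app.py and assuming a transcript string of the enhanced audio is produced elsewhere (no such step exists in this commit):

import torch

# Hypothetical transcript of enhanced.wav, produced by a separate speech-recognition step.
transcript = "..."

inputs = tokenizer(transcript, return_tensors="pt", truncation=True)
with torch.no_grad():
    outputs = model(**inputs)
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
predicted_label = model.config.id2label[int(predictions.argmax(dim=-1))]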