import os

# Install Whisper from GitHub at startup (common on hosted demos such as Hugging Face Spaces)
os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import whisper

# Load the large Whisper model once so every request reuses it
model = whisper.load_model("large")

def inference(audio):
    # Load the uploaded file and pad/trim it to the 30-second window Whisper decodes at once
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram on the model's device
    # (n_mels is taken from the model so this also works with large-v3 checkpoints)
    mel = whisper.log_mel_spectrogram(audio, n_mels=model.dims.n_mels).to(model.device)

    # Detect the spoken language
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode without fp16 so the demo also runs on CPU
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    print(result.text)
    # The interface declares a single text output, so return only the transcript
    return result.text
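
# Note: pad_or_trim() keeps only the first 30 seconds of audio. A minimal sketch of
# handling longer recordings with Whisper's built-in chunked transcription is shown
# below; transcribe_full is an illustrative name and is not wired into the interface.
def transcribe_full(audio_path):
    # model.transcribe() accepts a file path and processes audio of arbitrary length
    result = model.transcribe(audio_path, fp16=False)
    return result["text"]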
    
iface = gr.Interface(
    fn=inference,
    inputs=gr.Audio(type="filepath", label="Upload an audio file (.mp3, .wav, etc.)"),
    outputs="text",
)

# Launch the Gradio interface
iface.launch()