import os

# Install Whisper from source at runtime (useful in notebook/Spaces environments)
os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import whisper

model = whisper.load_model("large")


def inference(audio):
    # Load the uploaded file and pad/trim it to Whisper's 30-second window
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram on the model's device; match the
    # model's expected number of mel bins (large-v3 uses 128, not 80)
    mel = whisper.log_mel_spectrogram(audio, n_mels=model.dims.n_mels).to(model.device)

    # Detect the spoken language from the spectrogram
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode in fp32 so the demo also runs on CPU
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    print(result.text)
    # Return only the transcript: the interface declares a single text output
    return result.text


iface = gr.Interface(
    fn=inference,
    inputs=gr.Audio(type="filepath", label="Upload an audio file (.mp3, .wav, etc.)"),
    outputs="text",
)

# Launch the Gradio interface
iface.launch()