import gradio as gr
import torch
import librosa
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from datasets import load_dataset

# Load the Whisper model and processor
model_name = "openai/whisper-large-v3-turbo"
processor = WhisperProcessor.from_pretrained(model_name)
model = WhisperForConditionalGeneration.from_pretrained(model_name)

# Load the bigcode/the-stack dataset (loaded here, but not used by transcribe() below)
dataset = load_dataset("bigcode/the-stack", data_dir="data/html", split="train")

def transcribe(audio):
    # Gradio passes a file path; load the recording at the 16 kHz rate Whisper expects
    speech, sr = librosa.load(audio, sr=16000)
    # Convert the waveform into log-Mel input features
    input_features = processor(speech, sampling_rate=sr, return_tensors="pt").input_features
    # Whisper is a sequence-to-sequence model: generate token IDs, then decode them to text
    with torch.no_grad():
        predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    # Return the transcription result
    return transcription[0]

# Gradio interface
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
    title="Whisper Transcription for Developers",
    description="Transcribe developer-related terminology using Whisper and the bigcode dataset.",
)

# Launch the Gradio app
iface.launch()
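
# --- Optional sketch (an assumption, not part of the original script) ---
# The script above loads bigcode/the-stack but never uses it during transcription.
# One possible way to connect the two is Whisper's prompt conditioning: pass a short
# prompt of developer terms via prompt_ids so decoding is biased toward that vocabulary.
# The terms below are hard-coded for illustration; in practice they could be drawn from
# the dataset. To try it, define this function above the Interface and pass
# fn=transcribe_with_prompt instead of fn=transcribe.
def transcribe_with_prompt(audio):
    speech, sr = librosa.load(audio, sr=16000)
    input_features = processor(speech, sampling_rate=sr, return_tensors="pt").input_features
    # Hypothetical prompt of developer terms used to bias decoding
    prompt_ids = processor.get_prompt_ids("HTML, CSS, JavaScript, div, iframe", return_tensors="pt")
    with torch.no_grad():
        predicted_ids = model.generate(input_features, prompt_ids=prompt_ids)
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]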