import gradio as gr
import librosa
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from datasets import load_dataset

# Load the Whisper model and processor
model_name = "openai/whisper-large-v3-turbo"
processor = WhisperProcessor.from_pretrained(model_name)
model = WhisperForConditionalGeneration.from_pretrained(model_name)

# Load the CoIR-Retrieval/CodeSearchNet-php-queries-corpus dataset (not used by the transcribe function below)
ds = load_dataset("CoIR-Retrieval/CodeSearchNet-php-queries-corpus")
def transcribe(audio_path):
    # Load the audio file and resample it to the 16 kHz rate Whisper expects
    audio, sr = librosa.load(audio_path, sr=16000)
    input_features = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
    # Whisper is a sequence-to-sequence model, so decode with generate()
    # rather than taking an argmax over encoder logits
    with torch.no_grad():
        predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    # Return the transcribed text
    return transcription[0]
# Gradio interface
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Whisper Transcription for Developers",
    description="Transcribe developer-related terminology using Whisper.",
)

# Launch the Gradio app
iface.launch()
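
# For quick local testing without the web UI, the transcription function can
# also be called directly on an audio file (hypothetical path shown):
#
#   print(transcribe("sample_recording.wav"))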