import json

import gradio as gr
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

# Run on GPU with half precision when available; otherwise fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model_id = "openai/whisper-large-v3"

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)

processor = AutoProcessor.from_pretrained(model_id)

pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=30,
    batch_size=16,
    return_timestamps=True,
    torch_dtype=torch_dtype,
    device=device,
)


def process_audio(audio_file):
    # Placeholder kept from early wiring: returns a hardcoded array of JSON
    # objects instead of a real transcription.
    output_data = [
        {"label": "cat", "confidence": 0.8},
        {"label": "dog", "confidence": 0.7},
        {"label": "bird", "confidence": 0.6},
    ]
    return json.dumps(output_data)


def process(audio):
    # `audio` is the filepath Gradio hands us; transcribe it rather than a
    # hardcoded "audio.mp3".
    chunks = pipe(audio)["chunks"]
    # Timestamps arrive as tuples, which json.dumps cannot serialize;
    # convert them to lists first.
    for item in chunks:
        item["timestamp"] = list(item["timestamp"])
    return json.dumps(chunks)


# type="filepath" makes Gradio pass the upload as a path string, which the
# pipeline accepts directly. Bind the real transcription function, not the stub.
iface = gr.Interface(fn=process, inputs=gr.Audio(type="filepath"), outputs="text")
iface.launch()
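# A minimal sketch of exercising `process` without the web UI, assuming a
# local file "sample.mp3" exists (a hypothetical path, not part of the app).
# Since iface.launch() blocks, run this in place of launching when testing.
# Each chunk serializes like: {"timestamp": [0.0, 5.0], "text": " ..."}.
#
#   print(process("sample.mp3"))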