import gradio as gr
import torch
import whisper
from transformers import pipeline

# Check whether a GPU is available and pick a device for each library:
# Whisper expects a device string ("cuda"/"cpu"), while the transformers
# pipeline expects an index (0 for the first GPU, -1 for CPU).
use_gpu = torch.cuda.is_available()
whisper_device = "cuda" if use_gpu else "cpu"
pipeline_device = 0 if use_gpu else -1
print("Running on GPU" if use_gpu else "Running on CPU")

# Load the tiny Whisper model
whisper_model = whisper.load_model("tiny", device=whisper_device)
# whisper_model = whisper.load_model("base", device=whisper_device)  # larger, more accurate alternative

# Load the text summarization model from Hugging Face
summarizer = pipeline(task="summarization", model="facebook/bart-large-cnn", device=pipeline_device)

# Function to transcribe and summarize the audio file
def transcribe_and_summarize(audio):
    # Step 1: Transcribe the audio using Whisper
    transcription_result = whisper_model.transcribe(audio)
    transcription = transcription_result['text']

    # Step 2: Summarize the transcription
    summary = summarizer(transcription, min_length=10, max_length=100)
    summary_text = summary[0]['summary_text']

    return transcription, summary_text

# Define the Gradio interface
interface = gr.Interface(
    fn=transcribe_and_summarize,  # Function to run
    inputs=gr.Audio(type="filepath", label="Upload your audio file"),  # Input audio field
    outputs=[gr.Textbox(label="Transcription"), gr.Textbox(label="Summary")],  # Output fields
    title="Whisper Tiny Transcription and Summarization",
    description="Upload an audio file to get the transcription from the Whisper tiny model and a summarized version generated with Hugging Face."
)

# Launch the Gradio app
interface.launch(debug=True)
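
# ---------------------------------------------------------------------------
# Optional quick test (sketch): the pipeline can also be exercised without the
# Gradio UI by calling the function directly. The file name "sample.wav" is a
# placeholder assumption; replace it with a real audio file on disk before
# uncommenting these lines.
# ---------------------------------------------------------------------------
# transcription, summary = transcribe_and_summarize("sample.wav")
# print("Transcription:", transcription)
# print("Summary:", summary)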