import os
import random
import tempfile

import gradio as gr
import nltk
import requests
from fpdf import FPDF
from nltk.tokenize import sent_tokenize

# Attempt to download the punkt tokenizer; fall back to a naive splitter if it is unavailable.
try:
    nltk.download("punkt")
except Exception:
    print("NLTK punkt tokenizer download failed. Using custom tokenizer.")


def custom_sent_tokenize(text):
    """Naive fallback sentence splitter used when the punkt data is missing."""
    return text.split(". ")


def transcribe(audio_path):
    """Send the recorded audio to the Groq Whisper endpoint and return a notes PDF."""
    with open(audio_path, "rb") as audio_file:
        audio_data = audio_file.read()

    groq_api_endpoint = "https://api.groq.com/openai/v1/audio/transcriptions"
    headers = {
        # Read the API key from the environment instead of hard-coding it in the source.
        "Authorization": f"Bearer {os.environ.get('GROQ_API_KEY', '')}",
    }
    files = {
        "file": ("audio.wav", audio_data, "audio/wav"),
    }
    data = {
        "model": "whisper-large-v3-turbo",
        "response_format": "json",
        "language": "en",
    }

    response = requests.post(groq_api_endpoint, headers=headers, files=files, data=data)

    if response.status_code == 200:
        result = response.json()
        transcript = result.get("text", "No transcription available.")
        return generate_notes(transcript)
    else:
        error_msg = response.json().get("error", {}).get("message", "Unknown error.")
        print(f"API Error: {error_msg}")
        return create_error_pdf(f"API Error: {error_msg}")


def generate_notes(transcript):
    """Derive simple long/short questions and MCQs from the transcript and write them to a PDF."""
    try:
        sentences = sent_tokenize(transcript)
    except LookupError:
        sentences = custom_sent_tokenize(transcript)

    long_questions = [f"What is meant by '{sentence}'?" for sentence in sentences[:5]]
    short_questions = [f"Define '{sentence.split()[0]}'." for sentence in sentences[:5]]

    mcqs = []
    for sentence in sentences[:5]:
        first_word = sentence.split()[0]
        options = [first_word] + random.sample(["Option 1", "Option 2", "Option 3"], 3)
        random.shuffle(options)  # avoid always listing the correct answer first
        mcq = {
            "question": f"What is '{first_word}'?",
            "options": options,
            "answer": first_word,
        }
        mcqs.append(mcq)

    pdf_path = create_pdf(transcript, long_questions, short_questions, mcqs)
    return pdf_path


def create_pdf(transcript, long_questions, short_questions, mcqs):
    """Render the transcript, questions, and MCQs into a temporary PDF file."""
    pdf = FPDF()
    pdf.add_page()

    pdf.set_font("Arial", "B", 16)
    pdf.cell(200, 10, "Transcription Notes", ln=True, align="C")

    # FPDF's core fonts are latin-1 only, so characters outside that range are replaced.
    pdf.set_font("Arial", "", 12)
    pdf.multi_cell(0, 10, f"Transcription:\n{transcript.encode('latin1', 'replace').decode('latin1')}\n\n")

    pdf.set_font("Arial", "B", 14)
    pdf.cell(200, 10, "Long Questions", ln=True)
    pdf.set_font("Arial", "", 12)
    for question in long_questions:
        pdf.multi_cell(0, 10, f"- {question.encode('latin1', 'replace').decode('latin1')}\n")

    pdf.set_font("Arial", "B", 14)
    pdf.cell(200, 10, "Short Questions", ln=True)
    pdf.set_font("Arial", "", 12)
    for question in short_questions:
        pdf.multi_cell(0, 10, f"- {question.encode('latin1', 'replace').decode('latin1')}\n")

    pdf.set_font("Arial", "B", 14)
    pdf.cell(200, 10, "Multiple Choice Questions (MCQs)", ln=True)
    pdf.set_font("Arial", "", 12)
    for mcq in mcqs:
        pdf.multi_cell(0, 10, f"Q: {mcq['question'].encode('latin1', 'replace').decode('latin1')}")
        for option in mcq["options"]:
            pdf.multi_cell(0, 10, f"  - {option.encode('latin1', 'replace').decode('latin1')}")
        pdf.multi_cell(0, 10, f"Answer: {mcq['answer'].encode('latin1', 'replace').decode('latin1')}\n")

    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
        pdf.output(temp_pdf.name)
        pdf_path = temp_pdf.name

    return pdf_path


def create_error_pdf(message):
    """Write an error message into a temporary PDF so the UI always returns a file."""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", "B", 16)
    pdf.cell(200, 10, "Error Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.multi_cell(0, 10, message.encode('latin1', 'replace').decode('latin1'))

    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
        pdf.output(temp_pdf.name)
        error_pdf_path = temp_pdf.name

    return error_pdf_path


iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.File(label="Download PDF with Notes or Error Report"),
    title="Voice to Text Converter and Notes Generator",
)

iface.launch()
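# Local setup sketch (assumptions, not taken from the original script): the Groq key is
# expected in the GROQ_API_KEY environment variable, and the imports above roughly
# correspond to `pip install gradio requests fpdf nltk`. A typical run might look like:
#
#   export GROQ_API_KEY=gsk_...
#   python app.py
#
# The script name and package list are illustrative; adjust them to your project layout.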