# Voice-To-Text / app.py
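"""Gradio app: transcribe an uploaded or recorded audio file with Groq's Whisper
transcription endpoint, then ask a Groq-hosted LLM to turn the transcript into
study notes and practice questions, delivered to the user as a downloadable PDF.

Requires the GROQ_API_KEY environment variable to be set.
"""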
import gradio as gr
import requests
from fpdf import FPDF
import nltk
import os
import tempfile
from nltk.tokenize import sent_tokenize
from groq import Groq

# The Groq API key must be provided via the GROQ_API_KEY environment variable.
api_key = os.environ.get("GROQ_API_KEY")
# Attempt to download the punkt tokenizer; fall back to a naive splitter if it fails.
try:
    nltk.download("punkt")
except Exception:
    print("NLTK punkt tokenizer download failed. Using custom tokenizer.")


def custom_sent_tokenize(text):
    """Naive fallback sentence tokenizer used when NLTK's punkt is unavailable."""
    return text.split(". ")
def transcribe(audio_path):
    """Send the audio file to Groq's Whisper endpoint and return a PDF of notes."""
    if audio_path is None:
        return create_error_pdf("No audio was provided. Please record or upload a file.")

    with open(audio_path, "rb") as audio_file:
        audio_data = audio_file.read()

    groq_api_endpoint = "https://api.groq.com/openai/v1/audio/transcriptions"
    headers = {
        "Authorization": f"Bearer {api_key}",
    }
    files = {
        "file": ("audio.wav", audio_data, "audio/wav"),
    }
    data = {
        "model": "whisper-large-v3-turbo",
        "response_format": "json",
        "language": "en",
    }

    response = requests.post(groq_api_endpoint, headers=headers, files=files, data=data)

    if response.status_code == 200:
        result = response.json()
        transcript = result.get("text", "No transcription available.")
        return generate_notes(transcript)
    else:
        try:
            error_msg = response.json().get("error", {}).get("message", "Unknown error.")
        except ValueError:
            error_msg = response.text or "Unknown error."
        print(f"API Error: {error_msg}")
        return create_error_pdf(f"API Error: {error_msg}")
def generate_notes(transcript):
    """Ask a Groq-hosted LLM to turn the transcript into notes and practice questions."""
    client = Groq(api_key=api_key)
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": (
                    "You are an expert question generator. From the given content, "
                    "generate one long question, as many short questions as reasonable, "
                    "and multiple-choice questions. Also provide concise study notes."
                ),
            },
            {
                "role": "user",
                "content": transcript,
            },
        ],
        model="llama3-8b-8192",
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        stop=None,
        stream=False,
    )
    notes = chat_completion.choices[0].message.content

    # Generate and save a structured PDF containing the transcript and the generated material.
    pdf_path = create_pdf(notes, transcript)
    return pdf_path
def create_pdf(notes, transcript):
    """Build a PDF containing the transcript followed by the generated notes and questions."""
    pdf = FPDF()
    pdf.add_page()

    # Title
    pdf.set_font("Arial", "B", 16)
    pdf.cell(200, 10, "Transcription Notes and Questions", ln=True, align="C")

    # Transcription content (FPDF's core fonts are Latin-1 only, so replace unsupported characters)
    pdf.set_font("Arial", "", 12)
    pdf.multi_cell(0, 10, f"Transcription:\n{transcript.encode('latin1', 'replace').decode('latin1')}\n\n")

    # Generated notes and questions
    pdf.set_font("Arial", "B", 14)
    pdf.cell(200, 10, "Notes and Questions", ln=True)
    pdf.set_font("Arial", "", 12)
    pdf.multi_cell(0, 10, f"{notes.encode('latin1', 'replace').decode('latin1')}\n")

    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
        pdf.output(temp_pdf.name)
        pdf_path = temp_pdf.name

    return pdf_path
def create_error_pdf(message):
    """Build a small PDF that reports an error message to the user."""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", "B", 16)
    pdf.cell(200, 10, "Error Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.multi_cell(0, 10, message.encode('latin1', 'replace').decode('latin1'))

    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
        pdf.output(temp_pdf.name)
        error_pdf_path = temp_pdf.name

    return error_pdf_path
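
# To run locally (assuming the gradio, requests, fpdf, nltk, and groq packages are
# installed and GROQ_API_KEY is exported in the shell): `python app.py`, then open
# the local URL that Gradio prints, record or upload audio, and download the PDF.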
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.File(label="Download PDF with Notes or Error Report"),
    title="Voice to Text Converter and Notes Generator",
    description="This app converts audio to text and generates academic questions, including long, short, and multiple-choice questions.",
)

if __name__ == "__main__":
    iface.launch()