Update app.py
app.py
CHANGED
@@ -1,144 +1,72 @@
import gradio as gr
-import requests
-from fpdf import FPDF
import nltk
-import tempfile
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
-from
-import random

-#
try:
-    nltk.
-    nltk.
-except:
-
-
-
-
-def
-    return text
-
-
-
-
-
-    groq_api_endpoint = "https://api.groq.com/openai/v1/audio/transcriptions"
-    headers = {
-        "Authorization": "Bearer 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",  # Replace with your actual API key
-    }
-    files = {
-        'file': ('audio.wav', audio_data, 'audio/wav'),
-    }
-    data = {
-        'model': 'whisper-large-v3-turbo',
-        'response_format': 'json',
-        'language': 'en',
-    }
-
-    response = requests.post(groq_api_endpoint, headers=headers, files=files, data=data)
-
-    if response.status_code == 200:
-        result = response.json()
-        transcript = result.get("text", "No transcription available.")
-        return generate_notes(transcript)
-    else:
-        error_msg = response.json().get("error", {}).get("message", "Unknown error.")
-        print(f"API Error: {error_msg}")
-        return create_error_pdf(f"API Error: {error_msg}")
-
def extract_key_sentences(transcript):
-
-
-
-
-
-
-
-
-
-def generate_questions(sentences):
-    long_questions = [f"Explain the importance of: '{sentence}'." for sentence in sentences]
-    short_questions = [f"What does '{sentence.split()[0]}' refer to?" for sentence in sentences[:5]]
-
-    mcqs = []
-    for sentence in sentences[:5]:
-        words = [word for word in word_tokenize(sentence) if word.isalpha() and word.lower() not in stop_words]
-        if not words:
-            continue
-        key_word = random.choice(words)
-        mcq = {
-            "question": f"What is '{key_word}'?",
-            "options": [key_word] + random.sample(["Option A", "Option B", "Option C"], 3),
-            "answer": key_word
-        }
-        mcqs.append(mcq)
-
-    return long_questions, short_questions, mcqs
-
def generate_notes(transcript):
    key_sentences = extract_key_sentences(transcript)
-    long_questions, short_questions, mcqs = generate_questions(key_sentences)
-    pdf_path = create_pdf(transcript, long_questions, short_questions, mcqs)
-    return pdf_path
-
-def create_pdf(transcript, long_questions, short_questions, mcqs):
-    pdf = FPDF()
-    pdf.add_page()
-
-    pdf.set_font("Arial", "B", 16)
-    pdf.cell(200, 10, "Transcription Notes", ln=True, align="C")
-
-    pdf.set_font("Arial", "", 12)
-    pdf.multi_cell(0, 10, f"Transcription:\n{transcript.encode('latin1', 'replace').decode('latin1')}\n\n")
-
-    pdf.set_font("Arial", "B", 14)
-    pdf.cell(200, 10, "Long Questions", ln=True)
-    pdf.set_font("Arial", "", 12)
-    for question in long_questions:
-        pdf.multi_cell(0, 10, f"- {question.encode('latin1', 'replace').decode('latin1')}\n")
-
-    pdf.set_font("Arial", "B", 14)
-    pdf.cell(200, 10, "Short Questions", ln=True)
-    pdf.set_font("Arial", "", 12)
-    for question in short_questions:
-        pdf.multi_cell(0, 10, f"- {question.encode('latin1', 'replace').decode('latin1')}\n")
-
-    pdf.set_font("Arial", "B", 14)
-    pdf.cell(200, 10, "Multiple Choice Questions (MCQs)", ln=True)
-    pdf.set_font("Arial", "", 12)
-    for mcq in mcqs:
-        pdf.multi_cell(0, 10, f"Q: {mcq['question'].encode('latin1', 'replace').decode('latin1')}")
-        for option in mcq["options"]:
-            pdf.multi_cell(0, 10, f" - {option.encode('latin1', 'replace').decode('latin1')}")
-        pdf.multi_cell(0, 10, f"Answer: {mcq['answer'].encode('latin1', 'replace').decode('latin1')}\n")
-
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
-        pdf.output(temp_pdf.name)
-        pdf_path = temp_pdf.name

-
-
-
-
-
-
-
-
-

-
-
-

-    return

iface = gr.Interface(
    fn=transcribe,
-    inputs=
-    outputs=
-    title="
)

iface.launch()
import gradio as gr
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
+from sklearn.feature_extraction.text import TfidfVectorizer
+import openai
+import datetime

+# Ensure necessary NLTK resources are downloaded
try:
+    nltk.data.find('tokenizers/punkt')
+    nltk.data.find('corpora/stopwords')
+except LookupError:
+    nltk.download('punkt')
+    nltk.download('stopwords')
+
+# Transcription function (mocked for this example)
+def transcribe_audio(file_path):
+    # Assume some transcription service is being used, and return text as output
+    transcript = "This is a sample transcription of an audio file. It contains information that can be converted into important points for study notes."
+    return transcript
+
+# Function to extract key sentences
def extract_key_sentences(transcript):
+    stop_words = set(stopwords.words("english"))
+    sentences = sent_tokenize(transcript)
+    important_sentences = [
+        sentence for sentence in sentences
+        if any(word.lower() not in stop_words for word in word_tokenize(sentence))
+    ]
+    return important_sentences
+
+# Function to generate study notes from the transcription
def generate_notes(transcript):
    key_sentences = extract_key_sentences(transcript)

+    # Using TfidfVectorizer for scoring and ranking sentences
+    vectorizer = TfidfVectorizer(stop_words='english')
+    tfidf_matrix = vectorizer.fit_transform(key_sentences)
+    scores = tfidf_matrix.sum(axis=1).A1
+    scored_sentences = sorted(zip(scores, key_sentences), reverse=True)
+
+    # Generating notes as a mix of important sentences
+    long_questions = scored_sentences[:3]    # Take top 3 for long questions
+    short_questions = scored_sentences[3:6]  # Next 3 for short questions
+    mcqs = scored_sentences[6:9]             # Following 3 for MCQs

+    notes = {
+        "Long Questions": [sentence for _, sentence in long_questions],
+        "Short Questions": [sentence for _, sentence in short_questions],
+        "MCQs": [sentence for _, sentence in mcqs],
+    }

+    return notes
+
+# Main function for Gradio app
+def transcribe(file):
+    transcript = transcribe_audio(file.name)
+    notes = generate_notes(transcript)
+    return notes

+# Gradio UI setup
iface = gr.Interface(
    fn=transcribe,
+    inputs="file",
+    outputs="json",
+    title="Audio to Study Notes",
+    description="Transcribe audio to extract key sentences for study notes, including Long Questions, Short Questions, and MCQs."
)

+# Run the app
iface.launch()
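
For reference, the TF-IDF ranking step inside generate_notes can be exercised on its own. A minimal sketch of the same scoring (the sample sentences are illustrative, not from the app):

from sklearn.feature_extraction.text import TfidfVectorizer

sentences = [
    "Photosynthesis converts light energy into chemical energy.",
    "It occurs in the chloroplasts of plant cells.",
    "The light reactions produce ATP and NADPH.",
]

vectorizer = TfidfVectorizer(stop_words='english')
tfidf_matrix = vectorizer.fit_transform(sentences)  # sparse matrix, one row per sentence
scores = tfidf_matrix.sum(axis=1).A1                # .A1 flattens the (n, 1) row-sum matrix to a 1-D array
ranked = sorted(zip(scores, sentences), reverse=True)

for score, sentence in ranked:
    print(f"{score:.3f}  {sentence}")

Note that with short transcripts the slices in generate_notes degrade gracefully: scored_sentences[3:6] and scored_sentences[6:9] simply return empty lists when fewer than four or seven sentences survive filtering, so the "Short Questions" and "MCQs" buckets come back empty rather than raising an error.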
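One detail of the new version worth noting: the NLTK bootstrap probes for each resource with nltk.data.find and downloads only on LookupError, whereas the old version used a bare except:, which would also have swallowed unrelated errors. The same pattern can be written as a loop; this sketch is equivalent in behavior to the committed code, not part of the commit:

import nltk

# Probe for each resource; nltk.data.find raises LookupError when it is missing.
for probe, package in [("tokenizers/punkt", "punkt"), ("corpora/stopwords", "stopwords")]:
    try:
        nltk.data.find(probe)
    except LookupError:
        nltk.download(package)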