Update app.py
app.py
CHANGED
```diff
@@ -29,6 +29,9 @@ def generate_mcq_quiz(pdf_content, num_questions, openai_api_key, model_choice):
 
     openai.api_key = openai_api_key
 
+    # Limit content length to avoid token limits
+    limited_content = pdf_content[:8000] if len(pdf_content) > 8000 else pdf_content
+
     prompt = f"""Based on the following document content, generate {num_questions} multiple-choice quiz questions.
 For each question:
 1. Create a clear question based on key concepts in the document
```
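A note on the new length cap: the 8,000 figure counts characters, not tokens, and Python slicing already clamps to the end of a shorter string, so the conditional is equivalent to a plain slice. A standalone sketch of the same guard (the helper name is hypothetical):

```python
def limit_chars(text: str, max_chars: int = 8000) -> str:
    """Cap text at max_chars characters; a rough, cheap proxy for a token limit."""
    return text[:max_chars]  # no-op for shorter strings

# Equivalent to: text[:8000] if len(text) > 8000 else text
print(limit_chars("short document"))      # unchanged
print(len(limit_chars("x" * 20_000)))     # 8000
```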
```diff
@@ -39,21 +42,20 @@ For each question:
 Format the output clearly with each question numbered and separated.
 
 Document content:
-{pdf_content}
+{limited_content}
 """
 
     try:
         messages = [
-            {"role": "user", "content":
+            {"role": "user", "content": prompt}
         ]
 
         response = openai.ChatCompletion.create(
             model=model_choice,
-            messages=messages,
-            max_completion_tokens=2000
+            messages=messages
        )
 
-        return response
+        return response.choices[0].message.content
     except Exception as e:
         return f"Error generating quiz: {str(e)}"
 
```
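On the changed return value: with the pre-1.0 `openai` SDK that `openai.ChatCompletion.create` implies, the call returns a response object, and the generated text sits on the first choice; returning the whole object would hand Gradio something that is not a plain string. A minimal sketch under that SDK assumption (model name and key are placeholders):

```python
import openai

openai.api_key = "sk-..."  # placeholder

response = openai.ChatCompletion.create(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Write one multiple-choice question about photosynthesis."}],
)

# The assistant's text lives on the first choice of the response object.
print(response.choices[0].message.content)
```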
```diff
@@ -85,11 +87,11 @@ def generate_response(input_text, image, pdf_content, openai_api_key, reasoning_
             ]
         else:
             messages = [
-                {"role": "user", "content":
+                {"role": "user", "content": input_content}
             ]
     elif model_choice == "o3-mini":
         messages = [
-            {"role": "user", "content":
+            {"role": "user", "content": input_content}
         ]
 
     try:
```
```diff
@@ -97,11 +99,10 @@ def generate_response(input_text, image, pdf_content, openai_api_key, reasoning_
         response = openai.ChatCompletion.create(
             model=model_choice,
             messages=messages,
-
-            max_completion_tokens=2000
+            max_tokens=2000
         )
 
-        return response
+        return response.choices[0].message.content
     except Exception as e:
         return f"Error calling OpenAI API: {str(e)}"
 
```
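On the `max_tokens=2000` argument: in the legacy chat-completions call this caps only the generated completion, and a capped answer is reported with `finish_reason == "length"` instead of `"stop"`. A small sketch of checking for that, under the same pre-1.0 SDK assumption:

```python
import openai

openai.api_key = "sk-..."  # placeholder

response = openai.ChatCompletion.create(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "List ten tips for writing good quiz questions."}],
    max_tokens=2000,      # caps the completion length, not the prompt
)

choice = response.choices[0]
if choice.finish_reason == "length":
    print("Answer was cut off at the token cap:")
print(choice.message.content)
```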
```diff
@@ -132,12 +133,15 @@ def transcribe_audio(audio, openai_api_key):
 
         # Transcribe the audio to text using OpenAI's whisper model
         audio_file_transcription = openai.Audio.transcribe(file=audio_file_obj, model="whisper-1")
-        return audio_file_transcription
+        return audio_file_transcription.text
     except Exception as e:
         return f"Error transcribing audio: {str(e)}"
 
 # The function that will be used by Gradio interface
-def chatbot(input_text, image, audio, pdf_file, openai_api_key, reasoning_effort, model_choice, pdf_content, num_quiz_questions, pdf_quiz_mode, history
+def chatbot(input_text, image, audio, pdf_file, openai_api_key, reasoning_effort, model_choice, pdf_content, num_quiz_questions, pdf_quiz_mode, history):
+    if history is None:
+        history = []
+
     # If there's audio, transcribe it to text
     if audio:
         input_text = transcribe_audio(audio, openai_api_key)
```
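On the `.text` change: the legacy `openai.Audio.transcribe` call returns a response object rather than a bare string, so the transcript has to be read off its `text` field before it can be reused as chat input; the added `if history is None` guard is the usual way to avoid a shared mutable default. A sketch under that SDK assumption (the audio filename is hypothetical):

```python
import openai

openai.api_key = "sk-..."  # placeholder

# Legacy (openai<1.0) Whisper transcription, as in app.py
with open("recording.wav", "rb") as audio_file_obj:  # hypothetical local file
    transcription = openai.Audio.transcribe(file=audio_file_obj, model="whisper-1")

# The transcript itself is the `text` field of the response object.
print(transcription.text)
print(transcription["text"])  # equivalent dict-style access
```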
```diff
@@ -148,10 +152,13 @@ def chatbot(input_text, image, audio, pdf_file, openai_api_key, reasoning_effort
         new_pdf_content = extract_text_from_pdf(pdf_file)
 
     # Check if we're in PDF quiz mode
-    if pdf_quiz_mode
-
-
-
+    if pdf_quiz_mode:
+        if new_pdf_content:
+            # Generate MCQ quiz questions
+            quiz_response = generate_mcq_quiz(new_pdf_content, int(num_quiz_questions), openai_api_key, model_choice)
+            history.append((f"User: [Uploaded PDF for Quiz - {int(num_quiz_questions)} questions]", f"Assistant: {quiz_response}"))
+        else:
+            history.append(("User: [Attempted to generate quiz without PDF]", "Assistant: Please upload a PDF file to generate quiz questions."))
     else:
         # Regular chat mode - generate the response
         response = generate_response(input_text, image, new_pdf_content, openai_api_key, reasoning_effort, model_choice)
```
```diff
@@ -159,8 +166,12 @@ def chatbot(input_text, image, audio, pdf_file, openai_api_key, reasoning_effort
         # Append the response to the history
         if input_text:
             history.append((f"User: {input_text}", f"Assistant: {response}"))
+        elif image is not None:
+            history.append((f"User: [Uploaded image]", f"Assistant: {response}"))
+        elif pdf_file is not None:
+            history.append((f"User: [Uploaded PDF]", f"Assistant: {response}"))
         else:
-            history.append((f"User: [
+            history.append((f"User: [No input provided]", f"Assistant: Please provide some input (text, image, or PDF) for me to respond to."))
 
     return "", None, None, None, new_pdf_content, history
 
```
```diff
@@ -177,15 +188,15 @@ def process_pdf(pdf_file):
 # Function to update visible components based on input type selection
 def update_input_type(choice):
     if choice == "Text":
-        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(
+        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value=False)
     elif choice == "Image":
-        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(
+        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value=False)
     elif choice == "Voice":
-        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(
+        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(value=False)
     elif choice == "PDF":
-        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(
+        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(value=False)
     elif choice == "PDF(QUIZ)":
-        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(
+        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(value=True)
 
 # Custom CSS styles with animations and button colors
 custom_css = """
```
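On the `gr.update(value=...)` endings: a single `gr.update` call can set a component's value as well as its visibility, so one change-handler can both show or hide inputs and tick the quiz-mode checkbox. A standalone sketch with assumed component names:

```python
import gradio as gr

def on_input_type(choice):
    # One handler drives both visibility and the checkbox value.
    return (
        gr.update(visible=choice in ("PDF", "PDF(QUIZ)")),  # pdf picker
        gr.update(value=choice == "PDF(QUIZ)"),             # quiz-mode checkbox
    )

with gr.Blocks() as demo:
    input_type = gr.Radio(["Text", "Image", "Voice", "PDF", "PDF(QUIZ)"], value="Text")
    pdf_input = gr.File(visible=False)
    quiz_mode = gr.Checkbox(label="Quiz mode", value=False)
    input_type.change(on_input_type, inputs=[input_type], outputs=[pdf_input, quiz_mode])

# demo.launch()  # uncomment to run locally
```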
```diff
@@ -474,21 +485,30 @@ def create_interface():
             outputs=[pdf_content]
         )
 
-        # Update quiz mode when PDF(QUIZ) is selected
-        def update_quiz_mode(choice):
-            return True if choice == "PDF(QUIZ)" else False
-
-        input_type.change(
-            fn=update_quiz_mode,
-            inputs=[input_type],
-            outputs=[quiz_mode]
-        )
-
         # Button interactions
         submit_btn.click(
             fn=chatbot,
-            inputs=[
-
+            inputs=[
+                input_text,
+                image_input,
+                audio_input,
+                pdf_input,
+                openai_api_key,
+                reasoning_effort,
+                model_choice,
+                pdf_content,
+                quiz_questions_slider,
+                quiz_mode,
+                chat_history
+            ],
+            outputs=[
+                input_text,
+                image_input,
+                audio_input,
+                pdf_input,
+                pdf_content,
+                chat_history
+            ]
         )
 
         clear_btn.click(
```
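On the expanded click wiring: Gradio passes the `inputs` components to the callback positionally, in order, and writes the callback's return tuple back to the `outputs` list in order, which is why the six values `chatbot` returns line up with the six output components. A simplified standalone sketch of that contract (component names assumed):

```python
import gradio as gr

def handle(text, count, history):
    history = (history or []) + [(f"User: {text}", f"Assistant: {int(count)} questions coming up")]
    # One return value per entry in outputs=[...], in the same order.
    return "", history

with gr.Blocks() as demo:
    text_in = gr.Textbox()
    count = gr.Slider(1, 10, value=5, step=1)
    chat = gr.Chatbot()
    submit = gr.Button("Submit")
    submit.click(fn=handle, inputs=[text_in, count, chat], outputs=[text_in, chat])

# demo.launch()  # uncomment to run locally
```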