helliun committed
Commit 16783d5 · verified · 1 Parent(s): 98cf720

Update app.py

Files changed (1)
  1. app.py +64 -41
app.py CHANGED
@@ -5,31 +5,31 @@ import gradio as gr
 
 client = OpenAI()
 
-def generate_questions(category):
-    if category == "":
-        category = "general knowledge"
-    print(category)
-    response = client.chat.completions.create(
-        model="gpt-4o-mini",
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": "Break the category \""+category+"\" into 6 subcategories, and for each subcategory create 5 True/False questions ranging from a question a Beginner would know to a question only an Expert would know. There should be as many True as False, and the structure of the questions should not make it obvious which is the answer. The harder questions should be tricky and trick non-experts into saying the wrong thing. Provide the correct answers and a field with a 1 sentence explanation. This will total out to 30 questions. Output just a JSON, nothing else. Here's an example JSON output for \"nutrition\":\n\n```json\n{\n \"Macronutrients\": [\n {\n \"question\": \"Protein is one of the three primary macronutrients.\",\n \"answer\": True,\n \"explanation\": \"Protein is one of the three primary macronutrients, along with carbohydrates and fats.\"\n },\n {\n \"question\": \"Carbohydrates are the body's main source of energy.\",\n \"answer\": True,\n \"explanation\": \"Carbohydrates are typically the body's preferred energy source.\"\n },\n {\n \"question\": \"Fats have the same caloric content per gram as carbohydrates.\",\n \"answer\": False,\n \"explanation\": \"Fats have 9 calories per gram, while carbohydrates have 4 calories per gram.\"\n },\n {\n \"question\": \"All proteins are equally effective for muscle growth.\",\n \"answer\": False,\n \"explanation\": \"Different proteins have varying amino acid profiles and bioavailability, affecting their effectiveness.\"\n },\n {\n \"question\": \"Omega-3 fatty acids are a type of fat that can reduce inflammation.\",\n \"answer\": True,\n \"explanation\": \"Omega-3 fatty acids, found in foods like fish, are known to have anti-inflammatory properties.\"\n }\n ],\n \"Micronutrients\": [\n { ..."
-                    }
-                ]
-            }
-        ],
-        response_format={ "type": "json_object" },
-        temperature=1,
-        max_tokens=4071,
-        top_p=1,
-        frequency_penalty=0,
-        presence_penalty=0
-    )
-    return json.loads(response.choices[0].message.content)
+def generate_questions(category, num_categories, num_questions):
+    if category == "":
+        category = "general knowledge"
+    print(category)
+    response = client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": f"Break the category \"{category}\" into {num_categories} subcategories, and for each subcategory create {num_questions} True/False questions ranging from a question a Beginner would know to a question only an Expert would know. There should be as many True as False, and the structure of the questions should not make it obvious which is the answer. The harder questions should be tricky and trick non-experts into saying the wrong thing. Provide the correct answers and a field with a 1 sentence explanation. This will total out to {num_categories * num_questions} questions. Output just a JSON, nothing else. Below is an example JSON output for \"nutrition\" where 6 categories of 5 questions were requested, but remember, for you, there should be a total of {num_categories * num_questions} questions with {num_categories} categories and {num_questions} questions per category.\n\n```json\n{{\n \"Macronutrients\": [\n {{\n \"question\": \"Protein is one of the three primary macronutrients.\",\n \"answer\": True,\n \"explanation\": \"Protein is one of the three primary macronutrients, along with carbohydrates and fats.\"\n }},\n {{\n \"question\": \"Carbohydrates are the body's main source of energy.\",\n \"answer\": True,\n \"explanation\": \"Carbohydrates are typically the body's preferred energy source.\"\n }},\n {{\n \"question\": \"Fats have the same caloric content per gram as carbohydrates.\",\n \"answer\": False,\n \"explanation\": \"Fats have 9 calories per gram, while carbohydrates have 4 calories per gram.\"\n }},\n {{\n \"question\": \"All proteins are equally effective for muscle growth.\",\n \"answer\": False,\n \"explanation\": \"Different proteins have varying amino acid profiles and bioavailability, affecting their effectiveness.\"\n }},\n {{\n \"question\": \"Omega-3 fatty acids are a type of fat that can reduce inflammation.\",\n \"answer\": True,\n \"explanation\": \"Omega-3 fatty acids, found in foods like fish, are known to have anti-inflammatory properties.\"\n }}\n ],\n \"Micronutrients\": [\n {{ ...\"}}"
+                    }
+                ]
+            }
+        ],
+        response_format={"type": "json_object"},
+        temperature=1,
+        max_tokens=4071,
+        top_p=1,
+        frequency_penalty=0,
+        presence_penalty=0
+    )
+    return json.loads(response.choices[0].message.content)
 
 # Function to calculate Metaknowledge score
 def calculate_meta_cog_score(df):
@@ -151,10 +151,8 @@ def analyze_results(df, overall_scores, subcategory_scores):
 
     return f"## Analysis of Results\n\n{analysis}\n\n## Detailed Questions and Answers\n\n{question_details}"
 
-
-
 # Modify the submit_answer function to include analysis
-def submit_answer(category, questions, index, user_answer, confidence, user_answers):
+def submit_answer(category, num_categories, num_questions, questions, index, user_answer, confidence, user_answers):
     question_data = questions[index]
     subcategory = question_data["subcategory"]
 
@@ -181,27 +179,31 @@ def submit_answer(category, questions, index, user_answer, confidence, user_answ
         return "", index, gr.update(visible=False), user_answers, results_df, gr.update(visible=False), gr.update(visible=False), gr.update(value=analysis, visible=True)
     else:
        question_text, _, _, visible = display_current_question(questions, index)
-        return question_text, index, gr.update(visible=True), user_answers, gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
+        return question_text, index, gr.update(visible=True), user_answers, gr.update(visible=False), gr.update(visible=True, value=0.5), gr.update(visible=True, value=None), gr.update(visible=False)
 
 # Gradio UI setup
 with gr.Blocks(theme="soft") as app:
-    gr.Markdown("""## Metaknowledge Test
-    See how well you know what you know. Enter a category and a 30-question quiz will be generated. Answer the True/False questions about it, and scores will then be calculated on your knowledge of the category, and on your ***knowledge of your knowledge*** of the category.""")
-    category_input = gr.Textbox(label="Category", placeholder="general knowledge")
+    gr.Markdown("""## Metaknowledge Quiz
+    See how well you know what you know. Enter a category and a quiz will be generated. Answer the True/False questions about it, and scores will then be calculated on your knowledge of the category, and on your ***knowledge of your knowledge*** of the category.""")
+    with gr.Row():
+        category_input = gr.Textbox(label="Category", placeholder="general knowledge", scale=4)
+        num_categories_input = gr.Number(label="Number of Categories", value=6, scale=1)
+        num_questions_input = gr.Number(label="Questions per Category", value=5, scale=1)
+        total_questions_display = gr.Number(label="Total Questions in Quiz", value=30, scale=1)
     submit_category = gr.Button("Generate Quiz")
     question_area = gr.Markdown(visible=False)
     answer_area = gr.Radio(["True", "False"], label="Your Answer", visible=False)
     confidence_slider = gr.Slider(0, 1, label="Confidence Level", value=0.5, visible=False)
     submit_answer_btn = gr.Button("Submit Answer", visible=False)
     result_area = gr.DataFrame(label="Results", visible=False)
-    loading_text = gr.Textbox(label="Generating Test...", visible=False)
+    loading_text = gr.Textbox(label="Generating Quiz...", visible=False)
    analysis_area = gr.Markdown(visible=False) # Add this line for analysis area
     questions_state = gr.State()
     index_state = gr.State(0)
     user_answers_state = gr.State([])
 
-    def on_generate_quiz(category):
-        questions_data = generate_questions(category)
+    def on_generate_quiz(category, num_categories, num_questions):
+        questions_data = generate_questions(category, num_categories, num_questions)
 
         questions = []
         for subcategory, qs in questions_data.items():
@@ -211,6 +213,7 @@ with gr.Blocks(theme="soft") as app:
 
         import random
        random.shuffle(questions)
+        print(len(questions))
 
         index = 0
         question_text, _, _, visible = display_current_question(questions, index)
@@ -220,8 +223,8 @@ with gr.Blocks(theme="soft") as app:
            index,
            [],
            gr.update(visible=visible),
-            gr.update(visible=True),
-            gr.update(visible=True),
+            gr.update(visible=True, value=None),
+            gr.update(visible=True, value=0.5),
            gr.update(visible=True),
            gr.update(visible=False),
            gr.update(visible=False),
@@ -234,16 +237,36 @@ with gr.Blocks(theme="soft") as app:
    def display_loading():
        return gr.update(visible=True)
 
+    def update_total_questions(num_categories, num_questions):
+        total_questions = num_categories * num_questions
+        return gr.update(value=total_questions)
+
    def display_results(index, questions):
        if index >= len(questions):
-            return gr.update(visible=True)
+            return gr.update(visible=True)
+
+    num_categories_input.change(update_total_questions, inputs=[num_categories_input, num_questions_input], outputs=[total_questions_display])
+    num_questions_input.change(update_total_questions, inputs=[num_categories_input, num_questions_input], outputs=[total_questions_display])
 
    submit_category.click(remove_button, inputs=[], outputs=[submit_category])
    submit_category.click(display_loading, inputs=[], outputs=[loading_text])
 
+    def make_uninteractive():
+        return (
+            gr.update(interactive=False),
+            gr.update(interactive=False),
+            gr.update(interactive=False)
+        )
+
+    submit_category.click(
+        make_uninteractive,
+        inputs=[],
+        outputs=[category_input, num_categories_input, num_questions_input]
+    )
+
    submit_category.click(
        on_generate_quiz,
-        inputs=[category_input],
+        inputs=[category_input, num_categories_input, num_questions_input],
        outputs=[
            question_area,
            questions_state,
@@ -262,11 +285,11 @@ with gr.Blocks(theme="soft") as app:
 
    submit_answer_btn.click(
        submit_answer,
-        inputs=[category_input, questions_state, index_state, answer_area, confidence_slider, user_answers_state],
+        inputs=[category_input, num_categories_input, num_questions_input, questions_state, index_state, answer_area, confidence_slider, user_answers_state],
        outputs=[question_area, index_state, submit_answer_btn, user_answers_state, result_area, confidence_slider, answer_area, analysis_area] # Add analysis_area here
    )
 
    submit_answer_btn.click(display_results, inputs=[index_state, questions_state], outputs=[result_area])
-
+
 # Launch the app
 app.launch(share=False)
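
For context, here is a minimal standalone sketch, not part of the commit, of the dictionary shape that the updated `generate_questions(category, num_categories, num_questions)` is prompted to return and of the flatten-and-shuffle step that `on_generate_quiz` applies to it before the quiz starts. The sample data below is invented for illustration; the field names follow the example JSON in the prompt, and a real run would come from the OpenAI call in app.py.

```python
# Illustrative sketch only (not part of this commit). It mimics the dict that
# generate_questions() is prompted to return, i.e.
# {subcategory: [{"question", "answer", "explanation"}, ...]}, and reproduces
# the flatten-and-shuffle step from on_generate_quiz.
import random

sample_response = {
    "Macronutrients": [
        {"question": "Protein is one of the three primary macronutrients.",
         "answer": True,
         "explanation": "Protein is a primary macronutrient, along with carbohydrates and fats."},
    ],
    "Micronutrients": [
        {"question": "Vitamin C is fat-soluble.",
         "answer": False,
         "explanation": "Vitamin C is water-soluble; vitamins A, D, E, and K are fat-soluble."},
    ],
}

# Tag each question with its subcategory (submit_answer later reads
# question_data["subcategory"]), then shuffle so subcategories are interleaved,
# exactly as on_generate_quiz does in the diff above.
questions = []
for subcategory, qs in sample_response.items():
    for q in qs:
        q["subcategory"] = subcategory
        questions.append(q)
random.shuffle(questions)

print(len(questions), "questions ready")  # with real output: num_categories * num_questions
```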