helliun committed on
Commit 0970f00 · verified · 1 Parent(s): 5325678

Update app.py

Files changed (1): app.py (+6 -6)
app.py CHANGED
@@ -31,7 +31,7 @@ def generate_questions(category):
     )
     return json.loads(response.choices[0].message.content)
 
-# Function to calculate MetaCog score
+# Function to calculate Metaknowledge score
 def calculate_meta_cog_score(df):
     df['Correct'] = df['User Answer'] == df['Correct Answer']
     df['C'] = df['Correct'].apply(lambda x: 1 if x else -1)
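
This hunk only shows the setup for the score: per-question correctness and a ±1 encoding in `C`. The actual `meta_cog_ratio` formula falls outside the diff, so purely as an illustration of how correctness and confidence can combine into one calibration-style number, here is a minimal sketch; the `Confidence` column name and the rescaling are assumptions, not the app's real formula:

```python
import pandas as pd

def sketch_metaknowledge_score(df: pd.DataFrame) -> float:
    """Toy calibration score: reward confidence when right, penalize it when wrong.

    Assumes 'Confidence' is reported on a 0-100 scale; rescales it to [0, 1],
    multiplies by C (+1 correct / -1 incorrect), then maps the mean from
    [-1, 1] to [0, 1]. Illustrative only -- not app.py's meta_cog_ratio.
    """
    df = df.copy()
    df['Correct'] = df['User Answer'] == df['Correct Answer']
    df['C'] = df['Correct'].apply(lambda x: 1 if x else -1)
    signed = df['C'] * (df['Confidence'] / 100.0)
    return (signed.mean() + 1) / 2  # 1.0 = always correct at full confidence

quiz = pd.DataFrame({
    'User Answer': [True, False, True],
    'Correct Answer': [True, True, True],
    'Confidence': [90, 40, 60],
})
print(round(sketch_metaknowledge_score(quiz), 2))  # -> 0.68
```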
@@ -71,7 +71,7 @@ def calculate_scores(df):
     underconfidence = df['Underconfidence'].sum() / n
 
     return {
-        'MetaCog Score': f"{round(meta_cog_ratio * 100, 0)}%",
+        'Metaknowledge Score': f"{round(meta_cog_ratio * 100, 0)}%",
         'Accuracy': f"{round(accuracy * 100, 0)}%",
         'Overconfidence': f"{round(overconfidence * 100, 0)}%",
         'Underconfidence': f"{round(underconfidence * 100, 0)}%"
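
One small caveat this commit leaves untouched: `round(x, 0)` returns a float, so each of these f-strings renders with a trailing `.0` (e.g. `75.0%`). Dropping the `ndigits` argument makes `round` return an int instead:

```python
accuracy = 0.754

# round(value, 0) keeps the float type, so the formatted string carries '.0'
print(f"{round(accuracy * 100, 0)}%")  # -> 75.0%

# round(value) with no ndigits argument returns an int
print(f"{round(accuracy * 100)}%")     # -> 75%
```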
@@ -95,7 +95,7 @@ def analyze_results(df, overall_scores, subcategory_scores):
             "role": "user",
             "content": f"""
             Analyze the following quiz results:
-            - Overall MetaCog Score: {overall_scores['MetaCog Score']}
+            - Overall Metaknowledge Score: {overall_scores['Metaknowledge Score']}
             - Overall Accuracy: {overall_scores['Accuracy']}
             - Overall Overconfidence: {overall_scores['Overconfidence']}
             - Overall Underconfidence: {overall_scores['Underconfidence']}
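
The API call wrapping this prompt sits outside the hunk. Judging from the `response.choices[0].message.content` access in `generate_questions` above, it presumably follows the OpenAI chat-completions pattern; a self-contained sketch of that shape, where the client setup and model name are assumptions rather than values taken from app.py:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def sketch_analyze_results(overall_scores: dict) -> str:
    """Send the formatted score summary to a chat model and return its text."""
    prompt = f"""
    Analyze the following quiz results:
    - Overall Metaknowledge Score: {overall_scores['Metaknowledge Score']}
    - Overall Accuracy: {overall_scores['Accuracy']}
    - Overall Overconfidence: {overall_scores['Overconfidence']}
    - Overall Underconfidence: {overall_scores['Underconfidence']}
    """
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # assumption; the diff doesn't show the model
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
```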
@@ -174,8 +174,8 @@ def submit_answer(category, questions, index, user_answer, confidence, user_answ
         subcategory_scores = df.groupby('Subcategory').apply(calculate_scores).to_dict()
         analysis = analyze_results(df, overall_scores, subcategory_scores)
 
-        overall_score_df = pd.DataFrame([["Overall", *overall_scores.values()]], columns=['Subcategory', 'Accuracy', 'MetaCog Score', 'Overconfidence', 'Underconfidence'])
-        subcategory_scores_df = pd.DataFrame([(subcategory, *score.values()) for subcategory, score in subcategory_scores.items()], columns=['Subcategory', 'MetaCog Score', 'Accuracy', 'Overconfidence', 'Underconfidence'])
+        overall_score_df = pd.DataFrame([["Overall", *overall_scores.values()]], columns=['Subcategory', 'Metaknowledge Score', 'Accuracy', 'Overconfidence', 'Underconfidence'])
+        subcategory_scores_df = pd.DataFrame([(subcategory, *score.values()) for subcategory, score in subcategory_scores.items()], columns=['Subcategory', 'Metaknowledge Score', 'Accuracy', 'Overconfidence', 'Underconfidence'])
         results_df = pd.concat([overall_score_df, subcategory_scores_df], ignore_index=True)
         results_df = gr.DataFrame(label="Results", value=results_df, visible=True)
         return "", index, gr.update(visible=False), user_answers, results_df, gr.update(visible=False), gr.update(visible=False), gr.update(value=analysis, visible=True)
@@ -185,7 +185,7 @@ def submit_answer(category, questions, index, user_answer, confidence, user_answ
 
 # Gradio UI setup
 with gr.Blocks(theme="soft") as app:
-    gr.Markdown("""## Metacognition Test
+    gr.Markdown("""## Metaknowledge Test
     See how well you know what you know. Enter a category and a 30-question quiz will be generated. Answer the True/False questions about it, and scores will then be calculated on your knowledge of the category, and on your ***knowledge of your knowledge*** of the category.""")
     category_input = gr.Textbox(label="Category", placeholder="general knowledge")
     submit_category = gr.Button("Generate Quiz")
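
For context on the surrounding UI, a stripped-down sketch of how a Blocks header, textbox, and button wire together; the `start_quiz` handler and `status` output below are placeholders, not the actual wiring in app.py:

```python
import gradio as gr

def start_quiz(category):  # placeholder handler, not the app's real one
    return f"Generating a quiz for: {category or 'general knowledge'}"

with gr.Blocks(theme="soft") as demo:
    gr.Markdown("""## Metaknowledge Test
    See how well you know what you know.""")
    category_input = gr.Textbox(label="Category", placeholder="general knowledge")
    submit_category = gr.Button("Generate Quiz")
    status = gr.Markdown()
    # Button clicks route through fn/inputs/outputs, as in app.py.
    submit_category.click(fn=start_quiz, inputs=category_input, outputs=status)

if __name__ == "__main__":
    demo.launch()
```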
 