Merge branch 'main' of https://huggingface.co/spaces/SPJIMR-Internship/SPJIMR_FlipClassroom_RCopilot_ResearchInternship
Changed files:
- gen_mcqs.py +282 -0
- rubrics.py +8 -8
- session_page.py +0 -0
- subjective_test_evaluation.py +798 -252
gen_mcqs.py
CHANGED
@@ -1,3 +1,210 @@
+# import ast
+# from pymongo import MongoClient
+# from datetime import datetime
+# import openai
+# import google.generativeai as genai
+# from google.generativeai import GenerativeModel
+# from dotenv import load_dotenv
+# import os
+# from file_upload_vectorize import resources_collection, vectors_collection, courses_collection2, faculty_collection
+
+# # Load environment variables
+# load_dotenv()
+# MONGO_URI = os.getenv('MONGO_URI')
+# OPENAI_KEY = os.getenv('OPENAI_KEY')
+# GEMINI_KEY = os.getenv('GEMINI_KEY')
+
+# # Configure APIs
+# openai.api_key = OPENAI_KEY
+# genai.configure(api_key=GEMINI_KEY)
+# model = genai.GenerativeModel('gemini-pro')
+
+# # Connect to MongoDB
+# client = MongoClient(MONGO_URI)
+# db = client['novascholar_db']
+# quizzes_collection = db["quizzes"]
+
+# def strip_code_markers(response_text):
+#     """Strip off the markers ``` and python from a LLM model's response"""
+#     if response_text.startswith("```python"):
+#         response_text = response_text[len("```python"):].strip()
+#     if response_text.startswith("```"):
+#         response_text = response_text[len("```"):].strip()
+#     if response_text.endswith("```"):
+#         response_text = response_text[:-len("```")].strip()
+#     return response_text
+
+
+# # New function to generate MCQs using Gemini
+# def generate_mcqs(context, num_questions, session_title, session_description):
+#     """Generate MCQs either from context or session details"""
+#     try:
+#         # Initialize Gemini model
+#         if context:
+#             prompt = f"""
+#             Based on the following content, generate {num_questions} multiple choice questions.
+#             Format each question as a Python dictionary with the following structure:
+#             {{
+#                 "question": "Question text here",
+#                 "options": ["A) option1", "B) option2", "C) option3", "D) option4"],
+#                 "correct_option": "A) option1" or "B) option2" or "C) option3" or "D) option4"
+#             }}
+
+#             Content:
+#             {context}
+
+#             Generate challenging but clear questions that test understanding of key concepts.
+#             Return only the Python list of dictionaries.
+#             """
+#         else:
+#             prompt = f"""
+#             Generate {num_questions} multiple choice questions about the topic:
+#             Title: {session_title}
+#             Description: {session_description}
+
+#             Format each question as a Python dictionary with the following structure:
+#             {{
+#                 "question": "Question text here",
+#                 "options": ["A) option1", "B) option2", "C) option3", "D) option4"],
+#                 "correct_option": "A" or "B" or "C" or "D"
+#             }}
+
+#             Generate challenging but clear questions.
+#             Return only the Python list of dictionaries without any additional formatting or markers
+#             Do not write any other text, do not start the response with (```python), do not end the response with backticks(```)
+#             A Sample response should look like this: Response Text: [
+#                 {
+#                     "question": "Which of the following is NOT a valid data type in C++?",
+#                     "options": ["int", "double", "boolean", "char"],
+#                     "correct_option": "C"
+#                 }
+#             ] (Notice that there are no backticks(```) around the response and no (```python))
+#             .
+#             """
+
+#         response = model.generate_content(prompt)
+#         response_text = response.text.strip()
+#         print("Response Text:", response_text)
+#         modified_response_text = strip_code_markers(response_text)
+#         print("Response Text Modified to:", modified_response_text)
+#         # Extract and parse the response to get the list of MCQs
+#         mcqs = ast.literal_eval(modified_response_text)  # Be careful with eval, consider using ast.literal_eval for production
+#         print(mcqs)
+#         if not mcqs:
+#             raise ValueError("No questions generated")
+#         return mcqs
+#     except Exception as e:
+#         print(f"Error generating MCQs: , error: {e}")
+#         return None
+
+# # New function to save quiz to database
+# def save_quiz(course_id, session_id, title, questions, user_id):
+#     """Save quiz to database"""
+#     try:
+#         quiz_data = {
+#             "user_id": user_id,
+#             "course_id": course_id,
+#             "session_id": session_id,
+#             "title": title,
+#             "questions": questions,
+#             "created_at": datetime.utcnow(),
+#             "status": "active",
+#             "submissions": []
+#         }
+#         result = quizzes_collection.insert_one(quiz_data)
+#         return result.inserted_id
+#     except Exception as e:
+#         print(f"Error saving quiz: {e}")
+#         return None
+
+
+# def get_student_quiz_score(quiz_id, student_id):
+#     """Get student's score for a specific quiz"""
+#     quiz = quizzes_collection.find_one(
+#         {
+#             "_id": quiz_id,
+#             "submissions.student_id": student_id
+#         },
+#         {"submissions.$": 1}
+#     )
+#     if quiz and quiz.get('submissions'):
+#         return quiz['submissions'][0].get('score')
+#     return None
+
+# # def submit_quiz_answers(quiz_id, student_id, student_answers):
+# #     """Submit and score student's quiz answers"""
+# #     quiz = quizzes_collection.find_one({"_id": quiz_id})
+# #     if not quiz:
+# #         return None
+
+# #     # Calculate score
+# #     correct_answers = 0
+# #     total_questions = len(quiz['questions'])
+
+# #     for q_idx, question in enumerate(quiz['questions']):
+# #         if student_answers.get(str(q_idx)) == question['correct_option']:
+# #             correct_answers += 1
+
+# #     score = (correct_answers / total_questions) * 100
+
+# #     # Store submission
+# #     submission_data = {
+# #         "student_id": student_id,
+# #         "answers": student_answers,
+# #         "score": score,
+# #         "submitted_at": datetime.utcnow()
+# #     }
+
+# #     # Update quiz with submission
+# #     quizzes_collection.update_one(
+# #         {"_id": quiz_id},
+# #         {
+# #             "$push": {"submissions": submission_data}
+# #         }
+# #     )
+
+# #     return score
+# def submit_quiz_answers(quiz_id, student_id, student_answers):
+#     """Submit and score student's quiz answers"""
+#     try:
+#         quiz = quizzes_collection.find_one({"_id": quiz_id})
+#         if not quiz:
+#             return None
+
+#         # Calculate score
+#         correct_answers = 0
+#         total_questions = len(quiz['questions'])
+
+#         for q_idx, question in enumerate(quiz['questions']):
+#             student_answer = student_answers.get(str(q_idx))
+#             if student_answer:  # Only check if answer was provided
+#                 # Extract the option letter (A, B, C, D) from the full answer string
+#                 answer_letter = student_answer.split(')')[0].strip()
+#                 if answer_letter == question['correct_option']:
+#                     correct_answers += 1
+
+#         score = (correct_answers / total_questions) * 100
+
+#         # Store submission
+#         submission_data = {
+#             "student_id": student_id,
+#             "answers": student_answers,
+#             "score": score,
+#             "submitted_at": datetime.utcnow()
+#         }
+
+#         # Update quiz with submission
+#         result = quizzes_collection.update_one(
+#             {"_id": quiz_id},
+#             {"$push": {"submissions": submission_data}}
+#         )
+
+#         return score if result.modified_count > 0 else None
+
+#     except Exception as e:
+#         print(f"Error submitting quiz: {e}")
+#         return None
+
 import ast
 from pymongo import MongoClient
 from datetime import datetime
@@ -23,6 +230,7 @@ model = genai.GenerativeModel('gemini-pro')
 client = MongoClient(MONGO_URI)
 db = client['novascholar_db']
 quizzes_collection = db["quizzes"]
+surprise_quizzes_collection = db["surprise_quizzes"]
 
 def strip_code_markers(response_text):
     """Strip off the markers ``` and python from a LLM model's response"""
@@ -117,6 +325,26 @@ def save_quiz(course_id, session_id, title, questions, user_id):
         print(f"Error saving quiz: {e}")
         return None
 
+def save_surprise_quiz(course_id, session_id, title, questions, user_id, no_minutes):
+    """Save quiz to database"""
+    try:
+        quiz_data = {
+            "user_id": user_id,
+            "course_id": course_id,
+            "session_id": session_id,
+            "title": title,
+            "questions": questions,
+            "created_at": datetime.now(),
+            "status": "active",
+            "submissions": [],
+            "no_minutes": no_minutes
+        }
+        result = surprise_quizzes_collection.insert_one(quiz_data)
+        return result.inserted_id
+    except Exception as e:
+        print(f"Error saving quiz: {e}")
+        return None
+
 
 def get_student_quiz_score(quiz_id, student_id):
     """Get student's score for a specific quiz"""
@@ -131,6 +359,19 @@ def get_student_quiz_score(quiz_id, student_id):
         return quiz['submissions'][0].get('score')
     return None
 
+def get_student_surprise_quiz_score(quiz_id, student_id):
+    """Get student's score for a specific quiz"""
+    quiz = surprise_quizzes_collection.find_one(
+        {
+            "_id": quiz_id,
+            "submissions.student_id": student_id
+        },
+        {"submissions.$": 1}
+    )
+    if quiz and quiz.get('submissions'):
+        return quiz['submissions'][0].get('score')
+    return None
+
 # def submit_quiz_answers(quiz_id, student_id, student_answers):
 #     """Submit and score student's quiz answers"""
 #     quiz = quizzes_collection.find_one({"_id": quiz_id})
@@ -201,6 +442,47 @@ def submit_quiz_answers(quiz_id, student_id, student_answers):
 
         return score if result.modified_count > 0 else None
 
+    except Exception as e:
+        print(f"Error submitting quiz: {e}")
+        return None
+
+def submit_surprise_quiz_answers(quiz_id, student_id, student_answers):
+    """Submit and score student's quiz answers"""
+    try:
+        quiz = surprise_quizzes_collection.find_one({"_id": quiz_id})
+        if not quiz:
+            return None
+
+        # Calculate score
+        correct_answers = 0
+        total_questions = len(quiz['questions'])
+
+        for q_idx, question in enumerate(quiz['questions']):
+            student_answer = student_answers.get(str(q_idx))
+            if student_answer:  # Only check if answer was provided
+                # Extract the option letter (A, B, C, D) from the full answer string
+                answer_letter = student_answer.split(')')[0].strip()
+                if answer_letter == question['correct_option']:
+                    correct_answers += 1
+
+        score = (correct_answers / total_questions) * 100
+
+        # Store submission
+        submission_data = {
+            "student_id": student_id,
+            "answers": student_answers,
+            "score": score,
+            "submitted_at": datetime.utcnow()
+        }
+
+        # Update quiz with submission
+        result = surprise_quizzes_collection.update_one(
+            {"_id": quiz_id},
+            {"$push": {"submissions": submission_data}}
+        )
+
+        return score if result.modified_count > 0 else None
+
     except Exception as e:
         print(f"Error submitting quiz: {e}")
         return None
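For orientation, a minimal usage sketch of the surprise-quiz helpers added above. The course, session, and user IDs, the sample question, and the student answer are all hypothetical; the sketch assumes gen_mcqs.py is importable and that MONGO_URI points at a reachable MongoDB instance.

# Hypothetical usage of the new surprise-quiz helpers (not part of the commit).
from gen_mcqs import (save_surprise_quiz, submit_surprise_quiz_answers,
                      get_student_surprise_quiz_score)

questions = [
    {
        "question": "Which data structure gives O(1) average lookup by key?",
        "options": ["A) list", "B) dict", "C) tuple", "D) set"],
        "correct_option": "B",
    }
]

# Faculty side: store a 10-minute surprise quiz for a session (IDs are hypothetical).
quiz_id = save_surprise_quiz(
    course_id="CS101", session_id="S3", title="Surprise Quiz 1",
    questions=questions, user_id="faculty_1", no_minutes=10,
)

# Student side: answers are keyed by question index as strings; the helper strips
# the leading "B)" letter before comparing against correct_option.
score = submit_surprise_quiz_answers(quiz_id, "student_42", {"0": "B) dict"})
print(score, get_student_surprise_quiz_score(quiz_id, "student_42"))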
rubrics.py
CHANGED
@@ -98,14 +98,14 @@ def display_rubrics_tab(session, course_id):
 
         if rubric:
             st.json(rubric)
-            if st.button("Save Rubric"):
-
-
-
-
-
-
-
+            # if st.button("Save Rubric"):
+            rubric_data = {
+                "course_id": course_id,
+                "session_id": session['session_id'],
+                "rubric": json.loads(rubric)
+            }
+            rubrics_collection.insert_one(rubric_data)
+            st.success("Rubric saved successfully!")
         else:
             st.error("No learning outcomes found for this session")
     else:
session_page.py
CHANGED
The diff for this file is too large to render. See raw diff.
subjective_test_evaluation.py
CHANGED
@@ -1,252 +1,798 @@
-import
-from
-from
-import os
-from
-import
-resources_collection = db[
+# (new lines 1-248: the previous contents of subjective_test_evaluation.py kept as a
+#  commented-out block: the old imports, MongoDB setup, evaluate_subjective_answers,
+#  and display_evaluation_to_faculty; the same logic reappears uncommented below)
+import streamlit as st
+from datetime import datetime
+from pymongo import MongoClient
+import os
+from openai import OpenAI
+from dotenv import load_dotenv
+from bson import ObjectId
+
+load_dotenv()
+
+# MongoDB setup
+MONGO_URI = os.getenv("MONGO_URI")
+client = MongoClient(MONGO_URI)
+db = client["novascholar_db"]
+subjective_tests_collection = db["subjective_tests"]
+subjective_test_evaluation_collection = db["subjective_test_evaluation"]
+pre_subjective_tests_collection = db["pre_subjective_tests"]
+resources_collection = db["resources"]
+students_collection = db["students"]
+pre_subjective_test_evaluation_collection = db["pre_subjective_test_evaluation"]
+
+
+def evaluate_subjective_answers(session_id, student_id, test_id):
+    """
+    Generate evaluation and analysis for subjective test answers
+    """
+    try:
+        # Fetch test and student submission
+        test = subjective_tests_collection.find_one({"_id": test_id})
+        if not test:
+            return None
+
+        # Find student's submission
+        submission = next(
+            (sub for sub in test.get("submissions", []) if sub["student_id"] == str(student_id)),
+            None,
+        )
+        if not submission:
+            return None
+
+        # Fetch pre-class materials
+        pre_class_materials = resources_collection.find({"session_id": session_id})
+        pre_class_content = ""
+        for material in pre_class_materials:
+            if "text_content" in material:
+                pre_class_content += material["text_content"] + "\n"
+
+        # Default rubric (can be customized later)
+        default_rubric = """
+        1. Content Understanding (1-4):
+           - Demonstrates comprehensive understanding of core concepts
+           - Accurately applies relevant theories and principles
+           - Provides specific examples and evidence
+
+        2. Critical Analysis (1-4):
+           - Shows depth of analysis
+           - Makes meaningful connections
+           - Demonstrates original thinking
+
+        3. Organization & Clarity (1-4):
+           - Clear structure and flow
+           - Well-developed arguments
+           - Effective use of examples
+        """
+
+        # Initialize OpenAI client
+        client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
+
+        evaluations = []
+        for i, (question, answer) in enumerate(zip(test["questions"], submission["answers"])):
+            analysis_content = f"""
+            Question: {question['question']}
+            Student Answer: {answer}
+            """
+
+            prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
+
+            1. Evaluation Process:
+               - Use each rubric criterion (scored 1-4) for internal assessment
+               - Compare response with pre-class materials
+               - Check alignment with all rubric requirements
+               - Calculate final score: sum of criteria scores converted to 10-point scale
+
+            Pre-class Materials:
+            {pre_class_content[:1000]}  # Truncate to avoid token limits
+
+            Rubric Criteria:
+            {default_rubric}
+
+            Question and Answer:
+            {analysis_content}
+
+            Provide your assessment in the following format:
+
+            **Score and Evidence**
+            - Score: [X]/10
+            - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
+
+            **Key Areas for Improvement**
+            - [Concise improvement point 1]
+            - [Concise improvement point 2]
+            - [Concise improvement point 3]
+            """
+
+            # Generate evaluation using OpenAI
+            response = client.chat.completions.create(
+                model="gpt-4o-mini",
+                messages=[{"role": "user", "content": prompt_template}],
+                max_tokens=500,
+                temperature=0.4,
+            )
+
+            evaluations.append(
+                {
+                    "question_number": i + 1,
+                    "question": question["question"],
+                    "answer": answer,
+                    "evaluation": response.choices[0].message.content,
+                }
+            )
+
+        # Store evaluation in MongoDB
+        evaluation_doc = {
+            "test_id": test_id,
+            "student_id": student_id,
+            "session_id": session_id,
+            "evaluations": evaluations,
+            "evaluated_at": datetime.utcnow(),
+        }
+
+        subjective_test_evaluation_collection.insert_one(evaluation_doc)
+        return evaluation_doc
+
+    except Exception as e:
+        print(f"Error in evaluate_subjective_answers: {str(e)}")
+        return None
+
+
+def pre_evaluate_subjective_answers(session_id, student_id, test_id):
+    """
+    Generate evaluation and analysis for subjective test answers
+    """
+    try:
+        # Fetch test and student submission
+        test = pre_subjective_tests_collection.find_one({"_id": test_id})
+        if not test:
+            return None
+
+        # Find student's submission
+        submission = next(
+            (sub for sub in test.get("submissions", []) if sub["student_id"] == str(student_id)),
+            None,
+        )
+        if not submission:
+            return None
+
+        # Fetch pre-class materials
+        pre_class_materials = resources_collection.find({"session_id": session_id})
+        pre_class_content = ""
+        for material in pre_class_materials:
+            if "text_content" in material:
+                pre_class_content += material["text_content"] + "\n"
+
+        # Default rubric (can be customized later)
+        default_rubric = """
+        1. Content Understanding (1-4):
+           - Demonstrates comprehensive understanding of core concepts
+           - Accurately applies relevant theories and principles
+           - Provides specific examples and evidence
+
+        2. Critical Analysis (1-4):
+           - Shows depth of analysis
+           - Makes meaningful connections
+           - Demonstrates original thinking
+
+        3. Organization & Clarity (1-4):
+           - Clear structure and flow
+           - Well-developed arguments
+           - Effective use of examples
+        """
+
+        # Initialize OpenAI client
+        client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
+
+        evaluations = []
+        for i, (question, answer) in enumerate(zip(test["questions"], submission["answers"])):
+            analysis_content = f"""
+            Question: {question['question']}
+            Student Answer: {answer}
+            """
+
+            prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
+
+            1. Evaluation Process:
+               - Use each rubric criterion (scored 1-4) for internal assessment
+               - Compare response with pre-class materials
+               - Check alignment with all rubric requirements
+               - Calculate final score: sum of criteria scores converted to 10-point scale
+
+            Pre-class Materials:
+            {pre_class_content[:1000]}  # Truncate to avoid token limits
+
+            Rubric Criteria:
+            {default_rubric}
+
+            Question and Answer:
+            {analysis_content}
+
+            Provide your assessment in the following format:
+
+            **Score and Evidence**
+            - Score: [X]/10
+            - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
+
+            **Key Areas for Improvement**
+            - [Concise improvement point 1]
+            - [Concise improvement point 2]
+            - [Concise improvement point 3]
+            """
+
+            # Generate evaluation using OpenAI
+            response = client.chat.completions.create(
+                model="gpt-4o-mini",
+                messages=[{"role": "user", "content": prompt_template}],
+                max_tokens=500,
+                temperature=0.4,
+            )
+
+            evaluations.append(
+                {
+                    "question_number": i + 1,
+                    "question": question["question"],
+                    "answer": answer,
+                    "evaluation": response.choices[0].message.content,
+                }
+            )
+
+        # Store evaluation in MongoDB
+        evaluation_doc = {
+            "test_id": test_id,
+            "student_id": student_id,
+            "session_id": session_id,
+            "evaluations": evaluations,
+            "evaluated_at": datetime.utcnow(),
+        }
+
+        pre_subjective_test_evaluation_collection.insert_one(evaluation_doc)
+        return evaluation_doc
+
+    except Exception as e:
+        print(f"Error in evaluate_subjective_answers: {str(e)}")
+        return None
+
+
+def display_evaluation_to_faculty(session_id, student_id, course_id):
+    """
+    Display interface for faculty to generate and view evaluations
+    """
+    st.header("Evaluate Subjective Tests")
+
+    try:
+        # Fetch available tests
+        print("session_id", session_id, "student_id", student_id, "course_id", course_id)
+        tests = list(
+            subjective_tests_collection.find(
+                {"session_id": str(session_id), "status": "active"}
+            )
+        )
+
+        print("tests", tests)
+        if not tests:
+            st.info("No subjective tests found for this session.")
+            return
+
+        # Select test
+        test_options = {
+            (
+                f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})"
+                if "created_at" in test
+                else test["title"]
+            ): test["_id"]
+            for test in tests
+        }
+
+        if test_options:
+            selected_test = st.selectbox(
+                "Select Test to Evaluate", options=list(test_options.keys())
+            )
+
+            if selected_test:
+                test_id = test_options[selected_test]
+                test = subjective_tests_collection.find_one({"_id": test_id})
+
+                if test:
+                    submissions = test.get("submissions", [])
+                    if not submissions:
+                        st.warning("No submissions found for this test.")
+                        return
+
+                    # Create a dropdown for student submissions
+                    student_options = {
+                        f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub["student_id"]
+                        for sub in submissions
+                    }
+
+                    selected_student = st.selectbox(
+                        "Select Student Submission",
+                        options=list(student_options.keys()),
+                    )
+
+                    if selected_student:
+                        student_id = student_options[selected_student]
+                        submission = next(
+                            sub for sub in submissions if sub["student_id"] == student_id
+                        )
+
+                        st.markdown(
+                            f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}"
+                        )
+                        st.markdown("---")
+
+                        # Display questions and answers
+                        st.subheader("Submission Details")
+                        for i, (question, answer) in enumerate(
+                            zip(test["questions"], submission["answers"])
+                        ):
+                            st.markdown(f"**Question {i+1}:** {question['question']}")
+                            st.markdown(f"**Answer:** {answer}")
+                            st.markdown("---")
+
+                        # Check for existing evaluation
+                        existing_eval = subjective_test_evaluation_collection.find_one(
+                            {
+                                "test_id": test_id,
+                                "student_id": student_id,
+                                "session_id": str(session_id),
+                            }
+                        )
+
+                        if existing_eval:
+                            st.subheader("Evaluation Results")
+                            for eval_item in existing_eval["evaluations"]:
+                                st.markdown(
+                                    f"### Evaluation for Question {eval_item['question_number']}"
+                                )
+                                st.markdown(eval_item["evaluation"])
+                                st.markdown("---")
+
+                            st.success("✓ Evaluation completed")
+                            if st.button(
+                                "Regenerate Evaluation",
+                                key=f"regenerate_{student_id}_{test_id}",
+                            ):
+                                with st.spinner("Regenerating evaluation..."):
+                                    evaluation = evaluate_subjective_answers(
+                                        str(session_id), student_id, test_id
+                                    )
+                                    if evaluation:
+                                        st.success("Evaluation regenerated successfully!")
+                                        st.rerun()
+                                    else:
+                                        st.error("Error regenerating evaluation.")
+                        else:
+                            st.subheader("Generate Evaluation")
+                            if st.button(
+                                "Generate Evaluation",
+                                key=f"evaluate_{student_id}_{test_id}",
+                            ):
+                                with st.spinner("Generating evaluation..."):
+                                    evaluation = evaluate_subjective_answers(
+                                        str(session_id), student_id, test_id
+                                    )
+                                    if evaluation:
+                                        st.success("Evaluation generated successfully!")
+                                        st.markdown("### Generated Evaluation")
+                                        for eval_item in evaluation["evaluations"]:
+                                            st.markdown(
+                                                f"#### Question {eval_item['question_number']}"
+                                            )
+                                            st.markdown(eval_item["evaluation"])
+                                            st.markdown("---")
+                                        st.rerun()
+                                    else:
+                                        st.error("Error generating evaluation.")
+
+    except Exception as e:
+        st.error(f"An error occurred while loading the evaluations: {str(e)}")
+        print(f"Error in display_evaluation_to_faculty: {str(e)}")
+        return None
+
+
+def pre_display_evaluation_to_faculty(session_id, student_id, course_id):
+    """
+    Display interface for faculty to generate and view evaluations
+    """
+    st.header("Evaluate Pre Subjective Tests")
+
+    try:
+        # Fetch available tests
+        tests = list(
+            pre_subjective_tests_collection.find(
+                {"session_id": str(session_id), "status": "active"}
+            )
+        )
+
+        if not tests:
+            st.info("No subjective tests found for this session.")
+            return
+
+        # Select test
+        test_options = {
+            (
+                f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})"
+                if "created_at" in test
+                else test["title"]
+            ): test["_id"]
+            for test in tests
+        }
+
+        if test_options:
+            selected_test = st.selectbox(
+                "Select Test to Evaluate", options=list(test_options.keys())
+            )
+
+            if selected_test:
+                test_id = test_options[selected_test]
+                test = pre_subjective_tests_collection.find_one({"_id": test_id})
+
+                if test:
+                    submissions = test.get("submissions", [])
+                    if not submissions:
+                        st.warning("No submissions found for this test.")
+                        return
+
+                    # Create a dropdown for student submissions
+                    student_options = {
+                        f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub["student_id"]
+                        for sub in submissions
+                    }
+
+                    selected_student = st.selectbox(
+                        "Select Student Submission",
+                        options=list(student_options.keys()),
+                    )
+
+                    if selected_student:
+                        student_id = student_options[selected_student]
+                        submission = next(
+                            sub for sub in submissions if sub["student_id"] == student_id
+                        )
+
+                        st.markdown(
+                            f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}"
+                        )
+                        st.markdown("---")
+
+                        # Display questions and answers
+                        st.subheader("Submission Details")
+                        for i, (question, answer) in enumerate(
+                            zip(test["questions"], submission["answers"])
+                        ):
+                            st.markdown(f"**Question {i+1}:** {question['question']}")
+                            st.markdown(f"**Answer:** {answer}")
+                            st.markdown("---")
+
+                        # Check for existing evaluation
+                        existing_eval = (
+                            pre_subjective_test_evaluation_collection.find_one(
+                                {
+                                    "test_id": test_id,
+                                    "student_id": student_id,
+                                    "session_id": str(session_id),
+                                }
+                            )
+                        )
+
+                        if existing_eval:
+                            st.subheader("Evaluation Results")
+                            for eval_item in existing_eval["evaluations"]:
+                                st.markdown(
+                                    f"### Evaluation for Question {eval_item['question_number']}"
+                                )
+                                st.markdown(eval_item["evaluation"])
+                                st.markdown("---")
+
+                            st.success("✓ Evaluation completed")
+                            if st.button(
+                                "Regenerate Evaluation",
+                                key=f"regenerate_{student_id}_{test_id}",
+                            ):
+                                with st.spinner("Regenerating evaluation..."):
+                                    evaluation = pre_evaluate_subjective_answers(
+                                        str(session_id), student_id, test_id
+                                    )
+                                    if evaluation:
+                                        st.success("Evaluation regenerated successfully!")
+                                        st.rerun()
+                                    else:
+                                        st.error("Error regenerating evaluation.")
+                        else:
+                            st.subheader("Generate Evaluation")
+                            if st.button(
+                                "Generate Evaluation",
+                                key=f"pre_evaluate_{student_id}_{test_id}",
+                            ):
+                                with st.spinner("Generating evaluation..."):
+                                    print("session_id", session_id, "student_id", student_id, "test_id", test_id)
+                                    evaluation = pre_evaluate_subjective_answers(
+                                        str(session_id), student_id, test_id
+                                    )
+                                    if evaluation:
+                                        st.success("Evaluation generated successfully!")
+                                        st.markdown("### Generated Evaluation")
+                                        for eval_item in evaluation["evaluations"]:
+                                            st.markdown(
+                                                f"#### Question {eval_item['question_number']}"
+                                            )
+                                            st.markdown(eval_item["evaluation"])
+                                            st.markdown("---")
+                                        st.rerun()
+                                    else:
+                                        st.error("Error generating evaluation.")
+
+    except Exception as e:
+        st.error(f"An error occurred while loading the evaluations: {str(e)}")
+        print(f"Error in display_evaluation_to_faculty: {str(e)}")