Update subjective_test_evaluation.py

subjective_test_evaluation.py (CHANGED): +798 -247
@@ -1,247 +1,798 @@
Removed: the entire previous module (old lines 1-247), i.e. the imports, MongoDB setup, evaluate_subjective_answers, and display_evaluation_to_faculty. The removed code is preserved verbatim in the commented-out block at the top of the new file.

New file (798 lines):
# import streamlit as st
# from datetime import datetime
# from pymongo import MongoClient
# import os
# from openai import OpenAI
# from dotenv import load_dotenv
# from bson import ObjectId

# load_dotenv()

# # MongoDB setup
# MONGO_URI = os.getenv('MONGO_URI')
# client = MongoClient(MONGO_URI)
# db = client["novascholar_db"]
# subjective_tests_collection = db["subjective_tests"]
# subjective_test_evaluation_collection = db["subjective_test_evaluation"]
# resources_collection = db["resources"]
# students_collection = db["students"]

# def evaluate_subjective_answers(session_id, student_id, test_id):
#     """
#     Generate evaluation and analysis for subjective test answers
#     """
#     try:
#         # Fetch test and student submission
#         test = subjective_tests_collection.find_one({"_id": test_id})
#         if not test:
#             return None

#         # Find student's submission
#         submission = next(
#             (sub for sub in test.get('submissions', [])
#              if sub['student_id'] == str(student_id)),
#             None
#         )
#         if not submission:
#             return None

#         # Fetch pre-class materials
#         pre_class_materials = resources_collection.find({"session_id": session_id})
#         pre_class_content = ""
#         for material in pre_class_materials:
#             if 'text_content' in material:
#                 pre_class_content += material['text_content'] + "\n"

#         # Default rubric (can be customized later)
#         default_rubric = """
#         1. Content Understanding (1-4):
#            - Demonstrates comprehensive understanding of core concepts
#            - Accurately applies relevant theories and principles
#            - Provides specific examples and evidence

#         2. Critical Analysis (1-4):
#            - Shows depth of analysis
#            - Makes meaningful connections
#            - Demonstrates original thinking

#         3. Organization & Clarity (1-4):
#            - Clear structure and flow
#            - Well-developed arguments
#            - Effective use of examples
#         """

#         # Initialize OpenAI client
#         client = OpenAI(api_key=os.getenv('OPENAI_KEY'))

#         evaluations = []
#         for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
#             analysis_content = f"""
#             Question: {question['question']}
#             Student Answer: {answer}
#             """

#             prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:

#             1. Evaluation Process:
#             - Use each rubric criterion (scored 1-4) for internal assessment
#             - Compare response with pre-class materials
#             - Check alignment with all rubric requirements
#             - Calculate final score: sum of criteria scores converted to 10-point scale

#             Pre-class Materials:
#             {pre_class_content[:1000]} # Truncate to avoid token limits

#             Rubric Criteria:
#             {default_rubric}

#             Question and Answer:
#             {analysis_content}

#             Provide your assessment in the following format:

#             **Score and Evidence**
#             - Score: [X]/10
#             - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]

#             **Key Areas for Improvement**
#             - [Concise improvement point 1]
#             - [Concise improvement point 2]
#             - [Concise improvement point 3]
#             """

#             # Generate evaluation using OpenAI
#             response = client.chat.completions.create(
#                 model="gpt-4o-mini",
#                 messages=[{"role": "user", "content": prompt_template}],
#                 max_tokens=500,
#                 temperature=0.4
#             )

#             evaluations.append({
#                 "question_number": i + 1,
#                 "question": question['question'],
#                 "answer": answer,
#                 "evaluation": response.choices[0].message.content
#             })

#         # Store evaluation in MongoDB
#         evaluation_doc = {
#             "test_id": test_id,
#             "student_id": student_id,
#             "session_id": session_id,
#             "evaluations": evaluations,
#             "evaluated_at": datetime.utcnow()
#         }

#         subjective_test_evaluation_collection.insert_one(evaluation_doc)
#         return evaluation_doc

#     except Exception as e:
#         print(f"Error in evaluate_subjective_answers: {str(e)}")
#         return None

# def display_evaluation_to_faculty(session_id, student_id, course_id):
#     """
#     Display interface for faculty to generate and view evaluations
#     """
#     st.header("Evaluate Subjective Tests")

#     try:
#         # Fetch available tests
#         tests = list(subjective_tests_collection.find({
#             "session_id": str(session_id),
#             "status": "active"
#         }))

#         if not tests:
#             st.info("No subjective tests found for this session.")
#             return

#         # Select test
#         test_options = {
#             f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})" if 'created_at' in test else test['title']: test['_id']
#             for test in tests
#         }

#         if test_options:
#             selected_test = st.selectbox(
#                 "Select Test to Evaluate",
#                 options=list(test_options.keys())
#             )

#             if selected_test:
#                 test_id = test_options[selected_test]
#                 test = subjective_tests_collection.find_one({"_id": test_id})

#                 if test:
#                     submissions = test.get('submissions', [])
#                     if not submissions:
#                         st.warning("No submissions found for this test.")
#                         return

#                     # Create a dropdown for student submissions
#                     student_options = {
#                         f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub['student_id']
#                         for sub in submissions
#                     }

#                     selected_student = st.selectbox(
#                         "Select Student Submission",
#                         options=list(student_options.keys())
#                     )

#                     if selected_student:
#                         student_id = student_options[selected_student]
#                         submission = next(sub for sub in submissions if sub['student_id'] == student_id)

#                         st.markdown(f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}")
#                         st.markdown("---")

#                         # Display questions and answers
#                         st.subheader("Submission Details")
#                         for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
#                             st.markdown(f"**Question {i+1}:** {question['question']}")
#                             st.markdown(f"**Answer:** {answer}")
#                             st.markdown("---")

#                         # Check for existing evaluation
#                         existing_eval = subjective_test_evaluation_collection.find_one({
#                             "test_id": test_id,
#                             "student_id": student_id,
#                             "session_id": str(session_id)
#                         })

#                         if existing_eval:
#                             st.subheader("Evaluation Results")
#                             for eval_item in existing_eval['evaluations']:
#                                 st.markdown(f"### Evaluation for Question {eval_item['question_number']}")
#                                 st.markdown(eval_item['evaluation'])
#                                 st.markdown("---")

#                             st.success("✓ Evaluation completed")
#                             if st.button("Regenerate Evaluation", key=f"regenerate_{student_id}_{test_id}"):
#                                 with st.spinner("Regenerating evaluation..."):
#                                     evaluation = evaluate_subjective_answers(
#                                         str(session_id),
#                                         student_id,
#                                         test_id
#                                     )
#                                     if evaluation:
#                                         st.success("Evaluation regenerated successfully!")
#                                         st.rerun()
#                                     else:
#                                         st.error("Error regenerating evaluation.")
#                         else:
#                             st.subheader("Generate Evaluation")
#                             if st.button("Generate Evaluation", key=f"evaluate_{student_id}_{test_id}"):
#                                 with st.spinner("Generating evaluation..."):
#                                     evaluation = evaluate_subjective_answers(
#                                         str(session_id),
#                                         student_id,
#                                         test_id
#                                     )
#                                     if evaluation:
#                                         st.success("Evaluation generated successfully!")
#                                         st.markdown("### Generated Evaluation")
#                                         for eval_item in evaluation['evaluations']:
#                                             st.markdown(f"#### Question {eval_item['question_number']}")
#                                             st.markdown(eval_item['evaluation'])
#                                             st.markdown("---")
#                                         st.rerun()
#                                     else:
#                                         st.error("Error generating evaluation.")

#     except Exception as e:
#         st.error(f"An error occurred while loading the evaluations: {str(e)}")
#         print(f"Error in display_evaluation_to_faculty: {str(e)}")

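# ---------------------------------------------------------------------------
# The block above is the previous version of this module, retained as a
# comment for reference. The active implementation begins below.
# ---------------------------------------------------------------------------
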
import streamlit as st
from datetime import datetime
from pymongo import MongoClient
import os
from openai import OpenAI
from dotenv import load_dotenv
from bson import ObjectId

load_dotenv()

# MongoDB setup
MONGO_URI = os.getenv("MONGO_URI")
client = MongoClient(MONGO_URI)
db = client["novascholar_db"]
subjective_tests_collection = db["subjective_tests"]
subjective_test_evaluation_collection = db["subjective_test_evaluation"]
pre_subjective_tests_collection = db["pre_subjective_tests"]
resources_collection = db["resources"]
students_collection = db["students"]
pre_subjective_test_evaluation_collection = db["pre_subjective_test_evaluation"]


def evaluate_subjective_answers(session_id, student_id, test_id):
    """
    Generate evaluation and analysis for subjective test answers
    """
    try:
        # Fetch test and student submission
        test = subjective_tests_collection.find_one({"_id": test_id})
        if not test:
            return None

        # Find student's submission
        submission = next(
            (
                sub
                for sub in test.get("submissions", [])
                if sub["student_id"] == str(student_id)
            ),
            None,
        )
        if not submission:
            return None

        # Fetch pre-class materials
        pre_class_materials = resources_collection.find({"session_id": session_id})
        pre_class_content = ""
        for material in pre_class_materials:
            if "text_content" in material:
                pre_class_content += material["text_content"] + "\n"

        # Default rubric (can be customized later)
        default_rubric = """
        1. Content Understanding (1-4):
           - Demonstrates comprehensive understanding of core concepts
           - Accurately applies relevant theories and principles
           - Provides specific examples and evidence

        2. Critical Analysis (1-4):
           - Shows depth of analysis
           - Makes meaningful connections
           - Demonstrates original thinking

        3. Organization & Clarity (1-4):
           - Clear structure and flow
           - Well-developed arguments
           - Effective use of examples
        """

        # Initialize OpenAI client
        client = OpenAI(api_key=os.getenv("OPENAI_KEY"))

        evaluations = []
        for i, (question, answer) in enumerate(
            zip(test["questions"], submission["answers"])
        ):
            analysis_content = f"""
            Question: {question['question']}
            Student Answer: {answer}
            """

            prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:

            1. Evaluation Process:
            - Use each rubric criterion (scored 1-4) for internal assessment
            - Compare response with pre-class materials
            - Check alignment with all rubric requirements
            - Calculate final score: sum of criteria scores converted to 10-point scale

            Pre-class Materials:
            {pre_class_content[:1000]} # Truncate to avoid token limits

            Rubric Criteria:
            {default_rubric}

            Question and Answer:
            {analysis_content}

            Provide your assessment in the following format:

            **Score and Evidence**
            - Score: [X]/10
            - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]

            **Key Areas for Improvement**
            - [Concise improvement point 1]
            - [Concise improvement point 2]
            - [Concise improvement point 3]
            """

            # Generate evaluation using OpenAI
            response = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt_template}],
                max_tokens=500,
                temperature=0.4,
            )

            evaluations.append(
                {
                    "question_number": i + 1,
                    "question": question["question"],
                    "answer": answer,
                    "evaluation": response.choices[0].message.content,
                }
            )

        # Store evaluation in MongoDB
        evaluation_doc = {
            "test_id": test_id,
            "student_id": student_id,
            "session_id": session_id,
            "evaluations": evaluations,
            "evaluated_at": datetime.utcnow(),
        }

        subjective_test_evaluation_collection.insert_one(evaluation_doc)
        return evaluation_doc

    except Exception as e:
        print(f"Error in evaluate_subjective_answers: {str(e)}")
        return None


def pre_evaluate_subjective_answers(session_id, student_id, test_id):
    """
    Generate evaluation and analysis for subjective test answers
    """
    try:
        # Fetch test and student submission
        test = pre_subjective_tests_collection.find_one({"_id": test_id})
        if not test:
            return None

        # Find student's submission
        submission = next(
            (
                sub
                for sub in test.get("submissions", [])
                if sub["student_id"] == str(student_id)
            ),
            None,
        )
        if not submission:
            return None

        # Fetch pre-class materials
        pre_class_materials = resources_collection.find({"session_id": session_id})
        pre_class_content = ""
        for material in pre_class_materials:
            if "text_content" in material:
                pre_class_content += material["text_content"] + "\n"

        # Default rubric (can be customized later)
        default_rubric = """
        1. Content Understanding (1-4):
           - Demonstrates comprehensive understanding of core concepts
           - Accurately applies relevant theories and principles
           - Provides specific examples and evidence

        2. Critical Analysis (1-4):
           - Shows depth of analysis
           - Makes meaningful connections
           - Demonstrates original thinking

        3. Organization & Clarity (1-4):
           - Clear structure and flow
           - Well-developed arguments
           - Effective use of examples
        """

        # Initialize OpenAI client
        client = OpenAI(api_key=os.getenv("OPENAI_KEY"))

        evaluations = []
        for i, (question, answer) in enumerate(
            zip(test["questions"], submission["answers"])
        ):
            analysis_content = f"""
            Question: {question['question']}
            Student Answer: {answer}
            """

            prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:

            1. Evaluation Process:
            - Use each rubric criterion (scored 1-4) for internal assessment
            - Compare response with pre-class materials
            - Check alignment with all rubric requirements
            - Calculate final score: sum of criteria scores converted to 10-point scale

            Pre-class Materials:
            {pre_class_content[:1000]} # Truncate to avoid token limits

            Rubric Criteria:
            {default_rubric}

            Question and Answer:
            {analysis_content}

            Provide your assessment in the following format:

            **Score and Evidence**
            - Score: [X]/10
            - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]

            **Key Areas for Improvement**
            - [Concise improvement point 1]
            - [Concise improvement point 2]
            - [Concise improvement point 3]
            """

            # Generate evaluation using OpenAI
            response = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt_template}],
                max_tokens=500,
                temperature=0.4,
            )

            evaluations.append(
                {
                    "question_number": i + 1,
                    "question": question["question"],
                    "answer": answer,
                    "evaluation": response.choices[0].message.content,
                }
            )

        # Store evaluation in MongoDB
        evaluation_doc = {
            "test_id": test_id,
            "student_id": student_id,
            "session_id": session_id,
            "evaluations": evaluations,
            "evaluated_at": datetime.utcnow(),
        }

        pre_subjective_test_evaluation_collection.insert_one(evaluation_doc)
        return evaluation_doc

    except Exception as e:
        print(f"Error in pre_evaluate_subjective_answers: {str(e)}")
        return None


def display_evaluation_to_faculty(session_id, student_id, course_id):
    """
    Display interface for faculty to generate and view evaluations
    """
    st.header("Evaluate Subjective Tests")

    try:
        # Fetch available tests
        print("session_id", session_id, "student_id", student_id, "course_id", course_id)
        tests = list(
            subjective_tests_collection.find(
                {"session_id": str(session_id), "status": "active"}
            )
        )

        print("tests", tests)
        if not tests:
            st.info("No subjective tests found for this session.")
            return

        # Select test
        test_options = {
            (
                f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})"
                if "created_at" in test
                else test["title"]
            ): test["_id"]
            for test in tests
        }

        if test_options:
            selected_test = st.selectbox(
                "Select Test to Evaluate", options=list(test_options.keys())
            )

            if selected_test:
                test_id = test_options[selected_test]
                test = subjective_tests_collection.find_one({"_id": test_id})

                if test:
                    submissions = test.get("submissions", [])
                    if not submissions:
                        st.warning("No submissions found for this test.")
                        return

                    # Create a dropdown for student submissions
                    student_options = {
                        f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub[
                            "student_id"
                        ]
                        for sub in submissions
                    }

                    selected_student = st.selectbox(
                        "Select Student Submission",
                        options=list(student_options.keys()),
                    )

                    if selected_student:
                        student_id = student_options[selected_student]
                        submission = next(
                            sub
                            for sub in submissions
                            if sub["student_id"] == student_id
                        )

                        st.markdown(
                            f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}"
                        )
                        st.markdown("---")

                        # Display questions and answers
                        st.subheader("Submission Details")
                        for i, (question, answer) in enumerate(
                            zip(test["questions"], submission["answers"])
                        ):
                            st.markdown(f"**Question {i+1}:** {question['question']}")
                            st.markdown(f"**Answer:** {answer}")
                            st.markdown("---")

                        # Check for existing evaluation
                        existing_eval = subjective_test_evaluation_collection.find_one(
                            {
                                "test_id": test_id,
                                "student_id": student_id,
                                "session_id": str(session_id),
                            }
                        )

                        if existing_eval:
                            st.subheader("Evaluation Results")
                            for eval_item in existing_eval["evaluations"]:
                                st.markdown(
                                    f"### Evaluation for Question {eval_item['question_number']}"
                                )
                                st.markdown(eval_item["evaluation"])
                                st.markdown("---")

                            st.success("✓ Evaluation completed")
                            if st.button(
                                "Regenerate Evaluation",
                                key=f"regenerate_{student_id}_{test_id}",
                            ):
                                with st.spinner("Regenerating evaluation..."):
                                    evaluation = evaluate_subjective_answers(
                                        str(session_id), student_id, test_id
                                    )
                                    if evaluation:
                                        st.success(
                                            "Evaluation regenerated successfully!"
                                        )
                                        st.rerun()
                                    else:
                                        st.error("Error regenerating evaluation.")
                        else:
                            st.subheader("Generate Evaluation")
                            if st.button(
                                "Generate Evaluation",
                                key=f"evaluate_{student_id}_{test_id}",
                            ):
                                with st.spinner("Generating evaluation..."):
                                    evaluation = evaluate_subjective_answers(
                                        str(session_id), student_id, test_id
                                    )
                                    if evaluation:
                                        st.success("Evaluation generated successfully!")
                                        st.markdown("### Generated Evaluation")
                                        for eval_item in evaluation["evaluations"]:
                                            st.markdown(
                                                f"#### Question {eval_item['question_number']}"
                                            )
                                            st.markdown(eval_item["evaluation"])
                                            st.markdown("---")
                                        st.rerun()
                                    else:
                                        st.error("Error generating evaluation.")

    except Exception as e:
        st.error(f"An error occurred while loading the evaluations: {str(e)}")
        print(f"Error in display_evaluation_to_faculty: {str(e)}")
        return None


def pre_display_evaluation_to_faculty(session_id, student_id, course_id):
    """
    Display interface for faculty to generate and view evaluations
    """
    st.header("Evaluate Pre Subjective Tests")

    try:
        # Fetch available tests
        tests = list(
            pre_subjective_tests_collection.find(
                {"session_id": str(session_id), "status": "active"}
            )
        )

        if not tests:
            st.info("No subjective tests found for this session.")
            return

        # Select test
        test_options = {
            (
                f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})"
                if "created_at" in test
                else test["title"]
            ): test["_id"]
            for test in tests
        }

        if test_options:
            selected_test = st.selectbox(
                "Select Test to Evaluate", options=list(test_options.keys())
            )

            if selected_test:
                test_id = test_options[selected_test]
                test = pre_subjective_tests_collection.find_one({"_id": test_id})

                if test:
                    submissions = test.get("submissions", [])
                    if not submissions:
                        st.warning("No submissions found for this test.")
                        return

                    # Create a dropdown for student submissions
                    student_options = {
                        f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub[
                            "student_id"
                        ]
                        for sub in submissions
                    }

                    selected_student = st.selectbox(
                        "Select Student Submission",
                        options=list(student_options.keys()),
                    )

                    if selected_student:
                        student_id = student_options[selected_student]
                        submission = next(
                            sub
                            for sub in submissions
                            if sub["student_id"] == student_id
                        )

                        st.markdown(
                            f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}"
                        )
                        st.markdown("---")

                        # Display questions and answers
                        st.subheader("Submission Details")
                        for i, (question, answer) in enumerate(
                            zip(test["questions"], submission["answers"])
                        ):
                            st.markdown(f"**Question {i+1}:** {question['question']}")
                            st.markdown(f"**Answer:** {answer}")
                            st.markdown("---")

                        # Check for existing evaluation
                        existing_eval = (
                            pre_subjective_test_evaluation_collection.find_one(
                                {
                                    "test_id": test_id,
                                    "student_id": student_id,
                                    "session_id": str(session_id),
                                }
                            )
                        )

                        if existing_eval:
                            st.subheader("Evaluation Results")
                            for eval_item in existing_eval["evaluations"]:
                                st.markdown(
                                    f"### Evaluation for Question {eval_item['question_number']}"
                                )
                                st.markdown(eval_item["evaluation"])
                                st.markdown("---")

                            st.success("✓ Evaluation completed")
                            # "pre_" prefix keeps this widget key distinct from the
                            # regenerate button in display_evaluation_to_faculty.
                            if st.button(
                                "Regenerate Evaluation",
                                key=f"pre_regenerate_{student_id}_{test_id}",
                            ):
                                with st.spinner("Regenerating evaluation..."):
                                    evaluation = pre_evaluate_subjective_answers(
                                        str(session_id), student_id, test_id
                                    )
                                    if evaluation:
                                        st.success(
                                            "Evaluation regenerated successfully!"
                                        )
                                        st.rerun()
                                    else:
                                        st.error("Error regenerating evaluation.")
                        else:
                            st.subheader("Generate Evaluation")
                            if st.button(
                                "Generate Evaluation",
                                key=f"pre_evaluate_{student_id}_{test_id}",
                            ):
                                with st.spinner("Generating evaluation..."):
                                    print("session_id", session_id, "student_id", student_id, "test_id", test_id)
                                    evaluation = pre_evaluate_subjective_answers(
                                        str(session_id), student_id, test_id
                                    )
                                    if evaluation:
                                        st.success("Evaluation generated successfully!")
                                        st.markdown("### Generated Evaluation")
                                        for eval_item in evaluation["evaluations"]:
                                            st.markdown(
                                                f"#### Question {eval_item['question_number']}"
                                            )
                                            st.markdown(eval_item["evaluation"])
                                            st.markdown("---")
                                        st.rerun()
                                    else:
                                        st.error("Error generating evaluation.")

    except Exception as e:
        st.error(f"An error occurred while loading the evaluations: {str(e)}")
        print(f"Error in pre_display_evaluation_to_faculty: {str(e)}")
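
For context, a minimal sketch of how these views might be invoked from a Streamlit page. The st.session_state keys and the radio control are assumptions for illustration; the calling code is not part of this commit.

import streamlit as st

from subjective_test_evaluation import (
    display_evaluation_to_faculty,
    pre_display_evaluation_to_faculty,
)

# Hypothetical session-state keys; the real app may pass these differently.
session_id = st.session_state.get("session_id")
student_id = st.session_state.get("student_id")
course_id = st.session_state.get("course_id")

mode = st.radio("Test type", ["In-class subjective", "Pre-class subjective"])
if mode == "In-class subjective":
    display_evaluation_to_faculty(session_id, student_id, course_id)
else:
    pre_display_evaluation_to_faculty(session_id, student_id, course_id)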
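
evaluate_subjective_answers and pre_evaluate_subjective_answers differ only in which collections they read from and write to (the two display functions are near-duplicates in the same way), so both could delegate to one parameterized helper. A hedged refactor sketch, using the hypothetical name _evaluate_answers:

def _evaluate_answers(session_id, student_id, test_id, tests_coll, eval_coll):
    # Same body as evaluate_subjective_answers above, with the two module-level
    # collections replaced by the tests_coll / eval_coll parameters.
    ...


def evaluate_subjective_answers(session_id, student_id, test_id):
    return _evaluate_answers(
        session_id, student_id, test_id,
        subjective_tests_collection,
        subjective_test_evaluation_collection,
    )


def pre_evaluate_subjective_answers(session_id, student_id, test_id):
    return _evaluate_answers(
        session_id, student_id, test_id,
        pre_subjective_tests_collection,
        pre_subjective_test_evaluation_collection,
    )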
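
The prompt asks the model for a "- Score: [X]/10" line, but the evaluation is stored as raw text and the number is never extracted. If a numeric score is needed downstream, a small parser along these lines could work; the helper name and regex are assumptions, and the None case matters because model output may drift from the requested format.

import re
from typing import Optional


def extract_score(evaluation_text: str) -> Optional[float]:
    """Pull X out of a 'Score: [X]/10' (or 'Score: X/10') line, if present."""
    match = re.search(r"Score:\s*\[?(\d+(?:\.\d+)?)\]?\s*/\s*10", evaluation_text)
    return float(match.group(1)) if match else None


# e.g. extract_score("- Score: 7/10\n- Evidence for deduction: ...") -> 7.0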