Harshal Vhatkar committed
Commit 7e44e6c · 2 Parent(s): 1936b33, abd0aa3

Merge branch 'main' of https://huggingface.co/spaces/SPJIMR-Internship/SPJIMR_FlipClassroom_RCopilot_ResearchInternship

Files changed (4)
  1. gen_mcqs.py +282 -0
  2. rubrics.py +8 -8
  3. session_page.py +0 -0
  4. subjective_test_evaluation.py +798 -252
gen_mcqs.py CHANGED
@@ -1,3 +1,210 @@
1
  import ast
2
  from pymongo import MongoClient
3
  from datetime import datetime
@@ -23,6 +230,7 @@ model = genai.GenerativeModel('gemini-pro')
23
  client = MongoClient(MONGO_URI)
24
  db = client['novascholar_db']
25
  quizzes_collection = db["quizzes"]
 
26
 
27
  def strip_code_markers(response_text):
28
  """Strip off the markers ``` and python from a LLM model's response"""
@@ -117,6 +325,26 @@ def save_quiz(course_id, session_id, title, questions, user_id):
117
  print(f"Error saving quiz: {e}")
118
  return None
119
 
120
 
121
  def get_student_quiz_score(quiz_id, student_id):
122
  """Get student's score for a specific quiz"""
@@ -131,6 +359,19 @@ def get_student_quiz_score(quiz_id, student_id):
131
  return quiz['submissions'][0].get('score')
132
  return None
133
 
134
  # def submit_quiz_answers(quiz_id, student_id, student_answers):
135
  # """Submit and score student's quiz answers"""
136
  # quiz = quizzes_collection.find_one({"_id": quiz_id})
@@ -201,6 +442,47 @@ def submit_quiz_answers(quiz_id, student_id, student_answers):
201
 
202
  return score if result.modified_count > 0 else None
203
 
204
  except Exception as e:
205
  print(f"Error submitting quiz: {e}")
206
  return None
 
1
+ # import ast
2
+ # from pymongo import MongoClient
3
+ # from datetime import datetime
4
+ # import openai
5
+ # import google.generativeai as genai
6
+ # from google.generativeai import GenerativeModel
7
+ # from dotenv import load_dotenv
8
+ # import os
9
+ # from file_upload_vectorize import resources_collection, vectors_collection, courses_collection2, faculty_collection
10
+
11
+ # # Load environment variables
12
+ # load_dotenv()
13
+ # MONGO_URI = os.getenv('MONGO_URI')
14
+ # OPENAI_KEY = os.getenv('OPENAI_KEY')
15
+ # GEMINI_KEY = os.getenv('GEMINI_KEY')
16
+
17
+ # # Configure APIs
18
+ # openai.api_key = OPENAI_KEY
19
+ # genai.configure(api_key=GEMINI_KEY)
20
+ # model = genai.GenerativeModel('gemini-pro')
21
+
22
+ # # Connect to MongoDB
23
+ # client = MongoClient(MONGO_URI)
24
+ # db = client['novascholar_db']
25
+ # quizzes_collection = db["quizzes"]
26
+
27
+ # def strip_code_markers(response_text):
28
+ # """Strip off the markers ``` and python from a LLM model's response"""
29
+ # if response_text.startswith("```python"):
30
+ # response_text = response_text[len("```python"):].strip()
31
+ # if response_text.startswith("```"):
32
+ # response_text = response_text[len("```"):].strip()
33
+ # if response_text.endswith("```"):
34
+ # response_text = response_text[:-len("```")].strip()
35
+ # return response_text
36
+
37
+
38
+ # # New function to generate MCQs using Gemini
39
+ # def generate_mcqs(context, num_questions, session_title, session_description):
40
+ # """Generate MCQs either from context or session details"""
41
+ # try:
42
+ # # Initialize Gemini model
43
+ # if context:
44
+ # prompt = f"""
45
+ # Based on the following content, generate {num_questions} multiple choice questions.
46
+ # Format each question as a Python dictionary with the following structure:
47
+ # {{
48
+ # "question": "Question text here",
49
+ # "options": ["A) option1", "B) option2", "C) option3", "D) option4"],
50
+ # "correct_option": "A) option1" or "B) option2" or "C) option3" or "D) option4"
51
+ # }}
52
+
53
+ # Content:
54
+ # {context}
55
+
56
+ # Generate challenging but clear questions that test understanding of key concepts.
57
+ # Return only the Python list of dictionaries.
58
+ # """
59
+ # else:
60
+ # prompt = f"""
61
+ # Generate {num_questions} multiple choice questions about the topic:
62
+ # Title: {session_title}
63
+ # Description: {session_description}
64
+
65
+ # Format each question as a Python dictionary with the following structure:
66
+ # {{
67
+ # "question": "Question text here",
68
+ # "options": ["A) option1", "B) option2", "C) option3", "D) option4"],
69
+ # "correct_option": "A" or "B" or "C" or "D"
70
+ # }}
71
+
72
+ # Generate challenging but clear questions.
73
+ # Return only the Python list of dictionaries without any additional formatting or markers
74
+ # Do not write any other text, do not start the response with (```python), do not end the response with backticks(```)
75
+ # A Sample response should look like this: Response Text: [
76
+ # {
77
+ # "question": "Which of the following is NOT a valid data type in C++?",
78
+ # "options": ["int", "double", "boolean", "char"],
79
+ # "correct_option": "C"
80
+ # }
81
+ # ] (Notice that there are no backticks(```) around the response and no (```python))
82
+ # .
83
+ # """
84
+
85
+ # response = model.generate_content(prompt)
86
+ # response_text = response.text.strip()
87
+ # print("Response Text:", response_text)
88
+ # modified_response_text = strip_code_markers(response_text)
89
+ # print("Response Text Modified to:", modified_response_text)
90
+ # # Extract and parse the response to get the list of MCQs
91
+ # mcqs = ast.literal_eval(modified_response_text) # Be careful with eval, consider using ast.literal_eval for production
92
+ # print(mcqs)
93
+ # if not mcqs:
94
+ # raise ValueError("No questions generated")
95
+ # return mcqs
96
+ # except Exception as e:
97
+ # print(f"Error generating MCQs: , error: {e}")
98
+ # return None
99
+
100
+ # # New function to save quiz to database
101
+ # def save_quiz(course_id, session_id, title, questions, user_id):
102
+ # """Save quiz to database"""
103
+ # try:
104
+ # quiz_data = {
105
+ # "user_id": user_id,
106
+ # "course_id": course_id,
107
+ # "session_id": session_id,
108
+ # "title": title,
109
+ # "questions": questions,
110
+ # "created_at": datetime.utcnow(),
111
+ # "status": "active",
112
+ # "submissions": []
113
+ # }
114
+ # result = quizzes_collection.insert_one(quiz_data)
115
+ # return result.inserted_id
116
+ # except Exception as e:
117
+ # print(f"Error saving quiz: {e}")
118
+ # return None
119
+
120
+
121
+ # def get_student_quiz_score(quiz_id, student_id):
122
+ # """Get student's score for a specific quiz"""
123
+ # quiz = quizzes_collection.find_one(
124
+ # {
125
+ # "_id": quiz_id,
126
+ # "submissions.student_id": student_id
127
+ # },
128
+ # {"submissions.$": 1}
129
+ # )
130
+ # if quiz and quiz.get('submissions'):
131
+ # return quiz['submissions'][0].get('score')
132
+ # return None
133
+
134
+ # # def submit_quiz_answers(quiz_id, student_id, student_answers):
135
+ # # """Submit and score student's quiz answers"""
136
+ # # quiz = quizzes_collection.find_one({"_id": quiz_id})
137
+ # # if not quiz:
138
+ # # return None
139
+
140
+ # # # Calculate score
141
+ # # correct_answers = 0
142
+ # # total_questions = len(quiz['questions'])
143
+
144
+ # # for q_idx, question in enumerate(quiz['questions']):
145
+ # # if student_answers.get(str(q_idx)) == question['correct_option']:
146
+ # # correct_answers += 1
147
+
148
+ # # score = (correct_answers / total_questions) * 100
149
+
150
+ # # # Store submission
151
+ # # submission_data = {
152
+ # # "student_id": student_id,
153
+ # # "answers": student_answers,
154
+ # # "score": score,
155
+ # # "submitted_at": datetime.utcnow()
156
+ # # }
157
+
158
+ # # # Update quiz with submission
159
+ # # quizzes_collection.update_one(
160
+ # # {"_id": quiz_id},
161
+ # # {
162
+ # # "$push": {"submissions": submission_data}
163
+ # # }
164
+ # # )
165
+
166
+ # # return score
167
+ # def submit_quiz_answers(quiz_id, student_id, student_answers):
168
+ # """Submit and score student's quiz answers"""
169
+ # try:
170
+ # quiz = quizzes_collection.find_one({"_id": quiz_id})
171
+ # if not quiz:
172
+ # return None
173
+
174
+ # # Calculate score
175
+ # correct_answers = 0
176
+ # total_questions = len(quiz['questions'])
177
+
178
+ # for q_idx, question in enumerate(quiz['questions']):
179
+ # student_answer = student_answers.get(str(q_idx))
180
+ # if student_answer: # Only check if answer was provided
181
+ # # Extract the option letter (A, B, C, D) from the full answer string
182
+ # answer_letter = student_answer.split(')')[0].strip()
183
+ # if answer_letter == question['correct_option']:
184
+ # correct_answers += 1
185
+
186
+ # score = (correct_answers / total_questions) * 100
187
+
188
+ # # Store submission
189
+ # submission_data = {
190
+ # "student_id": student_id,
191
+ # "answers": student_answers,
192
+ # "score": score,
193
+ # "submitted_at": datetime.utcnow()
194
+ # }
195
+
196
+ # # Update quiz with submission
197
+ # result = quizzes_collection.update_one(
198
+ # {"_id": quiz_id},
199
+ # {"$push": {"submissions": submission_data}}
200
+ # )
201
+
202
+ # return score if result.modified_count > 0 else None
203
+
204
+ # except Exception as e:
205
+ # print(f"Error submitting quiz: {e}")
206
+ # return None
207
+
208
  import ast
209
  from pymongo import MongoClient
210
  from datetime import datetime
 
230
  client = MongoClient(MONGO_URI)
231
  db = client['novascholar_db']
232
  quizzes_collection = db["quizzes"]
233
+ surprise_quizzes_collection = db["surprise_quizzes"]
234
 
235
  def strip_code_markers(response_text):
236
  """Strip off the markers ``` and python from a LLM model's response"""
 
325
  print(f"Error saving quiz: {e}")
326
  return None
327
 
328
+ def save_surprise_quiz(course_id, session_id, title, questions, user_id, no_minutes):
329
+ """Save quiz to database"""
330
+ try:
331
+ quiz_data = {
332
+ "user_id": user_id,
333
+ "course_id": course_id,
334
+ "session_id": session_id,
335
+ "title": title,
336
+ "questions": questions,
337
+ "created_at": datetime.now(),
338
+ "status": "active",
339
+ "submissions": [],
340
+ "no_minutes": no_minutes
341
+ }
342
+ result = surprise_quizzes_collection.insert_one(quiz_data)
343
+ return result.inserted_id
344
+ except Exception as e:
345
+ print(f"Error saving quiz: {e}")
346
+ return None
347
+
348
 
349
  def get_student_quiz_score(quiz_id, student_id):
350
  """Get student's score for a specific quiz"""
 
359
  return quiz['submissions'][0].get('score')
360
  return None
361
 
362
+ def get_student_surprise_quiz_score(quiz_id, student_id):
363
+ """Get student's score for a specific quiz"""
364
+ quiz = surprise_quizzes_collection.find_one(
365
+ {
366
+ "_id": quiz_id,
367
+ "submissions.student_id": student_id
368
+ },
369
+ {"submissions.$": 1}
370
+ )
371
+ if quiz and quiz.get('submissions'):
372
+ return quiz['submissions'][0].get('score')
373
+ return None
374
+
375
  # def submit_quiz_answers(quiz_id, student_id, student_answers):
376
  # """Submit and score student's quiz answers"""
377
  # quiz = quizzes_collection.find_one({"_id": quiz_id})
 
442
 
443
  return score if result.modified_count > 0 else None
444
 
445
+ except Exception as e:
446
+ print(f"Error submitting quiz: {e}")
447
+ return None
448
+
449
+ def submit_surprise_quiz_answers(quiz_id, student_id, student_answers):
450
+ """Submit and score student's quiz answers"""
451
+ try:
452
+ quiz = surprise_quizzes_collection.find_one({"_id": quiz_id})
453
+ if not quiz:
454
+ return None
455
+
456
+ # Calculate score
457
+ correct_answers = 0
458
+ total_questions = len(quiz['questions'])
459
+
460
+ for q_idx, question in enumerate(quiz['questions']):
461
+ student_answer = student_answers.get(str(q_idx))
462
+ if student_answer: # Only check if answer was provided
463
+ # Extract the option letter (A, B, C, D) from the full answer string
464
+ answer_letter = student_answer.split(')')[0].strip()
465
+ if answer_letter == question['correct_option']:
466
+ correct_answers += 1
467
+
468
+ score = (correct_answers / total_questions) * 100
469
+
470
+ # Store submission
471
+ submission_data = {
472
+ "student_id": student_id,
473
+ "answers": student_answers,
474
+ "score": score,
475
+ "submitted_at": datetime.utcnow()
476
+ }
477
+
478
+ # Update quiz with submission
479
+ result = surprise_quizzes_collection.update_one(
480
+ {"_id": quiz_id},
481
+ {"$push": {"submissions": submission_data}}
482
+ )
483
+
484
+ return score if result.modified_count > 0 else None
485
+
486
  except Exception as e:
487
  print(f"Error submitting quiz: {e}")
488
  return None
rubrics.py CHANGED
@@ -98,14 +98,14 @@ def display_rubrics_tab(session, course_id):
98
 
99
  if rubric:
100
  st.json(rubric)
101
- if st.button("Save Rubric"):
102
- rubric_data = {
103
- "course_id": course_id,
104
- "session_id": session['session_id'],
105
- "rubric": json.loads(rubric)
106
- }
107
- rubrics_collection.insert_one(rubric_data)
108
- st.success("Rubric saved successfully!")
109
  else:
110
  st.error("No learning outcomes found for this session")
111
  else:
 
98
 
99
  if rubric:
100
  st.json(rubric)
101
+ # if st.button("Save Rubric"):
102
+ rubric_data = {
103
+ "course_id": course_id,
104
+ "session_id": session['session_id'],
105
+ "rubric": json.loads(rubric)
106
+ }
107
+ rubrics_collection.insert_one(rubric_data)
108
+ st.success("Rubric saved successfully!")
109
  else:
110
  st.error("No learning outcomes found for this session")
111
  else:
session_page.py CHANGED
The diff for this file is too large to render. See raw diff
 
subjective_test_evaluation.py CHANGED
@@ -1,252 +1,798 @@
1
- import openai
2
- from pymongo import MongoClient
3
- from datetime import datetime
4
- import os
5
- from dotenv import load_dotenv
6
- import re
7
- import streamlit as st
8
- from bson import ObjectId
9
-
10
- load_dotenv()
11
- MONGO_URI = os.getenv('MONGO_URI')
12
- OPENAI_API_KEY = os.getenv('OPENAI_KEY')
13
-
14
- client = MongoClient(MONGO_URI)
15
- db = client['novascholar_db']
16
- rubrics_collection = db['rubrics']
17
- resources_collection = db['resources']
18
- subjective_tests_collection = db['subjective_tests']
19
- subjective_test_analysis_collection = db['subjective_test_analysis']
20
-
21
- openai.api_key = OPENAI_API_KEY
22
-
23
- def evaluate_subjective_answers(test_id, student_id, course_id):
24
- """Evaluate subjective test answers using OpenAI."""
25
- try:
26
- # Get test and submission details
27
- test_doc = subjective_tests_collection.find_one({
28
- "_id": ObjectId(test_id),
29
- "course_id": course_id
30
- })
31
- if not test_doc:
32
- return {
33
- "content_analysis": "Error: Test not found",
34
- "analyzed_at": datetime.utcnow(),
35
- "correctness_score": 0
36
- }
37
-
38
- submission = next(
39
- (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
40
- None
41
- )
42
-
43
- if not submission:
44
- return {
45
- "content_analysis": "Error: Submission not found",
46
- "analyzed_at": datetime.utcnow(),
47
- "correctness_score": 0
48
- }
49
-
50
- # Rest of the evaluation logic remains the same
51
- questions = test_doc.get('questions', [])
52
- student_answers = submission.get('answers', [])
53
-
54
- if not questions or not student_answers:
55
- return {
56
- "content_analysis": "Error: No questions or answers found",
57
- "analyzed_at": datetime.utcnow(),
58
- "correctness_score": 0
59
- }
60
-
61
- # Retrieve rubrics for the session
62
- rubric_doc = rubrics_collection.find_one({
63
- "session_id": test_doc['session_id'],
64
- "course_id": course_id
65
- })
66
-
67
- if not rubric_doc:
68
- return {
69
- "content_analysis": "Error: Rubric not found",
70
- "analyzed_at": datetime.utcnow(),
71
- "correctness_score": 0
72
- }
73
-
74
- rubric = rubric_doc.get('rubric', {})
75
-
76
- # Retrieve pre-class materials
77
- pre_class_materials = resources_collection.find({
78
- "session_id": test_doc['session_id'],
79
- "course_id": course_id
80
- })
81
- pre_class_content = "\n".join([material.get('text_content', '') for material in pre_class_materials])
82
-
83
- # Analyze each question
84
- all_analyses = []
85
- total_score = 0
86
-
87
- for i, (question, answer) in enumerate(zip(questions, student_answers), 1):
88
- analysis_content = f"Question {i}: {question['question']}\nAnswer: {answer}\n\nRubric: {rubric}\n\nPre-class Materials: {pre_class_content}\n\n"
89
-
90
- prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
91
-
92
- 1. Evaluation Process:
93
- - Use each rubric criterion (scored 1-4) for internal assessment
94
- - Compare response with pre-class materials
95
- - Check alignment with all rubric requirements
96
- - Calculate final score: sum of criteria scores converted to 10-point scale
97
-
98
- Pre-class Materials:
99
- {pre_class_content}
100
-
101
- Rubric Criteria:
102
- {rubric}
103
-
104
- Question and Answer:
105
- {analysis_content}
106
-
107
- Provide your assessment in the following format:
108
-
109
- **Score and Evidence**
110
- - Score: [X]/10
111
- - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
112
- **Key Areas for Improvement**
113
- - [Concise improvement point 1]
114
- - [Concise improvement point 2]
115
- - [Concise improvement point 3]
116
- """
117
-
118
- response = openai.Completion.create(
119
- model="text-davinci-003",
120
- prompt=prompt_template,
121
- max_tokens=500,
122
- temperature=0.7
123
- )
124
-
125
- individual_analysis = response.choices[0].text.strip()
126
-
127
- try:
128
- score_match = re.search(r'Score: (\d+)', individual_analysis)
129
- question_score = int(score_match.group(1)) if score_match else 0
130
- total_score += question_score
131
- except:
132
- question_score = 0
133
-
134
- formatted_analysis = f"\n\n## Question {i} Analysis\n\n{individual_analysis}"
135
- all_analyses.append(formatted_analysis)
136
-
137
- average_score = round(total_score / len(questions)) if questions else 0
138
- combined_analysis = "\n".join(all_analyses)
139
-
140
- return {
141
- "content_analysis": combined_analysis,
142
- "analyzed_at": datetime.utcnow(),
143
- "correctness_score": average_score
144
- }
145
-
146
- except Exception as e:
147
- return {
148
- "content_analysis": f"Error evaluating answers: {str(e)}",
149
- "analyzed_at": datetime.utcnow(),
150
- "correctness_score": 0
151
- }
152
-
153
- def display_evaluation_to_faculty(session_id, student_id, course_id):
154
- """Display submitted tests with improved error handling and debugging"""
155
- st.subheader("Evaluate Subjective Tests")
156
-
157
- try:
158
- # Convert all IDs to strings for consistent comparison
159
- session_id = str(session_id)
160
- student_id = str(student_id)
161
- course_id = str(course_id)
162
-
163
- print(f"Searching for tests with session_id: {session_id}, student_id: {student_id}, course_id: {course_id}")
164
-
165
- # Query for tests
166
- query = {
167
- "session_id": session_id,
168
- "course_id": course_id,
169
- "submissions": {
170
- "$elemMatch": {
171
- "student_id": student_id
172
- }
173
- }
174
- }
175
-
176
- # Log the query for debugging
177
- print(f"MongoDB Query: {query}")
178
-
179
- # Fetch tests
180
- tests = list(subjective_tests_collection.find(query))
181
- print(f"Found {len(tests)} tests matching query")
182
-
183
- if not tests:
184
- # Check if any tests exist for this session
185
- all_session_tests = list(subjective_tests_collection.find({
186
- "session_id": session_id,
187
- "course_id": course_id
188
- }))
189
-
190
- if all_session_tests:
191
- print(f"Found {len(all_session_tests)} tests for this session, but no submissions from student {student_id}")
192
- st.warning("No submitted tests found for this student, but tests exist for this session.")
193
- else:
194
- print("No tests found for this session at all")
195
- st.info("No tests have been created for this session yet.")
196
- return
197
-
198
- # Display tests and handle evaluation
199
- for test in tests:
200
- with st.expander(f"Test: {test.get('title', 'Untitled Test')}", expanded=True):
201
- # Find student submission
202
- submission = next(
203
- (sub for sub in test.get('submissions', [])
204
- if sub['student_id'] == student_id),
205
- None
206
- )
207
-
208
- if submission:
209
- st.write("### Student's Answers")
210
- for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
211
- st.markdown(f"**Q{i+1}:** {question['question']}")
212
- st.markdown(f"**A{i+1}:** {answer}")
213
- st.markdown("---")
214
-
215
- # Generate/display analysis
216
- if st.button(f"Generate Analysis for {test.get('title')}"):
217
- with st.spinner("Analyzing responses..."):
218
- analysis = evaluate_subjective_answers(
219
- str(test['_id']),
220
- student_id,
221
- course_id
222
- )
223
-
224
- if analysis:
225
- st.markdown("### Analysis")
226
- st.markdown(analysis['content_analysis'])
227
- st.metric("Score", f"{analysis['correctness_score']}/10")
228
- else:
229
- st.error("Submission data not found for this student")
230
-
231
- except Exception as e:
232
- st.error("An error occurred while loading the tests")
233
- with st.expander("Error Details"):
234
- st.write(f"Error: {str(e)}")
235
- st.write(f"Session ID: {session_id}")
236
- st.write(f"Student ID: {student_id}")
237
- st.write(f"Course ID: {course_id}")
238
-
239
- def check_test_submission(session_id, student_id, course_id):
240
- """Utility function to check test submission status"""
241
- try:
242
- query = {
243
- "session_id": str(session_id),
244
- "course_id": str(course_id),
245
- "submissions.student_id": str(student_id)
246
- }
247
-
248
- test = subjective_tests_collection.find_one(query)
249
- return bool(test)
250
- except Exception as e:
251
- print(f"Error checking submission: {e}")
252
- return False
 
1
+ # import streamlit as st
2
+ # from datetime import datetime
3
+ # from pymongo import MongoClient
4
+ # import os
5
+ # from openai import OpenAI
6
+ # from dotenv import load_dotenv
7
+ # from bson import ObjectId
8
+
9
+ # load_dotenv()
10
+
11
+ # # MongoDB setup
12
+ # MONGO_URI = os.getenv('MONGO_URI')
13
+ # client = MongoClient(MONGO_URI)
14
+ # db = client["novascholar_db"]
15
+ # subjective_tests_collection = db["subjective_tests"]
16
+ # subjective_test_evaluation_collection = db["subjective_test_evaluation"]
17
+ # resources_collection = db["resources"]
18
+ # students_collection = db["students"]
19
+
20
+ # def evaluate_subjective_answers(session_id, student_id, test_id):
21
+ # """
22
+ # Generate evaluation and analysis for subjective test answers
23
+ # """
24
+ # try:
25
+ # # Fetch test and student submission
26
+ # test = subjective_tests_collection.find_one({"_id": test_id})
27
+ # if not test:
28
+ # return None
29
+
30
+ # # Find student's submission
31
+ # submission = next(
32
+ # (sub for sub in test.get('submissions', [])
33
+ # if sub['student_id'] == str(student_id)),
34
+ # None
35
+ # )
36
+ # if not submission:
37
+ # return None
38
+
39
+ # # Fetch pre-class materials
40
+ # pre_class_materials = resources_collection.find({"session_id": session_id})
41
+ # pre_class_content = ""
42
+ # for material in pre_class_materials:
43
+ # if 'text_content' in material:
44
+ # pre_class_content += material['text_content'] + "\n"
45
+
46
+ # # Default rubric (can be customized later)
47
+ # default_rubric = """
48
+ # 1. Content Understanding (1-4):
49
+ # - Demonstrates comprehensive understanding of core concepts
50
+ # - Accurately applies relevant theories and principles
51
+ # - Provides specific examples and evidence
52
+
53
+ # 2. Critical Analysis (1-4):
54
+ # - Shows depth of analysis
55
+ # - Makes meaningful connections
56
+ # - Demonstrates original thinking
57
+
58
+ # 3. Organization & Clarity (1-4):
59
+ # - Clear structure and flow
60
+ # - Well-developed arguments
61
+ # - Effective use of examples
62
+ # """
63
+
64
+ # # Initialize OpenAI client
65
+ # client = OpenAI(api_key=os.getenv('OPENAI_KEY'))
66
+
67
+ # evaluations = []
68
+ # for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
69
+ # analysis_content = f"""
70
+ # Question: {question['question']}
71
+ # Student Answer: {answer}
72
+ # """
73
+
74
+ # prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
75
+
76
+ # 1. Evaluation Process:
77
+ # - Use each rubric criterion (scored 1-4) for internal assessment
78
+ # - Compare response with pre-class materials
79
+ # - Check alignment with all rubric requirements
80
+ # - Calculate final score: sum of criteria scores converted to 10-point scale
81
+
82
+ # Pre-class Materials:
83
+ # {pre_class_content[:1000]} # Truncate to avoid token limits
84
+
85
+ # Rubric Criteria:
86
+ # {default_rubric}
87
+
88
+ # Question and Answer:
89
+ # {analysis_content}
90
+
91
+ # Provide your assessment in the following format:
92
+
93
+ # **Score and Evidence**
94
+ # - Score: [X]/10
95
+ # - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
96
+
97
+ # **Key Areas for Improvement**
98
+ # - [Concise improvement point 1]
99
+ # - [Concise improvement point 2]
100
+ # - [Concise improvement point 3]
101
+ # """
102
+
103
+ # # Generate evaluation using OpenAI
104
+ # response = client.chat.completions.create(
105
+ # model="gpt-4o-mini",
106
+ # messages=[{"role": "user", "content": prompt_template}],
107
+ # max_tokens=500,
108
+ # temperature=0.4
109
+ # )
110
+
111
+ # evaluations.append({
112
+ # "question_number": i + 1,
113
+ # "question": question['question'],
114
+ # "answer": answer,
115
+ # "evaluation": response.choices[0].message.content
116
+ # })
117
+
118
+ # # Store evaluation in MongoDB
119
+ # evaluation_doc = {
120
+ # "test_id": test_id,
121
+ # "student_id": student_id,
122
+ # "session_id": session_id,
123
+ # "evaluations": evaluations,
124
+ # "evaluated_at": datetime.utcnow()
125
+ # }
126
+
127
+ # subjective_test_evaluation_collection.insert_one(evaluation_doc)
128
+ # return evaluation_doc
129
+
130
+ # except Exception as e:
131
+ # print(f"Error in evaluate_subjective_answers: {str(e)}")
132
+ # return None
133
+
134
+ # def display_evaluation_to_faculty(session_id, student_id, course_id):
135
+ # """
136
+ # Display interface for faculty to generate and view evaluations
137
+ # """
138
+ # st.header("Evaluate Subjective Tests")
139
+
140
+ # try:
141
+ # # Fetch available tests
142
+ # tests = list(subjective_tests_collection.find({
143
+ # "session_id": str(session_id),
144
+ # "status": "active"
145
+ # }))
146
+
147
+ # if not tests:
148
+ # st.info("No subjective tests found for this session.")
149
+ # return
150
+
151
+ # # Select test
152
+ # test_options = {
153
+ # f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})" if 'created_at' in test else test['title']: test['_id']
154
+ # for test in tests
155
+ # }
156
+
157
+ # if test_options:
158
+ # selected_test = st.selectbox(
159
+ # "Select Test to Evaluate",
160
+ # options=list(test_options.keys())
161
+ # )
162
+
163
+ # if selected_test:
164
+ # test_id = test_options[selected_test]
165
+ # test = subjective_tests_collection.find_one({"_id": test_id})
166
+
167
+ # if test:
168
+ # submissions = test.get('submissions', [])
169
+ # if not submissions:
170
+ # st.warning("No submissions found for this test.")
171
+ # return
172
+
173
+ # # Create a dropdown for student submissions
174
+ # student_options = {
175
+ # f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub['student_id']
176
+ # for sub in submissions
177
+ # }
178
+
179
+ # selected_student = st.selectbox(
180
+ # "Select Student Submission",
181
+ # options=list(student_options.keys())
182
+ # )
183
+
184
+ # if selected_student:
185
+ # student_id = student_options[selected_student]
186
+ # submission = next(sub for sub in submissions if sub['student_id'] == student_id)
187
+
188
+ # st.markdown(f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}")
189
+ # st.markdown("---")
190
+
191
+ # # Display questions and answers
192
+ # st.subheader("Submission Details")
193
+ # for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
194
+ # st.markdown(f"**Question {i+1}:** {question['question']}")
195
+ # st.markdown(f"**Answer:** {answer}")
196
+ # st.markdown("---")
197
+
198
+ # # Check for existing evaluation
199
+ # existing_eval = subjective_test_evaluation_collection.find_one({
200
+ # "test_id": test_id,
201
+ # "student_id": student_id,
202
+ # "session_id": str(session_id)
203
+ # })
204
+
205
+ # if existing_eval:
206
+ # st.subheader("Evaluation Results")
207
+ # for eval_item in existing_eval['evaluations']:
208
+ # st.markdown(f"### Evaluation for Question {eval_item['question_number']}")
209
+ # st.markdown(eval_item['evaluation'])
210
+ # st.markdown("---")
211
+
212
+ # st.success("✓ Evaluation completed")
213
+ # if st.button("Regenerate Evaluation", key=f"regenerate_{student_id}_{test_id}"):
214
+ # with st.spinner("Regenerating evaluation..."):
215
+ # evaluation = evaluate_subjective_answers(
216
+ # str(session_id),
217
+ # student_id,
218
+ # test_id
219
+ # )
220
+ # if evaluation:
221
+ # st.success("Evaluation regenerated successfully!")
222
+ # st.rerun()
223
+ # else:
224
+ # st.error("Error regenerating evaluation.")
225
+ # else:
226
+ # st.subheader("Generate Evaluation")
227
+ # if st.button("Generate Evaluation", key=f"evaluate_{student_id}_{test_id}"):
228
+ # with st.spinner("Generating evaluation..."):
229
+ # evaluation = evaluate_subjective_answers(
230
+ # str(session_id),
231
+ # student_id,
232
+ # test_id
233
+ # )
234
+ # if evaluation:
235
+ # st.success("Evaluation generated successfully!")
236
+ # st.markdown("### Generated Evaluation")
237
+ # for eval_item in evaluation['evaluations']:
238
+ # st.markdown(f"#### Question {eval_item['question_number']}")
239
+ # st.markdown(eval_item['evaluation'])
240
+ # st.markdown("---")
241
+ # st.rerun()
242
+ # else:
243
+ # st.error("Error generating evaluation.")
244
+
245
+ # except Exception as e:
246
+ # st.error(f"An error occurred while loading the evaluations: {str(e)}")
247
+ # print(f"Error in display_evaluation_to_faculty: {str(e)}")
248
+
249
+ import streamlit as st
250
+ from datetime import datetime
251
+ from pymongo import MongoClient
252
+ import os
253
+ from openai import OpenAI
254
+ from dotenv import load_dotenv
255
+ from bson import ObjectId
256
+
257
+ load_dotenv()
258
+
259
+ # MongoDB setup
260
+ MONGO_URI = os.getenv("MONGO_URI")
261
+ client = MongoClient(MONGO_URI)
262
+ db = client["novascholar_db"]
263
+ subjective_tests_collection = db["subjective_tests"]
264
+ subjective_test_evaluation_collection = db["subjective_test_evaluation"]
265
+ pre_subjective_tests_collection = db["pre_subjective_tests"]
266
+ resources_collection = db["resources"]
267
+ students_collection = db["students"]
268
+ pre_subjective_test_evaluation_collection = db["pre_subjective_test_evaluation"]
269
+
270
+
271
+ def evaluate_subjective_answers(session_id, student_id, test_id):
272
+ """
273
+ Generate evaluation and analysis for subjective test answers
274
+ """
275
+ try:
276
+ # Fetch test and student submission
277
+ test = subjective_tests_collection.find_one({"_id": test_id})
278
+ if not test:
279
+ return None
280
+
281
+ # Find student's submission
282
+ submission = next(
283
+ (
284
+ sub
285
+ for sub in test.get("submissions", [])
286
+ if sub["student_id"] == str(student_id)
287
+ ),
288
+ None,
289
+ )
290
+ if not submission:
291
+ return None
292
+
293
+ # Fetch pre-class materials
294
+ pre_class_materials = resources_collection.find({"session_id": session_id})
295
+ pre_class_content = ""
296
+ for material in pre_class_materials:
297
+ if "text_content" in material:
298
+ pre_class_content += material["text_content"] + "\n"
299
+
300
+ # Default rubric (can be customized later)
301
+ default_rubric = """
302
+ 1. Content Understanding (1-4):
303
+ - Demonstrates comprehensive understanding of core concepts
304
+ - Accurately applies relevant theories and principles
305
+ - Provides specific examples and evidence
306
+
307
+ 2. Critical Analysis (1-4):
308
+ - Shows depth of analysis
309
+ - Makes meaningful connections
310
+ - Demonstrates original thinking
311
+
312
+ 3. Organization & Clarity (1-4):
313
+ - Clear structure and flow
314
+ - Well-developed arguments
315
+ - Effective use of examples
316
+ """
317
+
318
+ # Initialize OpenAI client
319
+ client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
320
+
321
+ evaluations = []
322
+ for i, (question, answer) in enumerate(
323
+ zip(test["questions"], submission["answers"])
324
+ ):
325
+ analysis_content = f"""
326
+ Question: {question['question']}
327
+ Student Answer: {answer}
328
+ """
329
+
330
+ prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
331
+
332
+ 1. Evaluation Process:
333
+ - Use each rubric criterion (scored 1-4) for internal assessment
334
+ - Compare response with pre-class materials
335
+ - Check alignment with all rubric requirements
336
+ - Calculate final score: sum of criteria scores converted to 10-point scale
337
+
338
+ Pre-class Materials:
339
+ {pre_class_content[:1000]} # Truncate to avoid token limits
340
+
341
+ Rubric Criteria:
342
+ {default_rubric}
343
+
344
+ Question and Answer:
345
+ {analysis_content}
346
+
347
+ Provide your assessment in the following format:
348
+
349
+ **Score and Evidence**
350
+ - Score: [X]/10
351
+ - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
352
+
353
+ **Key Areas for Improvement**
354
+ - [Concise improvement point 1]
355
+ - [Concise improvement point 2]
356
+ - [Concise improvement point 3]
357
+ """
358
+
359
+ # Generate evaluation using OpenAI
360
+ response = client.chat.completions.create(
361
+ model="gpt-4o-mini",
362
+ messages=[{"role": "user", "content": prompt_template}],
363
+ max_tokens=500,
364
+ temperature=0.4,
365
+ )
366
+
367
+ evaluations.append(
368
+ {
369
+ "question_number": i + 1,
370
+ "question": question["question"],
371
+ "answer": answer,
372
+ "evaluation": response.choices[0].message.content,
373
+ }
374
+ )
375
+
376
+ # Store evaluation in MongoDB
377
+ evaluation_doc = {
378
+ "test_id": test_id,
379
+ "student_id": student_id,
380
+ "session_id": session_id,
381
+ "evaluations": evaluations,
382
+ "evaluated_at": datetime.utcnow(),
383
+ }
384
+
385
+ subjective_test_evaluation_collection.insert_one(evaluation_doc)
386
+ return evaluation_doc
387
+
388
+ except Exception as e:
389
+ print(f"Error in evaluate_subjective_answers: {str(e)}")
390
+ return None
391
+
392
+
393
+ def pre_evaluate_subjective_answers(session_id, student_id, test_id):
394
+ """
395
+ Generate evaluation and analysis for subjective test answers
396
+ """
397
+ try:
398
+ # Fetch test and student submission
399
+ test = pre_subjective_tests_collection.find_one({"_id": test_id})
400
+ if not test:
401
+ return None
402
+
403
+ # Find student's submission
404
+ submission = next(
405
+ (
406
+ sub
407
+ for sub in test.get("submissions", [])
408
+ if sub["student_id"] == str(student_id)
409
+ ),
410
+ None,
411
+ )
412
+ if not submission:
413
+ return None
414
+
415
+ # Fetch pre-class materials
416
+ pre_class_materials = resources_collection.find({"session_id": session_id})
417
+ pre_class_content = ""
418
+ for material in pre_class_materials:
419
+ if "text_content" in material:
420
+ pre_class_content += material["text_content"] + "\n"
421
+
422
+ # Default rubric (can be customized later)
423
+ default_rubric = """
424
+ 1. Content Understanding (1-4):
425
+ - Demonstrates comprehensive understanding of core concepts
426
+ - Accurately applies relevant theories and principles
427
+ - Provides specific examples and evidence
428
+
429
+ 2. Critical Analysis (1-4):
430
+ - Shows depth of analysis
431
+ - Makes meaningful connections
432
+ - Demonstrates original thinking
433
+
434
+ 3. Organization & Clarity (1-4):
435
+ - Clear structure and flow
436
+ - Well-developed arguments
437
+ - Effective use of examples
438
+ """
439
+
440
+ # Initialize OpenAI client
441
+ client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
442
+
443
+ evaluations = []
444
+ for i, (question, answer) in enumerate(
445
+ zip(test["questions"], submission["answers"])
446
+ ):
447
+ analysis_content = f"""
448
+ Question: {question['question']}
449
+ Student Answer: {answer}
450
+ """
451
+
452
+ prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
453
+
454
+ 1. Evaluation Process:
455
+ - Use each rubric criterion (scored 1-4) for internal assessment
456
+ - Compare response with pre-class materials
457
+ - Check alignment with all rubric requirements
458
+ - Calculate final score: sum of criteria scores converted to 10-point scale
459
+
460
+ Pre-class Materials:
461
+ {pre_class_content[:1000]} # Truncate to avoid token limits
462
+
463
+ Rubric Criteria:
464
+ {default_rubric}
465
+
466
+ Question and Answer:
467
+ {analysis_content}
468
+
469
+ Provide your assessment in the following format:
470
+
471
+ **Score and Evidence**
472
+ - Score: [X]/10
473
+ - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
474
+
475
+ **Key Areas for Improvement**
476
+ - [Concise improvement point 1]
477
+ - [Concise improvement point 2]
478
+ - [Concise improvement point 3]
479
+ """
480
+
481
+ # Generate evaluation using OpenAI
482
+ response = client.chat.completions.create(
483
+ model="gpt-4o-mini",
484
+ messages=[{"role": "user", "content": prompt_template}],
485
+ max_tokens=500,
486
+ temperature=0.4,
487
+ )
488
+
489
+ evaluations.append(
490
+ {
491
+ "question_number": i + 1,
492
+ "question": question["question"],
493
+ "answer": answer,
494
+ "evaluation": response.choices[0].message.content,
495
+ }
496
+ )
497
+
498
+ # Store evaluation in MongoDB
499
+ evaluation_doc = {
500
+ "test_id": test_id,
501
+ "student_id": student_id,
502
+ "session_id": session_id,
503
+ "evaluations": evaluations,
504
+ "evaluated_at": datetime.utcnow(),
505
+ }
506
+
507
+ pre_subjective_test_evaluation_collection.insert_one(evaluation_doc)
508
+ return evaluation_doc
509
+
510
+ except Exception as e:
511
+ print(f"Error in evaluate_subjective_answers: {str(e)}")
512
+ return None
513
+
514
+
515
+ def display_evaluation_to_faculty(session_id, student_id, course_id):
516
+ """
517
+ Display interface for faculty to generate and view evaluations
518
+ """
519
+ st.header("Evaluate Subjective Tests")
520
+
521
+ try:
522
+ # Fetch available tests
523
+ print("session_id", session_id, "student_id", student_id, "course_id", course_id)
524
+ tests = list(
525
+ subjective_tests_collection.find(
526
+ {"session_id": str(session_id), "status": "active"}
527
+ )
528
+ )
529
+
530
+ print("tests" ,tests)
531
+ if not tests:
532
+ st.info("No subjective tests found for this session.")
533
+ return
534
+
535
+ # Select test
536
+ test_options = {
537
+ (
538
+ f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})"
539
+ if "created_at" in test
540
+ else test["title"]
541
+ ): test["_id"]
542
+ for test in tests
543
+ }
544
+
545
+ if test_options:
546
+ selected_test = st.selectbox(
547
+ "Select Test to Evaluate", options=list(test_options.keys())
548
+ )
549
+
550
+ if selected_test:
551
+ test_id = test_options[selected_test]
552
+ test = subjective_tests_collection.find_one({"_id": test_id})
553
+
554
+ if test:
555
+ submissions = test.get("submissions", [])
556
+ if not submissions:
557
+ st.warning("No submissions found for this test.")
558
+ return
559
+
560
+ # Create a dropdown for student submissions
561
+ student_options = {
562
+ f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub[
563
+ "student_id"
564
+ ]
565
+ for sub in submissions
566
+ }
567
+
568
+ selected_student = st.selectbox(
569
+ "Select Student Submission",
570
+ options=list(student_options.keys()),
571
+ )
572
+
573
+ if selected_student:
574
+ student_id = student_options[selected_student]
575
+ submission = next(
576
+ sub
577
+ for sub in submissions
578
+ if sub["student_id"] == student_id
579
+ )
580
+
581
+ st.markdown(
582
+ f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}"
583
+ )
584
+ st.markdown("---")
585
+
586
+ # Display questions and answers
587
+ st.subheader("Submission Details")
588
+ for i, (question, answer) in enumerate(
589
+ zip(test["questions"], submission["answers"])
590
+ ):
591
+ st.markdown(f"**Question {i+1}:** {question['question']}")
592
+ st.markdown(f"**Answer:** {answer}")
593
+ st.markdown("---")
594
+
595
+ # Check for existing evaluation
596
+ existing_eval = subjective_test_evaluation_collection.find_one(
597
+ {
598
+ "test_id": test_id,
599
+ "student_id": student_id,
600
+ "session_id": str(session_id),
601
+ }
602
+ )
603
+
604
+ if existing_eval:
605
+ st.subheader("Evaluation Results")
606
+ for eval_item in existing_eval["evaluations"]:
607
+ st.markdown(
608
+ f"### Evaluation for Question {eval_item['question_number']}"
609
+ )
610
+ st.markdown(eval_item["evaluation"])
611
+ st.markdown("---")
612
+
613
+ st.success("✓ Evaluation completed")
614
+ if st.button(
615
+ "Regenerate Evaluation",
616
+ key=f"regenerate_{student_id}_{test_id}",
617
+ ):
618
+ with st.spinner("Regenerating evaluation..."):
619
+ evaluation = evaluate_subjective_answers(
620
+ str(session_id), student_id, test_id
621
+ )
622
+ if evaluation:
623
+ st.success(
624
+ "Evaluation regenerated successfully!"
625
+ )
626
+ st.rerun()
627
+ else:
628
+ st.error("Error regenerating evaluation.")
629
+ else:
630
+ st.subheader("Generate Evaluation")
631
+ if st.button(
632
+ "Generate Evaluation",
633
+ key=f"evaluate_{student_id}_{test_id}",
634
+ ):
635
+ with st.spinner("Generating evaluation..."):
636
+ evaluation = evaluate_subjective_answers(
637
+ str(session_id), student_id, test_id
638
+ )
639
+ if evaluation:
640
+ st.success("Evaluation generated successfully!")
641
+ st.markdown("### Generated Evaluation")
642
+ for eval_item in evaluation["evaluations"]:
643
+ st.markdown(
644
+ f"#### Question {eval_item['question_number']}"
645
+ )
646
+ st.markdown(eval_item["evaluation"])
647
+ st.markdown("---")
648
+ st.rerun()
649
+ else:
650
+ st.error("Error generating evaluation.")
651
+
652
+ except Exception as e:
653
+ st.error(f"An error occurred while loading the evaluations: {str(e)}")
654
+ print(f"Error in display_evaluation_to_faculty: {str(e)}")
655
+ return None
656
+
657
+
658
+ def pre_display_evaluation_to_faculty(session_id, student_id, course_id):
659
+ """
660
+ Display interface for faculty to generate and view evaluations
661
+ """
662
+ st.header("Evaluate Pre Subjective Tests")
663
+
664
+ try:
665
+ # Fetch available tests
666
+ tests = list(
667
+ pre_subjective_tests_collection.find(
668
+ {"session_id": str(session_id), "status": "active"}
669
+ )
670
+ )
671
+
672
+ if not tests:
673
+ st.info("No subjective tests found for this session.")
674
+ return
675
+
676
+ # Select test
677
+ test_options = {
678
+ (
679
+ f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})"
680
+ if "created_at" in test
681
+ else test["title"]
682
+ ): test["_id"]
683
+ for test in tests
684
+ }
685
+
686
+ if test_options:
687
+ selected_test = st.selectbox(
688
+ "Select Test to Evaluate", options=list(test_options.keys())
689
+ )
690
+
691
+ if selected_test:
692
+ test_id = test_options[selected_test]
693
+ test = pre_subjective_tests_collection.find_one({"_id": test_id})
694
+
695
+ if test:
696
+ submissions = test.get("submissions", [])
697
+ if not submissions:
698
+ st.warning("No submissions found for this test.")
699
+ return
700
+
701
+ # Create a dropdown for student submissions
702
+ student_options = {
703
+ f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub[
704
+ "student_id"
705
+ ]
706
+ for sub in submissions
707
+ }
708
+
709
+ selected_student = st.selectbox(
710
+ "Select Student Submission",
711
+ options=list(student_options.keys()),
712
+ )
713
+
714
+ if selected_student:
715
+ student_id = student_options[selected_student]
716
+ submission = next(
717
+ sub
718
+ for sub in submissions
719
+ if sub["student_id"] == student_id
720
+ )
721
+
722
+ st.markdown(
723
+ f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}"
724
+ )
725
+ st.markdown("---")
726
+
727
+ # Display questions and answers
728
+ st.subheader("Submission Details")
729
+ for i, (question, answer) in enumerate(
730
+ zip(test["questions"], submission["answers"])
731
+ ):
732
+ st.markdown(f"**Question {i+1}:** {question['question']}")
733
+ st.markdown(f"**Answer:** {answer}")
734
+ st.markdown("---")
735
+
736
+ # Check for existing evaluation
737
+ existing_eval = (
738
+ pre_subjective_test_evaluation_collection.find_one(
739
+ {
740
+ "test_id": test_id,
741
+ "student_id": student_id,
742
+ "session_id": str(session_id),
743
+ }
744
+ )
745
+ )
746
+
747
+ if existing_eval:
748
+ st.subheader("Evaluation Results")
749
+ for eval_item in existing_eval["evaluations"]:
750
+ st.markdown(
751
+ f"### Evaluation for Question {eval_item['question_number']}"
752
+ )
753
+ st.markdown(eval_item["evaluation"])
754
+ st.markdown("---")
755
+
756
+ st.success("✓ Evaluation completed")
757
+ if st.button(
758
+ "Regenerate Evaluation",
759
+ key=f"regenerate_{student_id}_{test_id}",
760
+ ):
761
+ with st.spinner("Regenerating evaluation..."):
762
+ evaluation = pre_evaluate_subjective_answers(
763
+ str(session_id), student_id, test_id
764
+ )
765
+ if evaluation:
766
+ st.success(
767
+ "Evaluation regenerated successfully!"
768
+ )
769
+ st.rerun()
770
+ else:
771
+ st.error("Error regenerating evaluation.")
772
+ else:
773
+ st.subheader("Generate Evaluation")
774
+ if st.button(
775
+ "Generate Evaluation",
776
+ key=f"pre_evaluate_{student_id}_{test_id}",
777
+ ):
778
+ with st.spinner("Generating evaluation..."):
779
+ print("session_id", session_id, "student_id", student_id, "test_id", test_id)
780
+ evaluation = pre_evaluate_subjective_answers(
781
+ str(session_id), student_id, test_id
782
+ )
783
+ if evaluation:
784
+ st.success("Evaluation generated successfully!")
785
+ st.markdown("### Generated Evaluation")
786
+ for eval_item in evaluation["evaluations"]:
787
+ st.markdown(
788
+ f"#### Question {eval_item['question_number']}"
789
+ )
790
+ st.markdown(eval_item["evaluation"])
791
+ st.markdown("---")
792
+ st.rerun()
793
+ else:
794
+ st.error("Error generating evaluation.")
795
+
796
+ except Exception as e:
797
+ st.error(f"An error occurred while loading the evaluations: {str(e)}")
798
+ print(f"Error in display_evaluation_to_faculty: {str(e)}")