omkar-surve126 committed
Commit f51433c · 1 Parent(s): 4857aa9
Files changed (3)
  1. rubrics.py +8 -8
  2. session_page.py +274 -521
  3. subjective_test_evaluation.py +208 -213
rubrics.py CHANGED
@@ -98,14 +98,14 @@ def display_rubrics_tab(session, course_id):
 
     if rubric:
         st.json(rubric)
-        if st.button("Save Rubric"):
-            rubric_data = {
-                "course_id": course_id,
-                "session_id": session['session_id'],
-                "rubric": json.loads(rubric)
-            }
-            rubrics_collection.insert_one(rubric_data)
-            st.success("Rubric saved successfully!")
+        # if st.button("Save Rubric"):
+        rubric_data = {
+            "course_id": course_id,
+            "session_id": session['session_id'],
+            "rubric": json.loads(rubric)
+        }
+        rubrics_collection.insert_one(rubric_data)
+        st.success("Rubric saved successfully!")
     else:
         st.error("No learning outcomes found for this session")
 else:
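With the save-button guard commented out, display_rubrics_tab now writes the rubric to rubrics_collection as soon as one is generated, and the faculty session view below gains a "Rubrics" tab that calls display_rubrics_tab(session, course_id). A minimal sketch of the document being inserted, using illustrative IDs and rubric content rather than values from this commit:

import json

# Illustrative rubric JSON such as the generator might return (assumed example).
rubric_json = '{"criteria": [{"name": "Content Understanding", "levels": [1, 2, 3, 4]}]}'

rubric_data = {
    "course_id": "COURSE101",           # assumed example ID
    "session_id": "S1",                 # assumed example ID
    "rubric": json.loads(rubric_json),  # stored as a parsed object, as in the diff
}
# rubrics_collection.insert_one(rubric_data)  # the call made in display_rubrics_tab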
session_page.py CHANGED
@@ -35,6 +35,9 @@ import streamlit.components.v1 as components
35
  from live_chat_feature import display_live_chat_interface
36
  from code_playground import display_code_playground
37
  from urllib.parse import urlparse, parse_qs
 
 
 
38
 
39
  # Load environment variables
40
  load_dotenv()
@@ -49,7 +52,7 @@ subjective_test_evaluation_collection = db["subjective_test_evaluation"]
49
  assignment_evaluation_collection = db["assignment_evaluation"]
50
  subjective_tests_collection = db["subjective_tests"]
51
  synoptic_store_collection = db["synoptic_store"]
52
-
53
 
54
  # for implementing Context Caching:
55
  # PROJECT_ID = "novascholar-446709"
@@ -1188,8 +1191,7 @@ def display_post_class_content(session, student_id, course_id):
1188
  course_id,
1189
  session['session_id'],
1190
  test_title,
1191
- questions,
1192
- synoptic
1193
  )
1194
  if test_id:
1195
  st.success("Subjective test saved successfully!")
@@ -1216,7 +1218,6 @@ def display_post_class_content(session, student_id, course_id):
1216
  session['session_id'],
1217
  st.session_state.test_title,
1218
  st.session_state.generated_questions,
1219
- st.session_state.generated_synoptic
1220
  )
1221
  if test_id:
1222
  st.success("Subjective test saved successfully!")
@@ -1278,267 +1279,136 @@ def display_post_class_content(session, student_id, course_id):
1278
  else:
1279
  st.error("Error saving quiz.")
1280
 
1281
- st.subheader("Add Assignments")
1282
- # Add assignment form
1283
  with st.form("add_assignment_form"):
1284
  title = st.text_input("Assignment Title")
 
1285
  due_date = st.date_input("Due Date")
1286
  submit = st.form_submit_button("Add Assignment")
1287
 
1288
  if submit:
 
 
 
 
1289
  due_date = datetime.combine(due_date, datetime.min.time())
1290
- # Save the assignment to the database
1291
  assignment = {
1292
- "id": ObjectId(),
1293
  "title": title,
 
1294
  "due_date": due_date,
1295
- "status": "pending",
 
 
 
 
1296
  "submissions": []
1297
  }
1298
- courses_collection2.update_one(
1299
- {"course_id": course_id, "sessions.session_id": session['session_id']},
1300
- {"$push": {"sessions.$.post_class.assignments": assignment}}
1301
- )
1302
  st.success("Assignment added successfully!")
1303
- else:
1304
- # Display assignments
1305
- session_data = courses_collection2.find_one(
1306
- {"course_id": course_id, "sessions.session_id": session['session_id']},
1307
- {"sessions.$": 1}
1308
- )
1309
 
1310
- if session_data and "sessions" in session_data and len(session_data["sessions"]) > 0:
1311
- assignments = session_data["sessions"][0].get("post_class", {}).get("assignments", [])
1312
- for assignment in assignments:
1313
- title = assignment.get("title", "No Title")
1314
- due_date = assignment.get("due_date", "No Due Date")
1315
- status = assignment.get("status", "No Status")
1316
- assignment_id = assignment.get("id", "No ID")
1317
-
1318
- with st.expander(f"Assignment: {title}", expanded=True):
1319
- st.markdown(f"**Due Date:** {due_date}")
1320
- st.markdown(f"**Status:** {status.replace('_', ' ').title()}")
1321
 
1322
- # Assignment details
1323
- st.markdown("### Instructions")
1324
- st.markdown("Complete the assignment according to the provided guidelines.")
 
 
1325
 
1326
- # File submission
1327
- st.markdown("### Submission")
1328
  uploaded_file = st.file_uploader(
1329
  "Upload your work",
1330
- type=['pdf', 'py', 'ipynb'],
1331
- key=f"upload_{assignment['id']}"
1332
  )
1333
 
1334
  if uploaded_file is not None:
1335
- st.success("File uploaded successfully!")
1336
-
1337
- if st.button("Submit Assignment", key=f"submit_{assignment['id']}"):
1338
- # Extract text content from the file
1339
  text_content = extract_text_from_file(uploaded_file)
1340
 
1341
- # Call assignment_submit function
1342
- success = assignment_submit(
1343
- student_id=student_id,
1344
- course_id=course_id,
1345
- session_id=session['session_id'],
1346
- assignment_id=assignment['id'],
1347
- file_name=uploaded_file.name,
1348
- file_content=uploaded_file,
1349
- text_content=text_content,
1350
- material_type="assignment"
 
 
1351
  )
1352
 
1353
- if success:
1354
- st.success("Assignment submitted successfully!")
1355
- else:
1356
- st.error("Error saving submission.")
1357
- # Feedback section (if assignment is completed)
1358
- if assignment['status'] == 'completed':
1359
- st.markdown("### Feedback")
1360
- st.info("Feedback will be provided here once the assignment is graded.")
1361
- else:
1362
- st.warning("No assignments found for this session.")
1363
-
1364
- # def display_preclass_analytics(session, course_id):
1365
- # """Display pre-class analytics for faculty based on chat interaction metrics"""
1366
- # st.subheader("Pre-class Analytics")
1367
-
1368
- # # Get all enrolled students
1369
- # # enrolled_students = list(students_collection.find({"enrolled_courses": session['course_id']}))
1370
- # enrolled_students = list(students_collection.find({
1371
- # "enrolled_courses.course_id": course_id
1372
- # }))
1373
- # # total_students = len(enrolled_students)
1374
-
1375
- # total_students = students_collection.count_documents({
1376
- # "enrolled_courses": {
1377
- # "$elemMatch": {"course_id": course_id}
1378
- # }
1379
- # })
1380
-
1381
-
1382
- # if total_students == 0:
1383
- # st.warning("No students enrolled in this course.")
1384
- # return
1385
-
1386
- # # Get chat history for all students in this session
1387
- # chat_data = list(chat_history_collection.find({
1388
- # "session_id": session['session_id']
1389
- # }))
1390
-
1391
- # # Create a DataFrame to store student completion data
1392
- # completion_data = []
1393
- # incomplete_students = []
1394
-
1395
- # for student in enrolled_students:
1396
- # student_id = student['_id']
1397
- # student_name = student.get('full_name', 'Unknown')
1398
- # student_sid = student.get('SID', 'Unknown')
1399
-
1400
- # # Find student's chat history
1401
- # student_chat = next((chat for chat in chat_data if chat['user_id'] == student_id), None)
1402
-
1403
- # if student_chat:
1404
- # messages = student_chat.get('messages', [])
1405
- # message_count = len(messages)
1406
- # status = "Completed" if message_count >= 20 else "Incomplete"
1407
-
1408
- # # Format chat history for display
1409
- # chat_history = []
1410
- # for msg in messages:
1411
- # timestamp_str = msg.get('timestamp', '')
1412
- # if isinstance(timestamp_str, str):
1413
- # timestamp = datetime.fromisoformat(timestamp_str)
1414
- # else:
1415
- # timestamp = timestamp_str
1416
- # # timestamp = msg.get('timestamp', '').strftime("%Y-%m-%d %H:%M:%S")
1417
- # chat_history.append({
1418
- # # 'timestamp': timestamp,
1419
- # 'timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
1420
- # 'prompt': msg.get('prompt'),
1421
- # 'response': msg.get('response')
1422
- # })
1423
-
1424
- # message_count = len(student_chat.get('messages', []))
1425
- # status = "Completed" if message_count >= 20 else "Incomplete"
1426
- # if status == "Incomplete":
1427
- # incomplete_students.append({
1428
- # 'name': student_name,
1429
- # 'sid': student_sid,
1430
- # 'message_count': message_count
1431
- # })
1432
- # else:
1433
- # message_count = 0
1434
- # status = "Not Started"
1435
- # chat_history = []
1436
- # incomplete_students.append({
1437
- # 'name': student_name,
1438
- # 'sid': student_sid,
1439
- # 'message_count': 0
1440
- # })
1441
-
1442
- # completion_data.append({
1443
- # 'Student Name': student_name,
1444
- # 'SID': student_sid,
1445
- # 'Messages': message_count,
1446
- # 'Status': status,
1447
- # 'Chat History': chat_history
1448
- # })
1449
-
1450
- # # Create DataFrame
1451
- # df = pd.DataFrame(completion_data)
1452
-
1453
- # # Display summary metrics
1454
- # col1, col2, col3 = st.columns(3)
1455
-
1456
- # completed_count = len(df[df['Status'] == 'Completed'])
1457
- # incomplete_count = len(df[df['Status'] == 'Incomplete'])
1458
- # not_started_count = len(df[df['Status'] == 'Not Started'])
1459
-
1460
- # with col1:
1461
- # st.metric("Completed", completed_count)
1462
- # with col2:
1463
- # st.metric("Incomplete", incomplete_count)
1464
- # with col3:
1465
- # st.metric("Not Started", not_started_count)
1466
-
1467
- # # Display completion rate progress bar
1468
- # completion_rate = (completed_count / total_students) * 100
1469
- # st.markdown("### Overall Completion Rate")
1470
- # st.progress(completion_rate / 100)
1471
- # st.markdown(f"**{completion_rate:.1f}%** of students have completed pre-class materials")
1472
-
1473
- # # Create tabs for different views
1474
- # tab1, tab2 = st.tabs(["Student Overview", "Detailed Chat History"])
1475
-
1476
- # with tab1:
1477
- # # Display completion summary table
1478
- # st.markdown("### Student Completion Details")
1479
- # summary_df = df[['Student Name', 'SID', 'Messages', 'Status']].copy()
1480
- # st.dataframe(
1481
- # summary_df.style.apply(lambda x: ['background-color: #90EE90' if v == 'Completed'
1482
- # else 'background-color: #FFB6C1' if v == 'Incomplete'
1483
- # else 'background-color: #FFE4B5'
1484
- # for v in x],
1485
- # subset=['Status'])
1486
- # )
1487
-
1488
- # with tab2:
1489
- # # Display detailed chat history
1490
- # st.markdown("### Student Chat Histories")
1491
-
1492
- # # Add student selector
1493
- # selected_student = st.selectbox(
1494
- # "Select a student to view chat history:",
1495
- # options=df['Student Name'].tolist()
1496
- # )
1497
-
1498
- # # Get selected student's data
1499
- # student_data = df[df['Student Name'] == selected_student].iloc[0]
1500
- # print(student_data)
1501
- # chat_history = student_data['Chat History']
1502
- # # Refresh chat history when a new student is selected
1503
- # if 'selected_student' not in st.session_state or st.session_state.selected_student != selected_student:
1504
- # st.session_state.selected_student = selected_student
1505
- # st.session_state.selected_student_chat_history = chat_history
1506
- # else:
1507
- # chat_history = st.session_state.selected_student_chat_history
1508
- # # Display student info and chat statistics
1509
- # st.markdown(f"**Student ID:** {student_data['SID']}")
1510
- # st.markdown(f"**Status:** {student_data['Status']}")
1511
- # st.markdown(f"**Total Messages:** {student_data['Messages']}")
1512
-
1513
- # # Display chat history in a table
1514
- # if chat_history:
1515
- # chat_df = pd.DataFrame(chat_history)
1516
- # st.dataframe(
1517
- # chat_df.style.apply(lambda x: ['background-color: #E8F0FE' if v == 'response' else 'background-color: #FFFFFF' for v in x], subset=['prompt']), use_container_width=True
1518
- # )
1519
- # else:
1520
- # st.info("No chat history available for this student.")
1521
-
1522
- # # Display students who haven't completed
1523
- # if incomplete_students:
1524
- # st.markdown("### Students Requiring Follow-up")
1525
- # incomplete_df = pd.DataFrame(incomplete_students)
1526
- # st.markdown(f"**{len(incomplete_students)} students** need to complete the pre-class materials:")
1527
-
1528
- # # Create a styled table for incomplete students
1529
- # st.table(
1530
- # incomplete_df.style.apply(lambda x: ['background-color: #FFFFFF'
1531
- # for _ in range(len(x))]))
1532
-
1533
- # # Export option for incomplete students list
1534
- # csv = incomplete_df.to_csv(index=False).encode('utf-8')
1535
- # st.download_button(
1536
- # "Download Follow-up List",
1537
- # csv,
1538
- # "incomplete_students.csv",
1539
- # "text/csv",
1540
- # key='download-csv'
1541
- # )
1542
 
1543
  def display_inclass_analytics(session, course_id):
1544
  """Display in-class analytics for faculty"""
@@ -1653,9 +1523,6 @@ def display_inclass_analytics(session, course_id):
1653
  "courses": course_id,
1654
  "_id": {"$nin": respondents}
1655
  }))
1656
-
1657
-
1658
-
1659
 
1660
  if non_participants:
1661
  st.markdown("#### Students Who Haven't Participated")
@@ -2457,8 +2324,6 @@ def display_session_analytics(session, course_id):
2457
  # Uploaded on: {material['uploaded_at'].strftime('%Y-%m-%d %H:%M')}
2458
  # """)
2459
 
2460
-
2461
-
2462
  def display_quiz_tab(student_id, course_id, session_id):
2463
  """Display quizzes for students"""
2464
  st.header("Course Quizzes")
@@ -2520,69 +2385,6 @@ def display_quiz_tab(student_id, course_id, session_id):
2520
  else:
2521
  st.error("Error submitting quiz. Please try again.")
2522
 
2523
- def display_subjective_test_tab(student_id, course_id, session_id):
2524
- """Display subjective tests for students"""
2525
- st.header("Subjective Tests")
2526
-
2527
- try:
2528
- subjective_tests = list(subjective_tests_collection.find({
2529
- "course_id": course_id,
2530
- "session_id": session_id,
2531
- "status": "active"
2532
- }))
2533
-
2534
- if not subjective_tests:
2535
- st.info("No subjective tests available for this session.")
2536
- return
2537
-
2538
- for test in subjective_tests:
2539
- with st.expander(f"📝 {test['title']}", expanded=True):
2540
- # Check for existing submission
2541
- existing_submission = next(
2542
- (sub for sub in test.get('submissions', [])
2543
- if sub['student_id'] == str(student_id)),
2544
- None
2545
- )
2546
-
2547
- if existing_submission:
2548
- st.success("Test completed! Your answers have been submitted.")
2549
- st.subheader("Your Answers")
2550
- for i, ans in enumerate(existing_submission['answers']):
2551
- st.markdown(f"**Question {i+1}:** {test['questions'][i]['question']}")
2552
- st.markdown(f"**Your Answer:** {ans}")
2553
- st.markdown("---")
2554
- else:
2555
- st.write("Please write your answers:")
2556
- with st.form(key=f"subjective_test_form_{test['_id']}"):
2557
- student_answers = []
2558
- for i, question in enumerate(test['questions']):
2559
- st.markdown(f"**Question {i+1}:** {question['question']}")
2560
- answer = st.text_area(
2561
- "Your answer:",
2562
- key=f"q_{test['_id']}_{i}",
2563
- height=200
2564
- )
2565
- student_answers.append(answer)
2566
-
2567
- if st.form_submit_button("Submit Test"):
2568
- if all(answer.strip() for answer in student_answers):
2569
- success = submit_subjective_test(
2570
- test['_id'],
2571
- str(student_id),
2572
- student_answers
2573
- )
2574
- if success:
2575
- st.success("Test submitted successfully!")
2576
- st.rerun()
2577
- else:
2578
- st.error("Error submitting test. Please try again.")
2579
- else:
2580
- st.error("Please answer all questions before submitting.")
2581
-
2582
- except Exception as e:
2583
- st.error(f"An error occurred while loading the tests. Please try again later.")
2584
- print(f"Error in display_subjective_test_tab: {str(e)}", flush=True)
2585
-
2586
  def display_session_content(student_id, course_id, session, username, user_type):
2587
  st.title(f"{session['title']}")
2588
 
@@ -2644,7 +2446,9 @@ def display_session_content(student_id, course_id, session, username, user_type)
2644
  "Pre-class Analytics",
2645
  "In-class Analytics",
2646
  "Post-class Analytics",
2647
- "End Terms"
 
 
2648
  ])
2649
  with tabs[0]:
2650
  upload_preclass_materials(session['session_id'], course_id)
@@ -2659,8 +2463,12 @@ def display_session_content(student_id, course_id, session, username, user_type)
2659
  with tabs[5]:
2660
  display_postclass_analytics(session, course_id)
2661
  with tabs[6]:
 
 
2662
  st.subheader("End Terms")
2663
  st.info("End term content will be available soon.")
 
 
2664
 
2665
  def parse_model_response(response_text):
2666
  """Enhanced parser for model responses with better error handling.
@@ -2866,17 +2674,21 @@ def generate_synoptic(questions, context, session_title, num_questions):
2866
  print(f"Response text: {response.text if 'response' in locals() else 'No response generated'}")
2867
  return None
2868
 
2869
- def save_subjective_test(course_id, session_id, title, questions, synoptic):
2870
- """Save subjective test to database"""
2871
  try:
2872
- # Format questions to include metadata
 
 
 
 
2873
  formatted_questions = []
2874
  for q in questions:
2875
  formatted_question = {
2876
  "question": q["question"],
2877
- "expected_points": q.get("expected_points", []),
2878
- "difficulty_level": q.get("difficulty_level", "medium"),
2879
- "suggested_time": q.get("suggested_time", "5 minutes")
2880
  }
2881
  formatted_questions.append(formatted_question)
2882
 
@@ -2885,157 +2697,48 @@ def save_subjective_test(course_id, session_id, title, questions, synoptic):
2885
  "session_id": session_id,
2886
  "title": title,
2887
  "questions": formatted_questions,
2888
- "synoptic": synoptic,
2889
  "created_at": datetime.utcnow(),
2890
  "status": "active",
2891
  "submissions": []
2892
  }
2893
 
2894
  result = subjective_tests_collection.insert_one(test_data)
2895
- return result.inserted_id
2896
  except Exception as e:
2897
- print(f"Error saving subjective test: {e}")
2898
  return None
2899
 
2900
- def submit_subjective_test(test_id, student_id, student_answers):
2901
- """Submit subjective test answers and trigger analysis"""
2902
  try:
2903
- submission_data = {
 
 
 
 
 
2904
  "student_id": student_id,
2905
- "answers": student_answers,
2906
- "submitted_at": datetime.utcnow()
 
2907
  }
2908
 
 
2909
  result = subjective_tests_collection.update_one(
2910
- {"_id": test_id},
2911
- {
2912
- "$push": {
2913
- "submissions": submission_data
2914
- }
2915
- }
2916
  )
2917
 
2918
- if result.modified_count > 0:
2919
- try:
2920
- # Trigger grading and analysis
2921
- analysis = analyze_subjective_answers(test_id, student_id)
2922
- if analysis:
2923
- # Update the submission with the analysis and score
2924
- subjective_tests_collection.update_one(
2925
- {"_id": test_id, "submissions.student_id": student_id},
2926
- {
2927
- "$set": {
2928
- "submissions.$.analysis": analysis,
2929
- "submissions.$.score": analysis.get('correctness_score')
2930
- }
2931
- }
2932
- )
2933
- return True
2934
- else:
2935
- print("Error: Analysis failed")
2936
- return False
2937
- except Exception as e:
2938
- print(f"Warning: Grading failed but submission was saved: {e}")
2939
- return True # We still return True since the submission itself was successful
2940
-
2941
- print("Error: No document was modified")
2942
- return False
2943
-
2944
  except Exception as e:
2945
- print(f"Error submitting subjective test: {str(e)}")
2946
  return False
2947
 
2948
- def analyze_subjective_answers(test_id, student_id):
2949
- """Analyze subjective test answers for correctness and improvements"""
2950
- try:
2951
- # Get test and submission details
2952
- test_doc = subjective_tests_collection.find_one({"_id": test_id})
2953
- if not test_doc:
2954
- print(f"Test document not found for test_id: {test_id}")
2955
- return None
2956
-
2957
- submission = next(
2958
- (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
2959
- None
2960
- )
2961
-
2962
- if not submission:
2963
- print(f"No submission found for student_id: {student_id}")
2964
- return None
2965
-
2966
- # Get questions and answers
2967
- questions = test_doc.get('questions', [])
2968
- student_answers = submission.get('answers', [])
2969
-
2970
- if not questions or not student_answers:
2971
- print("No questions or answers found")
2972
- return None
2973
-
2974
- # Retrieve the synoptic from the synoptic_store collection
2975
- synoptic_doc = synoptic_store_collection.find_one({"session_title": test_doc.get('title')})
2976
- synoptic = synoptic_doc.get('synoptic', '') if synoptic_doc else ''
2977
-
2978
- # Analyze each question separately
2979
- all_analyses = []
2980
- total_score = 0
2981
-
2982
- for i, (question, answer) in enumerate(zip(questions, student_answers), 1):
2983
- # Format content for individual question
2984
- analysis_content = f"Question {i}: {question['question']}\nAnswer: {answer}\n\n"
2985
-
2986
- # Get analysis for this question
2987
- individual_analysis = derive_analytics(
2988
- goal="Analyze and Grade",
2989
- reference_text=analysis_content,
2990
- openai_api_key=OPENAI_API_KEY,
2991
- context=test_doc.get('context', ''),
2992
- synoptic=synoptic[i-1] if isinstance(synoptic, list) else synoptic
2993
- )
2994
-
2995
- if individual_analysis:
2996
- # Extract score for this question
2997
- try:
2998
- score_match = re.search(r'(\d+)(?:/10)?', individual_analysis)
2999
- if score_match:
3000
- question_score = int(score_match.group(1))
3001
- if 1 <= question_score <= 10:
3002
- total_score += question_score
3003
- except:
3004
- question_score = 0
3005
-
3006
- # Format individual analysis
3007
- formatted_analysis = f"\n\n## Question {i} Analysis\n\n{individual_analysis}"
3008
- all_analyses.append(formatted_analysis)
3009
-
3010
- if not all_analyses:
3011
- print("Error: No analyses generated")
3012
- return None
3013
-
3014
- # Calculate average score
3015
- average_score = round(total_score / len(questions)) if questions else 0
3016
-
3017
- # Combine all analyses
3018
- combined_analysis = "\n".join(all_analyses)
3019
-
3020
- # Format final results
3021
- analysis_results = {
3022
- "content_analysis": combined_analysis,
3023
- "analyzed_at": datetime.utcnow(),
3024
- "correctness_score": average_score
3025
- }
3026
-
3027
- return analysis_results
3028
-
3029
- except Exception as e:
3030
- print(f"Error in analyze_subjective_answers: {str(e)}")
3031
- return None
3032
-
3033
  def display_subjective_test_tab(student_id, course_id, session_id):
3034
- """Display subjective tests for students"""
3035
  st.header("Subjective Tests")
3036
 
3037
  try:
3038
- # Query for active tests
3039
  subjective_tests = list(subjective_tests_collection.find({
3040
  "course_id": course_id,
3041
  "session_id": session_id,
@@ -3046,94 +2749,144 @@ def display_subjective_test_tab(student_id, course_id, session_id):
3046
  st.info("No subjective tests available for this session.")
3047
  return
3048
 
3049
- for test in subjective_tests:
3050
- with st.expander(f"📝 {test['title']}", expanded=True):
3051
- # Check for existing submission
3052
- existing_submission = next(
3053
- (sub for sub in test.get('submissions', [])
3054
- if sub['student_id'] == str(student_id)),
3055
- None
3056
- )
3057
-
3058
- if existing_submission:
3059
- st.success("Test completed! Your answers have been submitted.")
3060
- st.subheader("Your Answers")
3061
- for i, ans in enumerate(existing_submission['answers']):
3062
- st.markdown(f"**Question {i+1}:** {test['questions'][i]['question']}")
3063
- st.markdown(f"**Your Answer:** {ans}")
3064
- st.markdown("---")
3065
 
3066
- # Display analysis
3067
- display_subjective_analysis(test['_id'], str(student_id), test.get('context', ''))
3068
- else:
3069
- st.write("Please write your answers:")
3070
- with st.form(key=f"subjective_test_form_{test['_id']}"):
3071
- student_answers = []
3072
- for i, question in enumerate(test['questions']):
3073
- st.markdown(f"**Question {i+1}:** {question['question']}")
3074
- answer = st.text_area(
3075
- "Your answer:",
3076
- key=f"q_{test['_id']}_{i}",
3077
- height=200
3078
- )
3079
- student_answers.append(answer)
3080
-
3081
- if st.form_submit_button("Submit Test"):
3082
- if all(answer.strip() for answer in student_answers):
3083
- success = submit_subjective_test(
3084
- test['_id'],
3085
- str(student_id),
3086
- student_answers
3087
  )
3088
- if success:
3089
- st.success("Test submitted successfully!")
3090
- st.rerun()
 
  else:
3092
- st.error("Error submitting test. Please try again.")
3093
- else:
3094
- st.error("Please answer all questions before submitting.")
3095
  except Exception as e:
3096
- print(f"Error in display_subjective_test_tab: {str(e)}", flush=True)
3097
  st.error("An error occurred while loading the tests. Please try again later.")
 
 
 
 
 
3098
 
3099
-
3100
-
3101
- def display_subjective_analysis(test_id, student_id, context):
3102
- """Display subjective test analysis to students and faculty"""
3103
  try:
3104
- test_doc = subjective_tests_collection.find_one({"_id": test_id})
3105
- submission = next(
3106
- (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
3107
- None
3108
- )
3109
 
3110
- if not submission:
3111
- st.warning("No submission found for analysis.")
3112
  return
3113
 
3114
- # Get or generate analysis
3115
- analysis = submission.get('analysis')
3116
- if not analysis:
3117
- analysis = analyze_subjective_answers(test_id, student_id, context)
3118
- if not analysis:
3119
- st.error("Could not generate analysis.")
3120
- return
3121
 
3122
- # Display analysis results
3123
- st.subheader("Answer Analysis")
3124
-
3125
- # Content analysis
3126
- st.markdown("### Evidence-Based Feedback")
3127
- st.markdown(analysis.get('content_analysis', 'No analysis available'))
3128
-
3129
- # Improvement suggestions
3130
- # st.markdown("### Suggested Improvements")
3131
- # st.markdown(analysis.get('suggested_improvements', 'No suggestions available'))
3132
-
3133
- # Analysis timestamp
3134
- analyzed_at = analysis.get('analyzed_at')
3135
- if analyzed_at:
3136
- st.caption(f"Analysis performed at: {analyzed_at.strftime('%Y-%m-%d %H:%M:%S UTC')}")
3137
 
3138
  except Exception as e:
3139
- st.error(f"Error displaying analysis: {e}")
 
 
35
  from live_chat_feature import display_live_chat_interface
36
  from code_playground import display_code_playground
37
  from urllib.parse import urlparse, parse_qs
38
+ from bs4 import BeautifulSoup
39
+ from rubrics import display_rubrics_tab
40
+ from subjective_test_evaluation import evaluate_subjective_answers, display_evaluation_to_faculty
41
 
42
  # Load environment variables
43
  load_dotenv()
 
52
  assignment_evaluation_collection = db["assignment_evaluation"]
53
  subjective_tests_collection = db["subjective_tests"]
54
  synoptic_store_collection = db["synoptic_store"]
55
+ assignments_collection = db["assignments"]
56
 
57
  # for implementing Context Caching:
58
  # PROJECT_ID = "novascholar-446709"
 
1191
  course_id,
1192
  session['session_id'],
1193
  test_title,
1194
+ questions
 
1195
  )
1196
  if test_id:
1197
  st.success("Subjective test saved successfully!")
 
1218
  session['session_id'],
1219
  st.session_state.test_title,
1220
  st.session_state.generated_questions,
 
1221
  )
1222
  if test_id:
1223
  st.success("Subjective test saved successfully!")
 
1279
  else:
1280
  st.error("Error saving quiz.")
1281
 
1282
+ st.subheader("Add Assignment")
 
1283
  with st.form("add_assignment_form"):
1284
  title = st.text_input("Assignment Title")
1285
+ description = st.text_area("Assignment Description")
1286
  due_date = st.date_input("Due Date")
1287
  submit = st.form_submit_button("Add Assignment")
1288
 
1289
  if submit:
1290
+ if not title or not description:
1291
+ st.error("Please fill in all required fields.")
1292
+ return
1293
+
1294
  due_date = datetime.combine(due_date, datetime.min.time())
 
1295
  assignment = {
1296
+ "_id": ObjectId(),
1297
  "title": title,
1298
+ "description": description,
1299
  "due_date": due_date,
1300
+ "course_id": course_id,
1301
+ "session_id": session['session_id'],
1302
+ "faculty_id": faculty_id,
1303
+ "created_at": datetime.utcnow(),
1304
+ "status": "active",
1305
  "submissions": []
1306
  }
1307
+
1308
+ assignments_collection.insert_one(assignment)
 
 
1309
  st.success("Assignment added successfully!")
1310
+
1311
+ st.subheader("Existing Assignments")
1312
+ assignments = assignments_collection.find({
1313
+ "session_id": session['session_id'],
1314
+ "course_id": course_id
1315
+ })
1316
 
1317
+ for assignment in assignments:
1318
+ with st.expander(f"📝 {assignment['title']}", expanded=True):
1319
+ st.markdown(f"**Due Date:** {assignment['due_date'].strftime('%Y-%m-%d')}")
1320
+ st.markdown(f"**Description:** {assignment['description']}")
1321
+
1322
+ total_submissions = len(assignment.get('submissions', []))
1323
+ total_students = students_collection.count_documents({
1324
+ "enrolled_courses": {
1325
+ "$elemMatch": {"course_id": course_id}
1326
+ }
1327
+ })
1328
+
1329
+ col1, col2, col3 = st.columns(3)
1330
+ with col1:
1331
+ st.metric("Total Submissions", total_submissions)
1332
+ with col2:
1333
+ submission_rate = (total_submissions / total_students * 100) if total_students > 0 else 0
1334
+ st.metric("Submission Rate", f"{submission_rate:.1f}%")
1335
+ with col3:
1336
+ st.metric("Pending Submissions", total_students - total_submissions)
1337
+
1338
+ # Display evaluation button and status
1339
+ evaluation_status = st.empty()
1340
+ eval_button = st.button("View/Generate Evaluations", key=f"eval_{assignment['_id']}")
1341
+
1342
+ if eval_button:
1343
+ st.session_state.show_evaluations = True
1344
+ st.session_state.current_assignment = assignment['_id']
1345
 
1346
+ # Show evaluation interface in a new container instead of an expander
1347
+ evaluation_container = st.container()
1348
+ with evaluation_container:
1349
+ from assignment_evaluation import display_evaluation_to_faculty
1350
+ display_evaluation_to_faculty(session['session_id'], student_id, course_id)
1351
 
1352
+ else: # Student view
1353
+ assignments = assignments_collection.find({
1354
+ "session_id": session['session_id'],
1355
+ "course_id": course_id,
1356
+ "status": "active"
1357
+ })
1358
+
1359
+ for assignment in assignments:
1360
+ with st.expander(f"📝 {assignment['title']}", expanded=True):
1361
+ st.markdown(f"**Due Date:** {assignment['due_date'].strftime('%Y-%m-%d')}")
1362
+ st.markdown(f"**Description:** {assignment['description']}")
1363
+
1364
+ existing_submission = next(
1365
+ (sub for sub in assignment.get('submissions', [])
1366
+ if sub['student_id'] == str(student_id)),
1367
+ None
1368
+ )
1369
+
1370
+ if existing_submission:
1371
+ st.success("Assignment submitted!")
1372
+ st.markdown(f"**Submitted on:** {existing_submission['submitted_at'].strftime('%Y-%m-%d %H:%M')}")
1373
+
1374
+ # Show evaluation status and feedback in the same container
1375
+ evaluation = assignment_evaluation_collection.find_one({
1376
+ "assignment_id": assignment['_id'],
1377
+ "student_id": str(student_id)
1378
+ })
1379
+
1380
+ if evaluation:
1381
+ st.markdown("### Evaluation")
1382
+ st.markdown(evaluation['evaluation'])
1383
+ else:
1384
+ st.info("Evaluation pending. Check back later.")
1385
+ else:
1386
  uploaded_file = st.file_uploader(
1387
  "Upload your work",
1388
+ type=['pdf', 'doc', 'docx', 'txt', 'py', 'ipynb', 'ppt', 'pptx'],
1389
+ key=f"upload_{assignment['_id']}"
1390
  )
1391
 
1392
  if uploaded_file is not None:
1393
+ if st.button("Submit Assignment", key=f"submit_{assignment['_id']}"):
 
 
 
1394
  text_content = extract_text_from_file(uploaded_file)
1395
 
1396
+ submission = {
1397
+ "student_id": str(student_id),
1398
+ "file_name": uploaded_file.name,
1399
+ "file_type": uploaded_file.type,
1400
+ "file_content": uploaded_file.getvalue(),
1401
+ "text_content": text_content,
1402
+ "submitted_at": datetime.utcnow()
1403
+ }
1404
+
1405
+ assignments_collection.update_one(
1406
+ {"_id": assignment['_id']},
1407
+ {"$push": {"submissions": submission}}
1408
  )
1409
 
1410
+ st.success("Assignment submitted successfully!")
1411
+ st.rerun()
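The student view above detects a prior submission by scanning the embedded submissions array in Python; an equivalent server-side check, sketched here under the assumption that the same MONGO_URI and novascholar_db are used, relies on a positional filter over the same schema:

from pymongo import MongoClient
from bson import ObjectId
import os

# Minimal sketch (assumed placeholder IDs; same database as configured above).
db = MongoClient(os.getenv("MONGO_URI"))["novascholar_db"]
assignment_id = ObjectId("679b1c2d3e4f5a6b7c8d9e0f")  # placeholder assignment _id
student_id = "64f0c0ffee0000000000abcd"               # placeholder student ID

existing = db["assignments"].find_one(
    {"_id": assignment_id, "submissions.student_id": student_id},
    {"submissions.$": 1},  # project only the matching submission element
)
already_submitted = existing is not None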
1412
 
1413
  def display_inclass_analytics(session, course_id):
1414
  """Display in-class analytics for faculty"""
 
1523
  "courses": course_id,
1524
  "_id": {"$nin": respondents}
1525
  }))
 
 
 
1526
 
1527
  if non_participants:
1528
  st.markdown("#### Students Who Haven't Participated")
 
2324
  # Uploaded on: {material['uploaded_at'].strftime('%Y-%m-%d %H:%M')}
2325
  # """)
2326
 
 
 
2327
  def display_quiz_tab(student_id, course_id, session_id):
2328
  """Display quizzes for students"""
2329
  st.header("Course Quizzes")
 
2385
  else:
2386
  st.error("Error submitting quiz. Please try again.")
2387
2388
  def display_session_content(student_id, course_id, session, username, user_type):
2389
  st.title(f"{session['title']}")
2390
 
 
2446
  "Pre-class Analytics",
2447
  "In-class Analytics",
2448
  "Post-class Analytics",
2449
+ "Rubrics",
2450
+ "End Terms",
2451
+ "Evaluate Subjective Tests"
2452
  ])
2453
  with tabs[0]:
2454
  upload_preclass_materials(session['session_id'], course_id)
 
2463
  with tabs[5]:
2464
  display_postclass_analytics(session, course_id)
2465
  with tabs[6]:
2466
+ display_rubrics_tab(session, course_id)
2467
+ with tabs[7]:
2468
  st.subheader("End Terms")
2469
  st.info("End term content will be available soon.")
2470
+ with tabs[8]: # New tab for evaluation
2471
+ display_evaluation_to_faculty(session['session_id'], student_id, course_id)
2472
 
2473
  def parse_model_response(response_text):
2474
  """Enhanced parser for model responses with better error handling.
 
2674
  print(f"Response text: {response.text if 'response' in locals() else 'No response generated'}")
2675
  return None
2676
 
2677
+ def save_subjective_test(course_id, session_id, title, questions):
2678
+ """Save subjective test to database with proper ID handling"""
2679
  try:
2680
+ # Ensure proper string format for IDs
2681
+ course_id = str(course_id)
2682
+ session_id = str(session_id)
2683
+
2684
+ # Format questions
2685
  formatted_questions = []
2686
  for q in questions:
2687
  formatted_question = {
2688
  "question": q["question"],
2689
+ "expected_points": [],
2690
+ "difficulty_level": "medium",
2691
+ "suggested_time": "5 minutes"
2692
  }
2693
  formatted_questions.append(formatted_question)
2694
 
 
2697
  "session_id": session_id,
2698
  "title": title,
2699
  "questions": formatted_questions,
 
2700
  "created_at": datetime.utcnow(),
2701
  "status": "active",
2702
  "submissions": []
2703
  }
2704
 
2705
  result = subjective_tests_collection.insert_one(test_data)
2706
+ return str(result.inserted_id)
2707
  except Exception as e:
2708
+ print(f"Error saving test: {e}")
2709
  return None
2710
 
2711
+ def submit_subjective_test(test_id, student_id, answers):
2712
+ """Submit test answers with proper ID handling"""
2713
  try:
2714
+ # Ensure IDs are strings
2715
+ test_id = str(test_id)
2716
+ student_id = str(student_id)
2717
+
2718
+ # Create submission document
2719
+ submission = {
2720
  "student_id": student_id,
2721
+ "answers": answers,
2722
+ "submitted_at": datetime.utcnow(),
2723
+ "status": "submitted"
2724
  }
2725
 
2726
+ # Update test document with new submission
2727
  result = subjective_tests_collection.update_one(
2728
+ {"_id": ObjectId(test_id)},
2729
+ {"$push": {"submissions": submission}}
 
 
 
 
2730
  )
2731
 
2732
+ return result.modified_count > 0
2733
  except Exception as e:
2734
+ print(f"Error submitting test: {e}")
2735
  return False
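Taken together, the reworked save and submit helpers can be exercised as below; the IDs and question text are placeholders, and the sketch assumes the collections configured at the top of this module are reachable:

# Hypothetical usage of the new helpers (example values, not from the commit).
test_id = save_subjective_test(
    course_id="COURSE101",
    session_id="S1",
    title="Week 3 Subjective Test",
    questions=[{"question": "Explain the flipped-classroom model."}],
)
if test_id:  # returned as a string by the new implementation
    submitted = submit_subjective_test(
        test_id,
        student_id="64f0c0ffee0000000000abcd",
        answers=["My answer ..."],
    )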
2736
2737
  def display_subjective_test_tab(student_id, course_id, session_id):
2738
+ """Display subjective tests and results for students"""
2739
  st.header("Subjective Tests")
2740
 
2741
  try:
 
2742
  subjective_tests = list(subjective_tests_collection.find({
2743
  "course_id": course_id,
2744
  "session_id": session_id,
 
2749
  st.info("No subjective tests available for this session.")
2750
  return
2751
 
2752
+ # Create tabs for Tests and Results
2753
+ test_tab, results_tab = st.tabs(["Available Tests", "Test Results"])
2754
+
2755
+ with test_tab:
2756
+ for test in subjective_tests:
2757
+ with st.expander(f"📝 {test['title']}", expanded=True):
2758
+ # Check for existing submission
2759
+ existing_submission = next(
2760
+ (sub for sub in test.get('submissions', [])
2761
+ if sub['student_id'] == str(student_id)),
2762
+ None
2763
+ )
 
 
 
 
2764
 
2765
+ if existing_submission:
2766
+ st.success("Test completed! Your answers have been submitted.")
2767
+ st.subheader("Your Answers")
2768
+ for i, ans in enumerate(existing_submission['answers']):
2769
+ st.markdown(f"**Question {i+1}:** {test['questions'][i]['question']}")
2770
+ st.markdown(f"**Your Answer:** {ans}")
2771
+ st.markdown("---")
2772
+ else:
2773
+ st.write("Please write your answers:")
2774
+ with st.form(key=f"subjective_test_form_{test['_id']}"):
2775
+ student_answers = []
2776
+ for i, question in enumerate(test['questions']):
2777
+ st.markdown(f"**Question {i+1}:** {question['question']}")
2778
+ answer = st.text_area(
2779
+ "Your answer:",
2780
+ key=f"q_{test['_id']}_{i}",
2781
+ height=200
 
 
 
 
2782
  )
2783
+ student_answers.append(answer)
2784
+
2785
+ if st.form_submit_button("Submit Test"):
2786
+ if all(answer.strip() for answer in student_answers):
2787
+ success = submit_subjective_test(
2788
+ test['_id'],
2789
+ str(student_id),
2790
+ student_answers
2791
+ )
2792
+ if success:
2793
+ st.success("Test submitted successfully!")
2794
+ st.rerun()
2795
+ else:
2796
+ st.error("Error submitting test. Please try again.")
2797
  else:
2798
+ st.error("Please answer all questions before submitting.")
2799
+
2800
+ with results_tab:
2801
+ # Display results for completed tests
2802
+ completed_tests = [
2803
+ test for test in subjective_tests
2804
+ if any(sub['student_id'] == str(student_id) for sub in test.get('submissions', []))
2805
+ ]
2806
+
2807
+ if not completed_tests:
2808
+ st.info("You haven't completed any tests yet.")
2809
+ return
2810
+
2811
+ # Create a selectbox for choosing which test results to view
2812
+ test_options = {
2813
+ f"{test['title']} (Submitted: {next(sub['submitted_at'].strftime('%Y-%m-%d') for sub in test['submissions'] if sub['student_id'] == str(student_id))})"
2814
+ : test['_id']
2815
+ for test in completed_tests
2816
+ }
2817
+
2818
+ selected_test = st.selectbox(
2819
+ "Select a test to view results:",
2820
+ options=list(test_options.keys())
2821
+ )
2822
+
2823
+ if selected_test:
2824
+ test_id = test_options[selected_test]
2825
+ display_test_results(test_id, student_id)
2826
+
2827
  except Exception as e:
 
2828
  st.error("An error occurred while loading the tests. Please try again later.")
2829
+ print(f"Error in display_subjective_test_tab: {str(e)}")
2830
+
2831
+ def display_test_results(test_id, student_id):
2832
+ """
2833
+ Display test results and analysis for a student
2834
 
2835
+ Args:
2836
+ test_id: ObjectId or str of the test
2837
+ student_id: str of the student ID
2838
+ """
2839
  try:
2840
+ # Fetch analysis from evaluation collection
2841
+ analysis = subjective_test_evaluation_collection.find_one({
2842
+ "test_id": test_id,
2843
+ "student_id": str(student_id)
2844
+ })
2845
 
2846
+ if not analysis:
2847
+ st.info("Analysis will be available soon. Please check back later.")
2848
  return
2849
 
2850
+ st.header("Test Analysis")
 
 
 
 
 
 
2851
 
2852
+ # Display overall evaluation summary if available
2853
+ if "overall_summary" in analysis:
2854
+ with st.expander("Overall Performance Summary", expanded=True):
2855
+ st.markdown(analysis["overall_summary"])
2856
+
2857
+ # Display individual question evaluations
2858
+ st.subheader("Question-wise Analysis")
2859
+ for eval_item in analysis.get('evaluations', []):
2860
+ with st.expander(f"Question {eval_item['question_number']}", expanded=True):
2861
+ st.markdown("**Question:**")
2862
+ st.markdown(eval_item['question'])
2863
+
2864
+ st.markdown("**Your Answer:**")
2865
+ st.markdown(eval_item['answer'])
2866
+
2867
+ st.markdown("**Evaluation:**")
2868
+ st.markdown(eval_item['evaluation'])
2869
+
2870
+ # Extract and display score if available
2871
+ if "Score:" in eval_item['evaluation']:
2872
+ score_line = next((line for line in eval_item['evaluation'].split('\n') if "Score:" in line), None)
2873
+ if score_line:
2874
+ score = score_line.split("Score:")[1].strip()
2875
+ st.metric("Score", score)
2876
+
2877
+ # Display improvement points if available
2878
+ if "Key Areas for Improvement" in eval_item['evaluation']:
2879
+ st.markdown("**Areas for Improvement:**")
2880
+ improvement_section = eval_item['evaluation'].split("Key Areas for Improvement")[1]
2881
+ points = [point.strip('- ').strip() for point in improvement_section.split('\n') if point.strip().startswith('-')]
2882
+ for point in points:
2883
+ if point: # Only display non-empty points
2884
+ st.markdown(f"• {point}")
2885
+
2886
+ # Display evaluation timestamp
2887
+ if "evaluated_at" in analysis:
2888
+ st.caption(f"Analysis generated on: {analysis['evaluated_at'].strftime('%Y-%m-%d %H:%M:%S UTC')}")
2889
 
2890
  except Exception as e:
2891
+ st.error("An error occurred while loading the analysis. Please try again later.")
2892
+ print(f"Error in display_test_results: {str(e)}")
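display_test_results reads documents of the shape written by evaluate_subjective_answers in subjective_test_evaluation.py below; a minimal illustration of that shape, with placeholder values:

from datetime import datetime
from bson import ObjectId

# Illustrative document as stored in the subjective_test_evaluation collection.
example_evaluation = {
    "test_id": ObjectId("679b1c2d3e4f5a6b7c8d9e0f"),  # placeholder test ObjectId
    "student_id": "64f0c0ffee0000000000abcd",          # placeholder student ID
    "session_id": "S1",
    "evaluations": [
        {
            "question_number": 1,
            "question": "Explain the flipped-classroom model.",
            "answer": "Students study material before class ...",
            "evaluation": "**Score and Evidence**\n- Score: 8/10\n- Evidence for deduction: ...",
        }
    ],
    "evaluated_at": datetime.utcnow(),
    # An optional "overall_summary" field is also rendered when present.
}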
subjective_test_evaluation.py CHANGED
@@ -1,252 +1,247 @@
1
- import openai
2
- from pymongo import MongoClient
3
  from datetime import datetime
 
4
  import os
 
5
  from dotenv import load_dotenv
6
- import re
7
- import streamlit as st
8
  from bson import ObjectId
9
 
10
  load_dotenv()
11
- MONGO_URI = os.getenv('MONGO_URI')
12
- OPENAI_API_KEY = os.getenv('OPENAI_KEY')
13
 
 
 
14
  client = MongoClient(MONGO_URI)
15
- db = client['novascholar_db']
16
- rubrics_collection = db['rubrics']
17
- resources_collection = db['resources']
18
- subjective_tests_collection = db['subjective_tests']
19
- subjective_test_analysis_collection = db['subjective_test_analysis']
20
-
21
- openai.api_key = OPENAI_API_KEY
22
-
23
- def evaluate_subjective_answers(test_id, student_id, course_id):
24
- """Evaluate subjective test answers using OpenAI."""
25
  try:
26
- # Get test and submission details
27
- test_doc = subjective_tests_collection.find_one({
28
- "_id": ObjectId(test_id),
29
- "course_id": course_id
30
- })
31
- if not test_doc:
32
- return {
33
- "content_analysis": "Error: Test not found",
34
- "analyzed_at": datetime.utcnow(),
35
- "correctness_score": 0
36
- }
37
 
 
38
  submission = next(
39
- (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
 
40
  None
41
  )
42
-
43
  if not submission:
44
- return {
45
- "content_analysis": "Error: Submission not found",
46
- "analyzed_at": datetime.utcnow(),
47
- "correctness_score": 0
48
- }
49
-
50
- # Rest of the evaluation logic remains the same
51
- questions = test_doc.get('questions', [])
52
- student_answers = submission.get('answers', [])
53
-
54
- if not questions or not student_answers:
55
- return {
56
- "content_analysis": "Error: No questions or answers found",
57
- "analyzed_at": datetime.utcnow(),
58
- "correctness_score": 0
59
- }
60
-
61
- # Retrieve rubrics for the session
62
- rubric_doc = rubrics_collection.find_one({
63
- "session_id": test_doc['session_id'],
64
- "course_id": course_id
65
- })
66
-
67
- if not rubric_doc:
68
- return {
69
- "content_analysis": "Error: Rubric not found",
70
- "analyzed_at": datetime.utcnow(),
71
- "correctness_score": 0
72
- }
73
-
74
- rubric = rubric_doc.get('rubric', {})
75
-
76
- # Retrieve pre-class materials
77
- pre_class_materials = resources_collection.find({
78
- "session_id": test_doc['session_id'],
79
- "course_id": course_id
80
- })
81
- pre_class_content = "\n".join([material.get('text_content', '') for material in pre_class_materials])
82
-
83
- # Analyze each question
84
- all_analyses = []
85
- total_score = 0
86
-
87
- for i, (question, answer) in enumerate(zip(questions, student_answers), 1):
88
- analysis_content = f"Question {i}: {question['question']}\nAnswer: {answer}\n\nRubric: {rubric}\n\nPre-class Materials: {pre_class_content}\n\n"
89
 
90
  prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
 
 
 
 
 
 
91
 
92
- 1. Evaluation Process:
93
- - Use each rubric criterion (scored 1-4) for internal assessment
94
- - Compare response with pre-class materials
95
- - Check alignment with all rubric requirements
96
- - Calculate final score: sum of criteria scores converted to 10-point scale
97
 
98
- Pre-class Materials:
99
- {pre_class_content}
100
 
101
- Rubric Criteria:
102
- {rubric}
103
 
104
- Question and Answer:
105
- {analysis_content}
106
 
107
- Provide your assessment in the following format:
 
 
108
 
109
- **Score and Evidence**
110
- - Score: [X]/10
111
- - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
112
- **Key Areas for Improvement**
113
- - [Concise improvement point 1]
114
- - [Concise improvement point 2]
115
- - [Concise improvement point 3]
116
  """
117
 
118
- response = openai.Completion.create(
119
- model="text-davinci-003",
120
- prompt=prompt_template,
 
121
  max_tokens=500,
122
- temperature=0.7
123
  )
124
 
125
- individual_analysis = response.choices[0].text.strip()
126
-
127
- try:
128
- score_match = re.search(r'Score: (\d+)', individual_analysis)
129
- question_score = int(score_match.group(1)) if score_match else 0
130
- total_score += question_score
131
- except:
132
- question_score = 0
133
-
134
- formatted_analysis = f"\n\n## Question {i} Analysis\n\n{individual_analysis}"
135
- all_analyses.append(formatted_analysis)
136
-
137
- average_score = round(total_score / len(questions)) if questions else 0
138
- combined_analysis = "\n".join(all_analyses)
139
-
140
- return {
141
- "content_analysis": combined_analysis,
142
- "analyzed_at": datetime.utcnow(),
143
- "correctness_score": average_score
144
  }
 
 
 
145
 
146
  except Exception as e:
147
- return {
148
- "content_analysis": f"Error evaluating answers: {str(e)}",
149
- "analyzed_at": datetime.utcnow(),
150
- "correctness_score": 0
151
- }
152
 
153
  def display_evaluation_to_faculty(session_id, student_id, course_id):
154
- """Display submitted tests with improved error handling and debugging"""
155
- st.subheader("Evaluate Subjective Tests")
156
-
 
 
157
  try:
158
- # Convert all IDs to strings for consistent comparison
159
- session_id = str(session_id)
160
- student_id = str(student_id)
161
- course_id = str(course_id)
162
-
163
- print(f"Searching for tests with session_id: {session_id}, student_id: {student_id}, course_id: {course_id}")
164
-
165
- # Query for tests
166
- query = {
167
- "session_id": session_id,
168
- "course_id": course_id,
169
- "submissions": {
170
- "$elemMatch": {
171
- "student_id": student_id
172
- }
173
- }
174
- }
175
-
176
- # Log the query for debugging
177
- print(f"MongoDB Query: {query}")
178
-
179
- # Fetch tests
180
- tests = list(subjective_tests_collection.find(query))
181
- print(f"Found {len(tests)} tests matching query")
182
-
183
  if not tests:
184
- # Check if any tests exist for this session
185
- all_session_tests = list(subjective_tests_collection.find({
186
- "session_id": session_id,
187
- "course_id": course_id
188
- }))
189
-
190
- if all_session_tests:
191
- print(f"Found {len(all_session_tests)} tests for this session, but no submissions from student {student_id}")
192
- st.warning("No submitted tests found for this student, but tests exist for this session.")
193
- else:
194
- print("No tests found for this session at all")
195
- st.info("No tests have been created for this session yet.")
196
  return
197
 
198
- # Display tests and handle evaluation
199
- for test in tests:
200
- with st.expander(f"Test: {test.get('title', 'Untitled Test')}", expanded=True):
201
- # Find student submission
202
- submission = next(
203
- (sub for sub in test.get('submissions', [])
204
- if sub['student_id'] == student_id),
205
- None
206
- )
207
-
208
- if submission:
209
- st.write("### Student's Answers")
210
- for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
211
- st.markdown(f"**Q{i+1}:** {question['question']}")
212
- st.markdown(f"**A{i+1}:** {answer}")
213
- st.markdown("---")
214
-
215
- # Generate/display analysis
216
- if st.button(f"Generate Analysis for {test.get('title')}"):
217
- with st.spinner("Analyzing responses..."):
218
- analysis = evaluate_subjective_answers(
219
- str(test['_id']),
220
- student_id,
221
- course_id
222
- )
223
-
224
- if analysis:
225
- st.markdown("### Analysis")
226
- st.markdown(analysis['content_analysis'])
227
- st.metric("Score", f"{analysis['correctness_score']}/10")
228
- else:
229
- st.error("Submission data not found for this student")
230
-
231
- except Exception as e:
232
- st.error("An error occurred while loading the tests")
233
- with st.expander("Error Details"):
234
- st.write(f"Error: {str(e)}")
235
- st.write(f"Session ID: {session_id}")
236
- st.write(f"Student ID: {student_id}")
237
- st.write(f"Course ID: {course_id}")
238
-
239
- def check_test_submission(session_id, student_id, course_id):
240
- """Utility function to check test submission status"""
241
- try:
242
- query = {
243
- "session_id": str(session_id),
244
- "course_id": str(course_id),
245
- "submissions.student_id": str(student_id)
246
  }
247
 
248
- test = subjective_tests_collection.find_one(query)
249
- return bool(test)
250
  except Exception as e:
251
- print(f"Error checking submission: {e}")
252
- return False
 
1
+ import streamlit as st
 
2
  from datetime import datetime
3
+ from pymongo import MongoClient
4
  import os
5
+ from openai import OpenAI
6
  from dotenv import load_dotenv
 
 
7
  from bson import ObjectId
8
 
9
  load_dotenv()
 
 
10
 
11
+ # MongoDB setup
12
+ MONGO_URI = os.getenv('MONGO_URI')
13
  client = MongoClient(MONGO_URI)
14
+ db = client["novascholar_db"]
15
+ subjective_tests_collection = db["subjective_tests"]
16
+ subjective_test_evaluation_collection = db["subjective_test_evaluation"]
17
+ resources_collection = db["resources"]
18
+ students_collection = db["students"]
19
+
20
+ def evaluate_subjective_answers(session_id, student_id, test_id):
21
+ """
22
+ Generate evaluation and analysis for subjective test answers
23
+ """
24
  try:
25
+ # Fetch test and student submission
26
+ test = subjective_tests_collection.find_one({"_id": test_id})
27
+ if not test:
28
+ return None
 
 
 
 
 
 
 
29
 
30
+ # Find student's submission
31
  submission = next(
32
+ (sub for sub in test.get('submissions', [])
33
+ if sub['student_id'] == str(student_id)),
34
  None
35
  )
 
36
  if not submission:
37
+ return None
38
+
39
+ # Fetch pre-class materials
40
+ pre_class_materials = resources_collection.find({"session_id": session_id})
41
+ pre_class_content = ""
42
+ for material in pre_class_materials:
43
+ if 'text_content' in material:
44
+ pre_class_content += material['text_content'] + "\n"
45
+
46
+ # Default rubric (can be customized later)
47
+ default_rubric = """
48
+ 1. Content Understanding (1-4):
49
+ - Demonstrates comprehensive understanding of core concepts
50
+ - Accurately applies relevant theories and principles
51
+ - Provides specific examples and evidence
52
+
53
+ 2. Critical Analysis (1-4):
54
+ - Shows depth of analysis
55
+ - Makes meaningful connections
56
+ - Demonstrates original thinking
57
+
58
+ 3. Organization & Clarity (1-4):
59
+ - Clear structure and flow
60
+ - Well-developed arguments
61
+ - Effective use of examples
62
+ """
63
+
64
+ # Initialize OpenAI client
65
+ client = OpenAI(api_key=os.getenv('OPENAI_KEY'))
66
+
67
+ evaluations = []
68
+ for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
69
+ analysis_content = f"""
70
+ Question: {question['question']}
71
+ Student Answer: {answer}
72
+ """
 
 
 
 
 
 
 
 
 
73
 
74
  prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
75
+
76
+ 1. Evaluation Process:
77
+ - Use each rubric criterion (scored 1-4) for internal assessment
78
+ - Compare response with pre-class materials
79
+ - Check alignment with all rubric requirements
80
+ - Calculate final score: sum of criteria scores converted to 10-point scale
81
 
82
+ Pre-class Materials:
83
+ {pre_class_content[:1000]} # Truncate to avoid token limits
 
 
 
84
 
85
+ Rubric Criteria:
86
+ {default_rubric}
87
 
88
+ Question and Answer:
89
+ {analysis_content}
90
 
91
+ Provide your assessment in the following format:
 
92
 
93
+ **Score and Evidence**
94
+ - Score: [X]/10
95
+ - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
96
 
97
+ **Key Areas for Improvement**
98
+ - [Concise improvement point 1]
99
+ - [Concise improvement point 2]
100
+ - [Concise improvement point 3]
 
 
 
101
  """
102
 
103
+ # Generate evaluation using OpenAI
104
+ response = client.chat.completions.create(
105
+ model="gpt-4o-mini",
106
+ messages=[{"role": "user", "content": prompt_template}],
107
  max_tokens=500,
108
+ temperature=0.4
109
  )
110
 
111
+ evaluations.append({
112
+ "question_number": i + 1,
113
+ "question": question['question'],
114
+ "answer": answer,
115
+ "evaluation": response.choices[0].message.content
116
+ })
117
+
118
+ # Store evaluation in MongoDB
119
+ evaluation_doc = {
120
+ "test_id": test_id,
121
+ "student_id": student_id,
122
+ "session_id": session_id,
123
+ "evaluations": evaluations,
124
+ "evaluated_at": datetime.utcnow()
 
 
 
 
 
125
  }
126
+
127
+ subjective_test_evaluation_collection.insert_one(evaluation_doc)
128
+ return evaluation_doc
129
 
130
  except Exception as e:
131
+ print(f"Error in evaluate_subjective_answers: {str(e)}")
132
+ return None
 
 
 
133
 
134
  def display_evaluation_to_faculty(session_id, student_id, course_id):
135
+ """
136
+ Display interface for faculty to generate and view evaluations
137
+ """
138
+ st.header("Evaluate Subjective Tests")
139
+
140
  try:
141
+ # Fetch available tests
142
+ tests = list(subjective_tests_collection.find({
143
+ "session_id": str(session_id),
144
+ "status": "active"
145
+ }))
146
+
147
  if not tests:
148
+ st.info("No subjective tests found for this session.")
149
  return
150
 
151
+ # Select test
152
+ test_options = {
153
+ f"{test['title']} (Created: {test['created_at'].strftime('%Y-%m-%d %H:%M')})" if 'created_at' in test else test['title']: test['_id']
154
+ for test in tests
155
  }
156
 
157
+ if test_options:
158
+ selected_test = st.selectbox(
159
+ "Select Test to Evaluate",
160
+ options=list(test_options.keys())
161
+ )
162
+
163
+ if selected_test:
164
+ test_id = test_options[selected_test]
165
+ test = subjective_tests_collection.find_one({"_id": test_id})
166
+
167
+ if test:
168
+ submissions = test.get('submissions', [])
169
+ if not submissions:
170
+ st.warning("No submissions found for this test.")
171
+ return
172
+
173
+ # Create a dropdown for student submissions
174
+ student_options = {
175
+ f"{students_collection.find_one({'_id': ObjectId(sub['student_id'])})['full_name']} (Submitted: {sub['submitted_at'].strftime('%Y-%m-%d %H:%M')})": sub['student_id']
176
+ for sub in submissions
177
+ }
178
+
179
+ selected_student = st.selectbox(
180
+ "Select Student Submission",
181
+ options=list(student_options.keys())
182
+ )
183
+
184
+ if selected_student:
185
+ student_id = student_options[selected_student]
186
+ submission = next(sub for sub in submissions if sub['student_id'] == student_id)
187
+
188
+ st.markdown(f"**Submission Date:** {submission.get('submitted_at', 'No submission date')}")
189
+ st.markdown("---")
190
+
191
+ # Display questions and answers
192
+ st.subheader("Submission Details")
193
+ for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
194
+ st.markdown(f"**Question {i+1}:** {question['question']}")
195
+ st.markdown(f"**Answer:** {answer}")
196
+ st.markdown("---")
197
+
198
+ # Check for existing evaluation
199
+ existing_eval = subjective_test_evaluation_collection.find_one({
200
+ "test_id": test_id,
201
+ "student_id": student_id,
202
+ "session_id": str(session_id)
203
+ })
204
+
205
+ if existing_eval:
206
+ st.subheader("Evaluation Results")
207
+ for eval_item in existing_eval['evaluations']:
208
+ st.markdown(f"### Evaluation for Question {eval_item['question_number']}")
209
+ st.markdown(eval_item['evaluation'])
210
+ st.markdown("---")
211
+
212
+ st.success("✓ Evaluation completed")
213
+ if st.button("Regenerate Evaluation", key=f"regenerate_{student_id}_{test_id}"):
214
+ with st.spinner("Regenerating evaluation..."):
215
+ evaluation = evaluate_subjective_answers(
216
+ str(session_id),
217
+ student_id,
218
+ test_id
219
+ )
220
+ if evaluation:
221
+ st.success("Evaluation regenerated successfully!")
222
+ st.rerun()
223
+ else:
224
+ st.error("Error regenerating evaluation.")
225
+ else:
226
+ st.subheader("Generate Evaluation")
227
+ if st.button("Generate Evaluation", key=f"evaluate_{student_id}_{test_id}"):
228
+ with st.spinner("Generating evaluation..."):
229
+ evaluation = evaluate_subjective_answers(
230
+ str(session_id),
231
+ student_id,
232
+ test_id
233
+ )
234
+ if evaluation:
235
+ st.success("Evaluation generated successfully!")
236
+ st.markdown("### Generated Evaluation")
237
+ for eval_item in evaluation['evaluations']:
238
+ st.markdown(f"#### Question {eval_item['question_number']}")
239
+ st.markdown(eval_item['evaluation'])
240
+ st.markdown("---")
241
+ st.rerun()
242
+ else:
243
+ st.error("Error generating evaluation.")
244
+
245
  except Exception as e:
246
+ st.error(f"An error occurred while loading the evaluations: {str(e)}")
247
+ print(f"Error in display_evaluation_to_faculty: {str(e)}")
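End to end, the evaluation flow introduced by this commit is: students submit answers through display_subjective_test_tab in session_page.py, faculty trigger evaluate_subjective_answers from the new "Evaluate Subjective Tests" tab via display_evaluation_to_faculty, the result is stored in the subjective_test_evaluation collection, and students read it back through display_test_results under the "Test Results" tab.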