import os
import re
from datetime import datetime

import openai
import streamlit as st
from bson import ObjectId
from dotenv import load_dotenv
from pymongo import MongoClient

load_dotenv()
MONGO_URI = os.getenv('MONGO_URI')
OPENAI_API_KEY = os.getenv('OPENAI_KEY')

client = MongoClient(MONGO_URI)
db = client['novascholar_db']
rubrics_collection = db['rubrics']
resources_collection = db['resources']
subjective_tests_collection = db['subjective_tests']
subjective_test_analysis_collection = db['subjective_test_analysis']

openai.api_key = OPENAI_API_KEY
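
# The module expects a .env file (loaded above via load_dotenv) providing
# MONGO_URI and OPENAI_KEY, for example (placeholder values, not real
# credentials):
#
#     MONGO_URI=mongodb://localhost:27017
#     OPENAI_KEY=sk-...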


def evaluate_subjective_answers(test_id, student_id, course_id):
    """Evaluate subjective test answers using OpenAI."""
    try:
        # Fetch the test document for this course
        test_doc = subjective_tests_collection.find_one({
            "_id": ObjectId(test_id),
            "course_id": course_id
        })
        if not test_doc:
            return {
                "content_analysis": "Error: Test not found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        # Locate this student's submission within the test document
        submission = next(
            (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
            None
        )

        if not submission:
            return {
                "content_analysis": "Error: Submission not found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        questions = test_doc.get('questions', [])
        student_answers = submission.get('answers', [])

        if not questions or not student_answers:
            return {
                "content_analysis": "Error: No questions or answers found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        # Grading context: the session rubric and any pre-class materials
        rubric_doc = rubrics_collection.find_one({
            "session_id": test_doc['session_id'],
            "course_id": course_id
        })

        if not rubric_doc:
            return {
                "content_analysis": "Error: Rubric not found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        rubric = rubric_doc.get('rubric', {})

        pre_class_materials = resources_collection.find({
            "session_id": test_doc['session_id'],
            "course_id": course_id
        })
        pre_class_content = "\n".join([material.get('text_content', '') for material in pre_class_materials])

        all_analyses = []
        total_score = 0

        # Evaluate each question/answer pair individually
        for i, (question, answer) in enumerate(zip(questions, student_answers), 1):
            analysis_content = f"Question {i}: {question['question']}\nAnswer: {answer}\n\nRubric: {rubric}\n\nPre-class Materials: {pre_class_content}\n\n"

            prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:

1. Evaluation Process:
- Use each rubric criterion (scored 1-4) for internal assessment
- Compare response with pre-class materials
- Check alignment with all rubric requirements
- Calculate final score: sum of criteria scores converted to 10-point scale

Pre-class Materials:
{pre_class_content}

Rubric Criteria:
{rubric}

Question and Answer:
{analysis_content}

Provide your assessment in the following format:

**Score and Evidence**
- Score: [X]/10
- Evidence for deduction: [One-line reference to most significant gap or inaccuracy]

**Key Areas for Improvement**
- [Concise improvement point 1]
- [Concise improvement point 2]
- [Concise improvement point 3]
"""

            response = openai.Completion.create(
                model="text-davinci-003",
                prompt=prompt_template,
                max_tokens=500,
                temperature=0.7
            )

            individual_analysis = response.choices[0].text.strip()

            # Extract the numeric score ("Score: X/10") from the model's response
            try:
                score_match = re.search(r'Score: (\d+)', individual_analysis)
                question_score = int(score_match.group(1)) if score_match else 0
                total_score += question_score
            except (AttributeError, ValueError):
                question_score = 0

            formatted_analysis = f"\n\n## Question {i} Analysis\n\n{individual_analysis}"
            all_analyses.append(formatted_analysis)

        average_score = round(total_score / len(questions)) if questions else 0
        combined_analysis = "\n".join(all_analyses)

        return {
            "content_analysis": combined_analysis,
            "analyzed_at": datetime.utcnow(),
            "correctness_score": average_score
        }

    except Exception as e:
        return {
            "content_analysis": f"Error evaluating answers: {str(e)}",
            "analyzed_at": datetime.utcnow(),
            "correctness_score": 0
        }
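
# Note: subjective_test_analysis_collection is defined above but not written to
# in this module. A caller could persist the returned analysis there, for
# example (the stored document shape below is an assumption, not the app's
# confirmed schema):
#
#     subjective_test_analysis_collection.insert_one({
#         "test_id": ObjectId(test_id),
#         "student_id": student_id,
#         "course_id": course_id,
#         **evaluate_subjective_answers(test_id, student_id, course_id),
#     })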


def display_evaluation_to_faculty(session_id, student_id, course_id):
    """Display submitted tests with improved error handling and debugging"""
    st.subheader("Evaluate Subjective Tests")

    try:
        session_id = str(session_id)
        student_id = str(student_id)
        course_id = str(course_id)

        print(f"Searching for tests with session_id: {session_id}, student_id: {student_id}, course_id: {course_id}")

        query = {
            "session_id": session_id,
            "course_id": course_id,
            "submissions": {
                "$elemMatch": {
                    "student_id": student_id
                }
            }
        }

        print(f"MongoDB Query: {query}")

        tests = list(subjective_tests_collection.find(query))
        print(f"Found {len(tests)} tests matching query")

        if not tests:
            all_session_tests = list(subjective_tests_collection.find({
                "session_id": session_id,
                "course_id": course_id
            }))

            if all_session_tests:
                print(f"Found {len(all_session_tests)} tests for this session, but no submissions from student {student_id}")
                st.warning("No submitted tests found for this student, but tests exist for this session.")
            else:
                print("No tests found for this session at all")
                st.info("No tests have been created for this session yet.")
            return

        for test in tests:
            with st.expander(f"Test: {test.get('title', 'Untitled Test')}", expanded=True):
                submission = next(
                    (sub for sub in test.get('submissions', [])
                     if sub['student_id'] == student_id),
                    None
                )

                if submission:
                    st.write("### Student's Answers")
                    for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
                        st.markdown(f"**Q{i+1}:** {question['question']}")
                        st.markdown(f"**A{i+1}:** {answer}")
                        st.markdown("---")

                    if st.button(f"Generate Analysis for {test.get('title')}"):
                        with st.spinner("Analyzing responses..."):
                            analysis = evaluate_subjective_answers(
                                str(test['_id']),
                                student_id,
                                course_id
                            )

                        if analysis:
                            st.markdown("### Analysis")
                            st.markdown(analysis['content_analysis'])
                            st.metric("Score", f"{analysis['correctness_score']}/10")
                else:
                    st.error("Submission data not found for this student")

    except Exception as e:
        st.error("An error occurred while loading the tests")
        with st.expander("Error Details"):
            st.write(f"Error: {str(e)}")
            st.write(f"Session ID: {session_id}")
            st.write(f"Student ID: {student_id}")
            st.write(f"Course ID: {course_id}")


def check_test_submission(session_id, student_id, course_id):
    """Utility function to check test submission status"""
    try:
        query = {
            "session_id": str(session_id),
            "course_id": str(course_id),
            "submissions.student_id": str(student_id)
        }

        test = subjective_tests_collection.find_one(query)
        return bool(test)
    except Exception as e:
        print(f"Error checking submission: {e}")
        return False
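

# Example wiring from a faculty-facing Streamlit page. The variables
# selected_session_id, selected_student_id, and selected_course_id are
# hypothetical -- in the app they would come from session state or page routing:
#
#     if check_test_submission(selected_session_id, selected_student_id, selected_course_id):
#         display_evaluation_to_faculty(selected_session_id, selected_student_id, selected_course_id)
#     else:
#         st.info("This student has not submitted the test yet.")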