import openai
from pymongo import MongoClient
from datetime import datetime
import os
from dotenv import load_dotenv
import re
import streamlit as st
from bson import ObjectId

load_dotenv()
MONGO_URI = os.getenv('MONGO_URI')
OPENAI_API_KEY = os.getenv('OPENAI_KEY')

client = MongoClient(MONGO_URI)
db = client['novascholar_db']
rubrics_collection = db['rubrics']
resources_collection = db['resources']
subjective_tests_collection = db['subjective_tests']
subjective_test_analysis_collection = db['subjective_test_analysis']

openai.api_key = OPENAI_API_KEY

def evaluate_subjective_answers(test_id, student_id, course_id):
    """Evaluate subjective test answers using OpenAI."""
    try:
        # Get test and submission details
        test_doc = subjective_tests_collection.find_one({
            "_id": ObjectId(test_id),
            "course_id": course_id
        })
        if not test_doc:
            return {
                "content_analysis": "Error: Test not found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        submission = next(
            (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
            None
        )

        if not submission:
            return {
                "content_analysis": "Error: Submission not found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        # Extract the test questions and the student's submitted answers
        questions = test_doc.get('questions', [])
        student_answers = submission.get('answers', [])

        if not questions or not student_answers:
            return {
                "content_analysis": "Error: No questions or answers found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        # Retrieve rubrics for the session
        rubric_doc = rubrics_collection.find_one({
            "session_id": test_doc['session_id'],
            "course_id": course_id
        })
        
        if not rubric_doc:
            return {
                "content_analysis": "Error: Rubric not found",
                "analyzed_at": datetime.utcnow(),
                "correctness_score": 0
            }

        rubric = rubric_doc.get('rubric', {})

        # Retrieve pre-class materials
        pre_class_materials = resources_collection.find({
            "session_id": test_doc['session_id'],
            "course_id": course_id
        })
        pre_class_content = "\n".join([material.get('text_content', '') for material in pre_class_materials])

        # Analyze each question
        all_analyses = []
        total_score = 0

        for i, (question, answer) in enumerate(zip(questions, student_answers), 1):
            analysis_content = f"Question {i}: {question['question']}\nAnswer: {answer}\n\nRubric: {rubric}\n\nPre-class Materials: {pre_class_content}\n\n"

            prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:



                1. Evaluation Process:

                - Use each rubric criterion (scored 1-4) for internal assessment

                - Compare response with pre-class materials

                - Check alignment with all rubric requirements

                - Calculate final score: sum of criteria scores converted to 10-point scale



                Pre-class Materials:

                {pre_class_content}



                Rubric Criteria:

                {rubric}



                Question and Answer:

                {analysis_content}



                Provide your assessment in the following format:



                **Score and Evidence**

                - Score: [X]/10

                - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]

                **Key Areas for Improvement**

                - [Concise improvement point 1]

                - [Concise improvement point 2]

                - [Concise improvement point 3]

            """

            # The legacy Completions endpoint and the text-davinci-003 model have been
            # retired; the Chat Completions API with a current model is used instead.
            response = openai.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt_template}],
                max_tokens=500,
                temperature=0.7
            )

            individual_analysis = response.choices[0].message.content.strip()

            # Extract the numeric score from the "Score: X/10" line of the analysis;
            # default to 0 if the model did not follow the requested format.
            score_match = re.search(r'Score:\s*(\d+)', individual_analysis)
            question_score = int(score_match.group(1)) if score_match else 0
            total_score += question_score

            formatted_analysis = f"\n\n## Question {i} Analysis\n\n{individual_analysis}"
            all_analyses.append(formatted_analysis)

        average_score = round(total_score / len(questions)) if questions else 0
        combined_analysis = "\n".join(all_analyses)

        return {
            "content_analysis": combined_analysis,
            "analyzed_at": datetime.utcnow(),
            "correctness_score": average_score
        }

    except Exception as e:
        return {
            "content_analysis": f"Error evaluating answers: {str(e)}",
            "analyzed_at": datetime.utcnow(),
            "correctness_score": 0
        }

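# --- Illustrative persistence helper (not part of the original module) ---
# The `subjective_test_analysis` collection is declared above but never written to.
# This sketch shows one way the dict returned by evaluate_subjective_answers()
# could be stored; the helper name and document shape are assumptions, not the
# app's confirmed schema.
def save_analysis_result(test_id, student_id, course_id, analysis):
    """Upsert a single student's analysis for a test (hypothetical schema)."""
    subjective_test_analysis_collection.update_one(
        {
            "test_id": str(test_id),
            "student_id": str(student_id),
            "course_id": str(course_id),
        },
        {"$set": analysis},  # analysis: dict returned by evaluate_subjective_answers()
        upsert=True,
    )
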
def display_evaluation_to_faculty(session_id, student_id, course_id):
    """Display submitted tests with improved error handling and debugging"""
    st.subheader("Evaluate Subjective Tests")
    
    try:
        # Convert all IDs to strings for consistent comparison
        session_id = str(session_id)
        student_id = str(student_id)
        course_id = str(course_id)
        
        print(f"Searching for tests with session_id: {session_id}, student_id: {student_id}, course_id: {course_id}")
        
        # Query for tests
        query = {
            "session_id": session_id,
            "course_id": course_id,
            "submissions": {
                "$elemMatch": {
                    "student_id": student_id
                }
            }
        }
        
        # Log the query for debugging
        print(f"MongoDB Query: {query}")
        
        # Fetch tests
        tests = list(subjective_tests_collection.find(query))
        print(f"Found {len(tests)} tests matching query")
        
        if not tests:
            # Check if any tests exist for this session
            all_session_tests = list(subjective_tests_collection.find({
                "session_id": session_id,
                "course_id": course_id
            }))
            
            if all_session_tests:
                print(f"Found {len(all_session_tests)} tests for this session, but no submissions from student {student_id}")
                st.warning("No submitted tests found for this student, but tests exist for this session.")
            else:
                print("No tests found for this session at all")
                st.info("No tests have been created for this session yet.")
            return

        # Display tests and handle evaluation
        for test in tests:
            with st.expander(f"Test: {test.get('title', 'Untitled Test')}", expanded=True):
                # Find student submission
                submission = next(
                    (sub for sub in test.get('submissions', []) 
                     if sub['student_id'] == student_id),
                    None
                )
                
                if submission:
                    st.write("### Student's Answers")
                    for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
                        st.markdown(f"**Q{i+1}:** {question['question']}")
                        st.markdown(f"**A{i+1}:** {answer}")
                        st.markdown("---")
                    
                    # Generate/display analysis
                    if st.button(f"Generate Analysis for {test.get('title')}"):
                        with st.spinner("Analyzing responses..."):
                            analysis = evaluate_subjective_answers(
                                str(test['_id']),
                                student_id,
                                course_id
                            )
                            
                            if analysis:
                                st.markdown("### Analysis")
                                st.markdown(analysis['content_analysis'])
                                st.metric("Score", f"{analysis['correctness_score']}/10")
                else:
                    st.error("Submission data not found for this student")
                    
    except Exception as e:
        st.error("An error occurred while loading the tests")
        with st.expander("Error Details"):
            st.write(f"Error: {str(e)}")
            st.write(f"Session ID: {session_id}")
            st.write(f"Student ID: {student_id}")
            st.write(f"Course ID: {course_id}")

def check_test_submission(session_id, student_id, course_id):
    """Utility function to check test submission status"""
    try:
        query = {
            "session_id": str(session_id),
            "course_id": str(course_id),
            "submissions.student_id": str(student_id)
        }
        
        test = subjective_tests_collection.find_one(query)
        return bool(test)
    except Exception as e:
        print(f"Error checking submission: {e}")
        return False
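
# --- Example usage (illustrative sketch, not part of the original module) ---
# Running this file with `streamlit run` would render the faculty view for one
# hard-coded submission. The three IDs below are hypothetical placeholders; in
# the real app they would come from the faculty dashboard's session state.
if __name__ == "__main__":
    example_session_id = "S101"      # hypothetical session id
    example_student_id = "STU001"    # hypothetical student id
    example_course_id = "CS101"      # hypothetical course id

    if check_test_submission(example_session_id, example_student_id, example_course_id):
        display_evaluation_to_faculty(example_session_id, example_student_id, example_course_id)
    else:
        st.info("No submission found for this student in the selected session.")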