omkar-surve126 committed (verified)
Commit: d45297d · Parent: 6cdfd32

Upload 2 files

Files changed (2)
  1. rubrics.py +112 -0
  2. subjective_test_evaluation.py +252 -0
rubrics.py ADDED
@@ -0,0 +1,112 @@
+ import streamlit as st
+ from pymongo import MongoClient
+ from openai import OpenAI
+ from bson import ObjectId
+ import json
+ from dotenv import load_dotenv
+ import os
+
+ load_dotenv()
+ MONGO_URI = os.getenv('MONGO_URI')
+ OPENAI_API_KEY = os.getenv('OPENAI_KEY')
+
+ client = MongoClient(MONGO_URI)
+ db = client['novascholar_db']
+ # db.create_collection("rubrics")
+ rubrics_collection = db['rubrics']
+ resources_collection = db['resources']
+ courses_collection = db['courses']
+
+ def generate_rubrics(api_key, session_title, outcome_description, taxonomy, pre_class_material):
+     prompt = f"""
+     You are an expert educational AI assistant specializing in instructional design. Generate a detailed rubric for the session titled "{session_title}". The rubric should be aligned with Bloom's Taxonomy level "{taxonomy}" and use numerical scoring levels (4,3,2,1) instead of descriptive levels. Use the following context:
+
+     Session Outcome Description:
+     {outcome_description}
+
+     Pre-class Material:
+     {pre_class_material}
+
+     Please generate the rubric in JSON format with these specifications:
+     1. Use numerical levels (4=Highest, 1=Lowest) instead of descriptive levels
+     2. Include 4-5 relevant criteria based on the session outcome
+     3. Each criterion should have clear descriptors for each numerical level
+     4. Focus on objectively measurable aspects for evaluation
+     5. Structure should be suitable for evaluating assignments and test answers
+
+     ***IMPORTANT: DO NOT INCLUDE THE WORD JSON IN THE OUTPUT STRING, DO NOT INCLUDE BACKTICKS (```) IN THE OUTPUT, AND DO NOT INCLUDE ANY OTHER TEXT, OTHER THAN THE ACTUAL JSON RESPONSE. START THE RESPONSE STRING WITH AN OPEN CURLY BRACE {{ AND END WITH A CLOSING CURLY BRACE }}.***
+     """
+
+     messages = [
+         {
+             "role": "system",
+             "content": "You are an expert educational AI assistant specializing in instructional design.",
+         },
+         {
+             "role": "user",
+             "content": prompt
+         },
+     ]
+
+     try:
+         # Use a distinct name so the module-level MongoClient `client` is not shadowed
+         openai_client = OpenAI(api_key=api_key)
+         response = openai_client.chat.completions.create(
+             model="gpt-4-0125-preview",
+             messages=messages
+         )
+         return response.choices[0].message.content
+     except Exception as e:
+         st.error(f"Failed to generate rubrics: {e}")
+         return None
+
+ def display_rubrics_tab(session, course_id):
+     st.subheader("Generated Rubrics")
+
+     # Fetch session details from the courses collection
+     course_data = courses_collection.find_one(
+         {"course_id": course_id, "sessions.session_id": session['session_id']},
+         {"sessions.$": 1}
+     )
+
+     if course_data and 'sessions' in course_data and len(course_data['sessions']) > 0:
+         session_data = course_data['sessions'][0]
+
+         # Extract session learning outcomes
+         if 'session_learning_outcomes' in session_data and len(session_data['session_learning_outcomes']) > 0:
+             outcome = session_data['session_learning_outcomes'][0]
+             outcome_description = outcome.get('outcome_description', '')
+             taxonomy_level = outcome.get('bloom_taxonomy_level', '')
+
+             # Display fetched information
+             st.markdown("### Session Information")
+             st.markdown(f"**Session Title:** {session['title']}")
+             st.markdown(f"**Learning Outcome:** {outcome_description}")
+             st.markdown(f"**Taxonomy Level:** {taxonomy_level}")
+
+             # Fetch pre-class material
+             pre_class_material_docs = resources_collection.find({"session_id": session['session_id']})
+             pre_class_material = "\n".join([f"{doc.get('title', 'No Title')}: {doc.get('url', 'No URL')}" for doc in pre_class_material_docs])
+
+             if st.button("Generate Rubric"):
+                 rubric = generate_rubrics(
+                     OPENAI_API_KEY,
+                     session['title'],
+                     outcome_description,
+                     taxonomy_level,
+                     pre_class_material
+                 )
+                 if rubric:
+                     # Persist the rubric across reruns; with a nested st.button the
+                     # generated rubric would be lost before "Save Rubric" is clicked.
+                     st.session_state['generated_rubric'] = rubric
+
+             if 'generated_rubric' in st.session_state:
+                 st.json(st.session_state['generated_rubric'])
+                 if st.button("Save Rubric"):
+                     rubric_data = {
+                         "course_id": course_id,
+                         "session_id": session['session_id'],
+                         "rubric": json.loads(st.session_state['generated_rubric'])
+                     }
+                     rubrics_collection.insert_one(rubric_data)
+                     st.success("Rubric saved successfully!")
+         else:
+             st.error("No learning outcomes found for this session")
+     else:
+         st.error("Session data not found")
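
Note on the expected model output: the prompt in generate_rubrics asks for bare JSON (no backticks, no surrounding prose), which display_rubrics_tab then passes through json.loads before inserting into rubrics_collection. Below is a minimal sketch of exercising this outside the Streamlit tab, assuming the file above is importable as rubrics and the MONGO_URI / OPENAI_KEY environment variables are set; the session title, outcome text, and ids are made-up placeholders:

    import json
    from rubrics import OPENAI_API_KEY, generate_rubrics, rubrics_collection

    # Hypothetical inputs; in the app these come from the courses and resources collections.
    raw = generate_rubrics(
        OPENAI_API_KEY,
        "Introduction to Sorting Algorithms",
        "Students can compare sorting algorithms by time and space complexity",
        "Analyze",
        "Lecture notes: Big-O basics\nSlides: merge sort vs. quick sort",
    )

    if raw:
        try:
            rubric = json.loads(raw)  # the prompt requests raw JSON with no code fences
        except json.JSONDecodeError:
            rubric = None  # models occasionally wrap output in ``` despite the instruction
        if rubric:
            rubrics_collection.insert_one({
                "course_id": "COURSE101",   # placeholder ids
                "session_id": "SESSION01",
                "rubric": rubric,
            })

The prompt does not pin down the exact key layout of the generated rubric; downstream code only assumes the response is valid JSON.
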
subjective_test_evaluation.py ADDED
@@ -0,0 +1,252 @@
+ import openai
+ from pymongo import MongoClient
+ from datetime import datetime
+ import os
+ from dotenv import load_dotenv
+ import re
+ import streamlit as st
+ from bson import ObjectId
+
+ load_dotenv()
+ MONGO_URI = os.getenv('MONGO_URI')
+ OPENAI_API_KEY = os.getenv('OPENAI_KEY')
+
+ client = MongoClient(MONGO_URI)
+ db = client['novascholar_db']
+ rubrics_collection = db['rubrics']
+ resources_collection = db['resources']
+ subjective_tests_collection = db['subjective_tests']
+ subjective_test_analysis_collection = db['subjective_test_analysis']
+
+ openai.api_key = OPENAI_API_KEY
+
+ def evaluate_subjective_answers(test_id, student_id, course_id):
+     """Evaluate subjective test answers using OpenAI."""
+     try:
+         # Get test and submission details
+         test_doc = subjective_tests_collection.find_one({
+             "_id": ObjectId(test_id),
+             "course_id": course_id
+         })
+         if not test_doc:
+             return {
+                 "content_analysis": "Error: Test not found",
+                 "analyzed_at": datetime.utcnow(),
+                 "correctness_score": 0
+             }
+
+         submission = next(
+             (sub for sub in test_doc.get('submissions', []) if sub['student_id'] == student_id),
+             None
+         )
+
+         if not submission:
+             return {
+                 "content_analysis": "Error: Submission not found",
+                 "analyzed_at": datetime.utcnow(),
+                 "correctness_score": 0
+             }
+
+         # Rest of the evaluation logic remains the same
+         questions = test_doc.get('questions', [])
+         student_answers = submission.get('answers', [])
+
+         if not questions or not student_answers:
+             return {
+                 "content_analysis": "Error: No questions or answers found",
+                 "analyzed_at": datetime.utcnow(),
+                 "correctness_score": 0
+             }
+
+         # Retrieve rubrics for the session
+         rubric_doc = rubrics_collection.find_one({
+             "session_id": test_doc['session_id'],
+             "course_id": course_id
+         })
+
+         if not rubric_doc:
+             return {
+                 "content_analysis": "Error: Rubric not found",
+                 "analyzed_at": datetime.utcnow(),
+                 "correctness_score": 0
+             }
+
+         rubric = rubric_doc.get('rubric', {})
+
+         # Retrieve pre-class materials
+         pre_class_materials = resources_collection.find({
+             "session_id": test_doc['session_id'],
+             "course_id": course_id
+         })
+         pre_class_content = "\n".join([material.get('text_content', '') for material in pre_class_materials])
+
+         # Analyze each question
+         all_analyses = []
+         total_score = 0
+
+         for i, (question, answer) in enumerate(zip(questions, student_answers), 1):
+             analysis_content = f"Question {i}: {question['question']}\nAnswer: {answer}\n\nRubric: {rubric}\n\nPre-class Materials: {pre_class_content}\n\n"
+
+             prompt_template = f"""As an educational assessor, evaluate this student's answer based on the provided rubric criteria and pre-class materials. Follow these assessment guidelines:
+
+             1. Evaluation Process:
+                - Use each rubric criterion (scored 1-4) for internal assessment
+                - Compare response with pre-class materials
+                - Check alignment with all rubric requirements
+                - Calculate final score: sum of criteria scores converted to 10-point scale
+
+             Pre-class Materials:
+             {pre_class_content}
+
+             Rubric Criteria:
+             {rubric}
+
+             Question and Answer:
+             {analysis_content}
+
+             Provide your assessment in the following format:
+
+             **Score and Evidence**
+             - Score: [X]/10
+             - Evidence for deduction: [One-line reference to most significant gap or inaccuracy]
+             **Key Areas for Improvement**
+             - [Concise improvement point 1]
+             - [Concise improvement point 2]
+             - [Concise improvement point 3]
+             """
118
+ response = openai.Completion.create(
119
+ model="text-davinci-003",
120
+ prompt=prompt_template,
121
+ max_tokens=500,
122
+ temperature=0.7
123
+ )
124
+
125
+ individual_analysis = response.choices[0].text.strip()
126
+
127
+ try:
128
+ score_match = re.search(r'Score: (\d+)', individual_analysis)
129
+ question_score = int(score_match.group(1)) if score_match else 0
130
+ total_score += question_score
131
+ except:
132
+ question_score = 0
133
+
134
+ formatted_analysis = f"\n\n## Question {i} Analysis\n\n{individual_analysis}"
135
+ all_analyses.append(formatted_analysis)
136
+
137
+ average_score = round(total_score / len(questions)) if questions else 0
138
+ combined_analysis = "\n".join(all_analyses)
139
+
140
+ return {
141
+ "content_analysis": combined_analysis,
142
+ "analyzed_at": datetime.utcnow(),
143
+ "correctness_score": average_score
144
+ }
145
+
146
+ except Exception as e:
147
+ return {
148
+ "content_analysis": f"Error evaluating answers: {str(e)}",
149
+ "analyzed_at": datetime.utcnow(),
150
+ "correctness_score": 0
151
+ }
152
+
153
+ def display_evaluation_to_faculty(session_id, student_id, course_id):
154
+ """Display submitted tests with improved error handling and debugging"""
155
+ st.subheader("Evaluate Subjective Tests")
156
+
157
+ try:
158
+ # Convert all IDs to strings for consistent comparison
159
+ session_id = str(session_id)
160
+ student_id = str(student_id)
161
+ course_id = str(course_id)
162
+
163
+ print(f"Searching for tests with session_id: {session_id}, student_id: {student_id}, course_id: {course_id}")
164
+
165
+ # Query for tests
166
+ query = {
167
+ "session_id": session_id,
168
+ "course_id": course_id,
169
+ "submissions": {
170
+ "$elemMatch": {
171
+ "student_id": student_id
172
+ }
173
+ }
174
+ }
175
+
176
+ # Log the query for debugging
177
+ print(f"MongoDB Query: {query}")
178
+
179
+ # Fetch tests
180
+ tests = list(subjective_tests_collection.find(query))
181
+ print(f"Found {len(tests)} tests matching query")
182
+
183
+ if not tests:
184
+ # Check if any tests exist for this session
185
+ all_session_tests = list(subjective_tests_collection.find({
186
+ "session_id": session_id,
187
+ "course_id": course_id
188
+ }))
189
+
190
+ if all_session_tests:
191
+ print(f"Found {len(all_session_tests)} tests for this session, but no submissions from student {student_id}")
192
+ st.warning("No submitted tests found for this student, but tests exist for this session.")
193
+ else:
194
+ print("No tests found for this session at all")
195
+ st.info("No tests have been created for this session yet.")
196
+ return
197
+
198
+ # Display tests and handle evaluation
199
+ for test in tests:
200
+ with st.expander(f"Test: {test.get('title', 'Untitled Test')}", expanded=True):
201
+ # Find student submission
202
+ submission = next(
203
+ (sub for sub in test.get('submissions', [])
204
+ if sub['student_id'] == student_id),
205
+ None
206
+ )
207
+
208
+ if submission:
209
+ st.write("### Student's Answers")
210
+ for i, (question, answer) in enumerate(zip(test['questions'], submission['answers'])):
211
+ st.markdown(f"**Q{i+1}:** {question['question']}")
212
+ st.markdown(f"**A{i+1}:** {answer}")
213
+ st.markdown("---")
214
+
215
+ # Generate/display analysis
216
+ if st.button(f"Generate Analysis for {test.get('title')}"):
217
+ with st.spinner("Analyzing responses..."):
218
+ analysis = evaluate_subjective_answers(
219
+ str(test['_id']),
220
+ student_id,
221
+ course_id
222
+ )
223
+
224
+ if analysis:
225
+ st.markdown("### Analysis")
226
+ st.markdown(analysis['content_analysis'])
227
+ st.metric("Score", f"{analysis['correctness_score']}/10")
228
+ else:
229
+ st.error("Submission data not found for this student")
230
+
231
+ except Exception as e:
232
+ st.error("An error occurred while loading the tests")
233
+ with st.expander("Error Details"):
234
+ st.write(f"Error: {str(e)}")
235
+ st.write(f"Session ID: {session_id}")
236
+ st.write(f"Student ID: {student_id}")
237
+ st.write(f"Course ID: {course_id}")
238
+
239
+ def check_test_submission(session_id, student_id, course_id):
240
+ """Utility function to check test submission status"""
241
+ try:
242
+ query = {
243
+ "session_id": str(session_id),
244
+ "course_id": str(course_id),
245
+ "submissions.student_id": str(student_id)
246
+ }
247
+
248
+ test = subjective_tests_collection.find_one(query)
249
+ return bool(test)
250
+ except Exception as e:
251
+ print(f"Error checking submission: {e}")
252
+ return False
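
For completeness, a minimal sketch of how a faculty-facing Streamlit page might wire these helpers together; the page layout and the way the ids are obtained here are assumptions, not part of this commit:

    import streamlit as st
    from subjective_test_evaluation import check_test_submission, display_evaluation_to_faculty

    # Placeholder inputs; in novascholar these ids would come from the logged-in
    # faculty member's course and session context rather than free-text fields.
    session_id = st.text_input("Session ID")
    student_id = st.text_input("Student ID")
    course_id = st.text_input("Course ID")

    if session_id and student_id and course_id:
        if check_test_submission(session_id, student_id, course_id):
            # Renders the submitted answers and the per-test "Generate Analysis" buttons
            display_evaluation_to_faculty(session_id, student_id, course_id)
        else:
            st.info("No submitted test found for this student in the selected session.")

Note that evaluate_subjective_answers returns its analysis dict to the caller but does not write to subjective_test_analysis_collection, so persisting results (if desired) is left to the calling page.
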