YashJD committed
Commit cea9944 · 1 Parent(s): afd5874

Omkar update 1

Files changed (1)
assignment_evaluation.py +261 -0
assignment_evaluation.py ADDED
@@ -0,0 +1,261 @@
+ # assignment_evaluation.py
+
+ import streamlit as st
+ from datetime import datetime, timezone
+ from pymongo import MongoClient
+ import os
+ from openai import OpenAI
+ from dotenv import load_dotenv
+ from bson import ObjectId
+
+ load_dotenv()
+
+ # MongoDB setup
+ MONGO_URI = os.getenv('MONGO_URI')
+ client = MongoClient(MONGO_URI)
+ db = client["novascholar_db"]
+ assignments_collection = db["assignments"]
+ assignment_evaluation_collection = db["assignment_evaluation"]
+ resources_collection = db["resources"]
+ students_collection = db["students"]
+
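+ # Configuration note: this module expects MONGO_URI and OPENAI_KEY to be set
+ # in the environment (or in a .env file picked up by load_dotenv above)
+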
+ def evaluate_assignment(session_id, student_id, assignment_id):
+     """
+     Generate evaluation and analysis for submitted assignments
+     """
+     try:
+         # Fetch assignment and student submission
+         assignment = assignments_collection.find_one({"_id": assignment_id})
+         if not assignment:
+             return None
+
+         # Find student's submission
+         submission = next(
+             (sub for sub in assignment.get('submissions', [])
+              if sub['student_id'] == str(student_id)),
+             None
+         )
+         if not submission:
+             return None
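+
+         # Note: submissions are embedded on the assignment document and store
+         # student_id as a string, hence the str() comparison above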
+
+         # Default rubric for assignment evaluation
+         default_rubric = """
+         1. Understanding & Implementation (1-4):
+            - Demonstrates understanding of assignment requirements
+            - Implements required components correctly
+            - Shows attention to detail
+
+         2. Quality & Completeness (1-4):
+            - Work is complete and thorough
+            - Meets all assignment objectives
+            - Shows evidence of effort and care
+
+         3. Presentation & Organization (1-4):
+            - Clear and professional presentation
+            - Well-structured and organized
+            - Follows required format and guidelines
+         """
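+
+         # Scoring note: three criteria at 1-4 points each give a raw total of
+         # 3-12, which the evaluation prompt below asks the model to rescale
+         # to a 10-point score (e.g. a raw 9/12 lands around 7.5/10)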
+
+         # Initialize the OpenAI client under a distinct name so it does not
+         # shadow the module-level MongoClient (also bound to `client`)
+         openai_client = OpenAI(api_key=os.getenv('OPENAI_KEY'))
+
+         # Create evaluation prompt
+         prompt_template = f"""As an assignment evaluator, assess this student's submission based on the provided rubric criteria. Follow these guidelines:
+
+         1. Evaluation Process:
+            - Use each rubric criterion (scored 1-4)
+            - Evaluate completeness and quality
+            - Check alignment with assignment requirements
+            - Calculate final score: sum of criteria scores converted to a 10-point scale
+
+         Assignment Title: {assignment['title']}
+         Due Date: {assignment['due_date']}
+
+         Submission Content:
+         {submission.get('text_content', 'No text content available')}
+
+         Rubric Criteria:
+         {default_rubric}
+
+         Provide your assessment in the following format:
+
+         **Overall Score and Summary**
+         - Score: [X]/10
+         - Overall Assessment: [2-3 sentence summary]
+
+         **Strengths**
+         - [Key strength 1]
+         - [Key strength 2]
+         - [Key strength 3]
+
+         **Areas for Improvement**
+         - [Improvement point 1]
+         - [Improvement point 2]
+         - [Improvement point 3]
+
+         **Specific Recommendations**
+         [2-3 sentences with actionable feedback]
+         """
+
+         # Generate evaluation using OpenAI
+         response = openai_client.chat.completions.create(
+             model="gpt-4o-mini",
+             messages=[{"role": "user", "content": prompt_template}],
+             max_tokens=1000,
+             temperature=0.4
+         )
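+         # max_tokens caps the length of the generated evaluation; the low
+         # temperature (0.4) keeps repeated evaluations relatively consistent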
+
+         # Store evaluation in MongoDB
+         evaluation_doc = {
+             "assignment_id": assignment_id,
+             "student_id": student_id,
+             "session_id": session_id,
+             "evaluation": response.choices[0].message.content,
+             "evaluated_at": datetime.now(timezone.utc)
+         }
+
+         assignment_evaluation_collection.insert_one(evaluation_doc)
+         return evaluation_doc
+
+     except Exception as e:
+         print(f"Error in evaluate_assignment: {str(e)}")
+         return None
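+
+ # Minimal usage sketch (hypothetical placeholder IDs, not part of the app's
+ # call sites): evaluate_assignment expects the session id as a string and the
+ # assignment's Mongo ObjectId, and assumes the assignment document already
+ # holds a submission for the given student.
+ #
+ #     doc = evaluate_assignment(
+ #         session_id="673a1b2c3d4e5f6a7b8c9d0e",                # placeholder
+ #         student_id="673a1b2c3d4e5f6a7b8c9d0f",                # placeholder
+ #         assignment_id=ObjectId("673a1b2c3d4e5f6a7b8c9d10"),   # placeholder
+ #     )
+ #     print(doc["evaluation"] if doc else "evaluation failed")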
+
+ def display_evaluation_to_faculty(session_id, student_id, course_id):
+     """
+     Display interface for faculty to generate and view assignment evaluations
+     """
+     st.header("Evaluate Assignments")
+
+     try:
+         # Fetch available assignments
+         assignments = list(assignments_collection.find({
+             "session_id": str(session_id),
+             "course_id": course_id
+         }))
+
+         if not assignments:
+             st.info("No assignments found for this session.")
+             return
+
+         # Select assignment
+         assignment_options = {
+             (f"{assignment['title']} (Due: {assignment['due_date'].strftime('%Y-%m-%d')})"
+              if 'due_date' in assignment else assignment['title']): assignment['_id']
+             for assignment in assignments
+         }
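+         # Labels double as dict keys, so two assignments sharing a title and
+         # due date would collapse into a single dropdown entry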
+
+         if assignment_options:
+             selected_assignment = st.selectbox(
+                 "Select Assignment to Evaluate",
+                 options=list(assignment_options.keys())
+             )
+
+             if selected_assignment:
+                 assignment_id = assignment_options[selected_assignment]
+                 assignment = assignments_collection.find_one({"_id": assignment_id})
+
+                 if assignment:
+                     submissions = assignment.get('submissions', [])
+                     if not submissions:
+                         st.warning("No submissions found for this assignment.")
+                         return
+
+                     # Create a dropdown for student submissions; fall back to
+                     # the raw id if the student record is missing
+                     student_options = {}
+                     for sub in submissions:
+                         student = students_collection.find_one({'_id': ObjectId(sub['student_id'])})
+                         name = student['full_name'] if student else f"Unknown student ({sub['student_id']})"
+                         submitted = sub['submitted_at'].strftime('%Y-%m-%d %H:%M')
+                         student_options[f"{name} (Submitted: {submitted})"] = sub['student_id']
+
+                     selected_student = st.selectbox(
+                         "Select Student Submission",
+                         options=list(student_options.keys())
+                     )
+
+                     if selected_student:
+                         student_id = student_options[selected_student]
+                         submission = next(sub for sub in submissions if sub['student_id'] == student_id)
+
+                         # Display submission details
+                         st.subheader("Submission Details")
+                         st.markdown(f"**Submitted:** {submission['submitted_at'].strftime('%Y-%m-%d %H:%M')}")
+                         st.markdown(f"**File Name:** {submission.get('file_name', 'N/A')}")
+
+                         # Add download button for the submitted file
+                         if 'file_content' in submission:
+                             st.download_button(
+                                 label="Download Submission",
+                                 data=submission['file_content'],
+                                 file_name=submission['file_name'],
+                                 mime=submission['file_type']
+                             )
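+                             # file_content is assumed to hold the raw bytes of
+                             # the upload, stored inline on the submission
+                             # document by the separate submission flow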
+
+                         # Check for existing evaluation
+                         existing_eval = assignment_evaluation_collection.find_one({
+                             "assignment_id": assignment_id,
+                             "student_id": student_id,
+                             "session_id": str(session_id)
+                         })
+
+                         if existing_eval:
+                             st.subheader("Evaluation Results")
+                             st.markdown(existing_eval['evaluation'])
+                             st.success("✓ Evaluation completed")
+
+                             if st.button("Regenerate Evaluation"):
+                                 with st.spinner("Regenerating evaluation..."):
+                                     evaluation = evaluate_assignment(
+                                         str(session_id),
+                                         student_id,
+                                         assignment_id
+                                     )
+                                     if evaluation:
+                                         st.success("Evaluation regenerated successfully!")
+                                         st.rerun()
+                                     else:
+                                         st.error("Error regenerating evaluation.")
+                         else:
+                             if st.button("Generate Evaluation"):
+                                 with st.spinner("Generating evaluation..."):
+                                     evaluation = evaluate_assignment(
+                                         str(session_id),
+                                         student_id,
+                                         assignment_id
+                                     )
+                                     if evaluation:
+                                         st.success("Evaluation generated successfully!")
+                                         # st.rerun() immediately reloads the
+                                         # script, so the fresh evaluation is
+                                         # rendered by the existing_eval branch
+                                         # above rather than here
+                                         st.rerun()
+                                     else:
+                                         st.error("Error generating evaluation.")
+
+     except Exception as e:
+         st.error(f"An error occurred while loading the evaluations: {str(e)}")
+         print(f"Error in display_evaluation_to_faculty: {str(e)}")
+
+ def display_assignment_results(assignment_id, student_id):
+     """
+     Display assignment results and analysis for a student
+     """
+     try:
+         # Fetch analysis from evaluation collection
+         analysis = assignment_evaluation_collection.find_one({
+             "assignment_id": assignment_id,
+             "student_id": str(student_id)
+         })
+
+         if not analysis:
+             st.info("Evaluation will be available soon. Please check back later.")
+             return
+
+         st.header("Assignment Evaluation")
+
+         # Display evaluation content
+         st.markdown(analysis["evaluation"])
+
+         # Display evaluation timestamp
+         st.caption(f"Evaluation generated on: {analysis['evaluated_at'].strftime('%Y-%m-%d %H:%M:%S UTC')}")
+
+     except Exception as e:
+         st.error("An error occurred while loading the evaluation. Please try again later.")
+         print(f"Error in display_assignment_results: {str(e)}")
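+
+ # Wiring sketch (assumed page structure, not defined in this commit): a
+ # faculty dashboard would call display_evaluation_to_faculty(session_id,
+ # student_id, course_id), while a student-facing results page would call
+ # display_assignment_results(assignment["_id"], student_id) once an
+ # evaluation document exists for that student.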