Garvitj committed on
Commit 7337af2 · verified · 1 Parent(s): 6a8245c

Update app.py

Files changed (1)
  1. app.py +158 -0
app.py CHANGED
@@ -0,0 +1,158 @@
+ import os
+ import difflib
+ from groq import Groq
+ import gradio as gr
+ from transformers import pipeline
+ import pytesseract
+ from sentence_transformers import SentenceTransformer, util
+ from PIL import Image
+ from typing import List
+ import requests
+
+ # Initialize sentence transformer model
+ model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+
+ # Initialize Groq client
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+ # System prompt for Groq
+ system_prompt = {
+     "role": "system",
+     "content": "You are a useful assistant. You reply with efficient answers."
+ }
+
+ # Function to interact with Groq for generating response
+ async def chat_groq(message, history):
+     messages = [system_prompt]
+
+     for msg in history:
+         messages.append({"role": "user", "content": str(msg[0])})
+         messages.append({"role": "assistant", "content": str(msg[1])})
+
+     messages.append({"role": "user", "content": str(message)})
+
+     response_content = ''
+
+     stream = client.chat.completions.create(
+         model="llama3-70b-8192",
+         messages=messages,
+         max_tokens=1024,
+         temperature=1.3,
+         stream=True
+     )
+
+     for chunk in stream:
+         content = chunk.choices[0].delta.content
+         if content:
+             response_content += chunk.choices[0].delta.content
+         yield response_content
+
+ # Extract text from an image using Tesseract
+ def extract_text_from_image(filepath: str, languages: List[str]):
+     image = Image.open(filepath)
+     lang_str = '+'.join(languages)  # Join languages for Tesseract
+     return pytesseract.image_to_string(image=image, lang=lang_str)
+
+ # Function to get embeddings for text using SentenceTransformer
+ def get_embedding(text):
+     return model1.encode(text, convert_to_tensor=True)
+
+ # Calculate similarity between two texts using cosine similarity
+ def calculate_similarity(text1, text2):
+     embedding1 = get_embedding(text1)
+     embedding2 = get_embedding(text2)
+     similarity = util.pytorch_cos_sim(embedding1, embedding2)
+     return similarity.item()
+
+ # Assign badges based on the grade
+ def assign_badge(grade):
+     if grade == 5:
+         return "Gold Badge 🌟"
+     elif grade == 4:
+         return "Silver Badge 🥈"
+     elif grade == 3:
+         return "Bronze Badge 🥉"
+     else:
+         return "Keep Improving Badge 💪"
+
+ # Generate visual feedback by comparing answers
+ def generate_visual_feedback(student_answer, model_answer):
+     diff = difflib.ndiff(student_answer.split(), model_answer.split())
+     highlighted_diff = ' '.join(
+         [f"**{word}**" if word.startswith('-') else word for word in diff if not word.startswith('?')]
+     )
+     return highlighted_diff
+
+ # Categorize feedback into clarity, completeness, and accuracy
+ def detailed_feedback(similarity_score):
+     if similarity_score >= 0.9:
+         return {"Clarity": "Excellent", "Completeness": "Complete", "Accuracy": "Accurate"}
+     elif similarity_score >= 0.8:
+         return {"Clarity": "Good", "Completeness": "Almost Complete", "Accuracy": "Mostly Accurate"}
+     elif similarity_score >= 0.7:
+         return {"Clarity": "Fair", "Completeness": "Partial", "Accuracy": "Some Errors"}
+     else:
+         return {"Clarity": "Needs Improvement", "Completeness": "Incomplete", "Accuracy": "Inaccurate"}
+
+ # Assign grades based on similarity score
+ def get_grade(similarity_score):
+     if similarity_score >= 0.9:
+         return 5
+     elif similarity_score >= 0.8:
+         return 4
+     elif similarity_score >= 0.7:
+         return 3
+     elif similarity_score >= 0.6:
+         return 2
+     else:
+         return 1
+
+ # Function to evaluate student's answer by comparing it to a model answer
+ def evaluate_answer(image, languages, model_answer):
+     student_answer = extract_text_from_image(image, languages)
+     similarity_score = calculate_similarity(student_answer, model_answer)
+     grade = get_grade(similarity_score)
+     feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
+     visual_feedback = generate_visual_feedback(student_answer, model_answer)
+     badge = assign_badge(grade)
+     detailed_feedback_msg = detailed_feedback(similarity_score)
+     prompt = f"The student got grade: {grade} when the student's answer is: {student_answer} and the teacher's answer is: {model_answer}. Justify the grade given to the student."
+     return grade, similarity_score * 100, feedback, visual_feedback, badge, detailed_feedback_msg, prompt
+
+ # Main interface function for Gradio
+ async def gradio_interface(image, languages: List[str], model_answer, prompt="", history=[]):
+     grade, similarity_score, feedback, visual_feedback, badge, detailed_feedback_msg, prompt = evaluate_answer(image, languages, model_answer)
+     response = ""
+     async for result in chat_groq(prompt, history):
+         response = result  # Get the Groq response
+     return grade, similarity_score, feedback, visual_feedback, badge, detailed_feedback_msg, response
+
+ # Get available Tesseract languages
+ language_choices = pytesseract.get_languages()
+
+ # Define Gradio interface
+ interface = gr.Interface(
+     fn=gradio_interface,
+     inputs=[
+         gr.Image(type="filepath", label="Input"),
+         gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='Language'),
+         gr.Textbox(lines=2, placeholder="Enter your model answer here", label="Model Answer"),
+         gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
+     ],
+     outputs=[
+         gr.Text(label="Grade"),
+         gr.Number(label="Similarity Score (%)"),
+         gr.Text(label="Feedback"),
+         gr.HTML(label="Visual Feedback"),
+         gr.Text(label="Badge"),
+         gr.JSON(label="Detailed Feedback"),
+         gr.Text(label="Generated Response")
+     ],
+     title="Enhanced Automated Grading System",
+     description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, visual feedback, badge, and detailed feedback based on the model answer.",
+     live=True
+ )
+
+ if __name__ == "__main__":
+     interface.queue()
+     interface.launch()