Update app.py
Browse files
app.py
CHANGED
@@ -8,8 +8,14 @@ from PIL import Image
|
|
8 |
from typing import List
|
9 |
import requests
|
10 |
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
|
14 |
# Initialize Groq client
|
15 |
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
@@ -54,21 +60,28 @@ def extract_text_from_image(filepath: str, languages: List[str]):
|
|
54 |
|
55 |
|
56 |
|
57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
58 |
|
59 |
-
# Calculate Levenshtein Distance
def calculate_levenshtein_distance(text1, text2):
    """Return the Levenshtein (edit) distance between *text1* and *text2*.

    Thin wrapper around the third-party ``Levenshtein`` package so callers
    in this module share one entry point for edit-distance computation.
    """
    distance = Levenshtein.distance(text1, text2)
    return distance
|
62 |
|
63 |
-
# Use Levenshtein distance to refine similarity
def calculate_similarity_with_levenshtein(text1, text2):
    """Blend embedding-based similarity with a normalized edit-distance score.

    Parameters
    ----------
    text1, text2 : str
        The two texts to compare.

    Returns
    -------
    float
        The mean of the embedding similarity (from ``calculate_similarity``)
        and ``1 - edit_distance / max_len``, so surface-level character
        differences temper the purely semantic score.
    """
    similarity = calculate_similarity(text1, text2)  # From embeddings
    edit_distance = calculate_levenshtein_distance(text1, text2)

    # Guard against ZeroDivisionError: when both texts are empty,
    # max(len, len) is 0; two empty strings are trivially identical.
    longest = max(len(text1), len(text2))
    normalized_distance = 1.0 if longest == 0 else 1 - (edit_distance / longest)

    # Combine similarity score with normalized Levenshtein distance
    final_similarity = (similarity + normalized_distance) / 2
    return final_similarity
|
72 |
|
73 |
|
74 |
# Assign badges based on the grade
|
@@ -107,21 +120,12 @@ def get_grade(similarity_score):
|
|
107 |
else:
|
108 |
return 1
|
109 |
|
110 |
-
def get_embedding(text):
    """Encode *text* into a tensor embedding via the sentence model ``model1``."""
    encoded = model1.encode(text, convert_to_tensor=True)
    return encoded
|
112 |
-
|
113 |
-
# Calculate similarity between two texts focusing on sequence
def calculate_similarity(text1, text2):
    """Return the cosine similarity of the two texts' embeddings as a float."""
    vec_a = get_embedding(text1)
    vec_b = get_embedding(text2)
    score = util.pytorch_cos_sim(vec_a, vec_b)
    return score.item()
|
119 |
|
120 |
|
121 |
# Function to evaluate student's answer by comparing it to a model answer
|
122 |
def evaluate_answer(image, languages, model_answer):
|
123 |
student_answer = extract_text_from_image(image, languages)
|
124 |
-
similarity_score =
|
125 |
grade = get_grade(similarity_score)
|
126 |
feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
|
127 |
# visual_feedback = generate_sequence_feedback(student_answer, model_answer)
|
|
|
8 |
from typing import List
|
9 |
import requests
|
10 |
|
11 |
+
import torch
|
12 |
+
from transformers import BertTokenizer, BertModel
|
13 |
+
import torch.nn.functional as F
|
14 |
+
|
15 |
+
# Load pre-trained BERT model and tokenizer
|
16 |
+
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
17 |
+
model = BertModel.from_pretrained('bert-base-uncased')
|
18 |
+
|
19 |
|
20 |
# Initialize Groq client
|
21 |
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
|
|
60 |
|
61 |
|
62 |
|
63 |
+
# Function to get BERT embeddings
def get_bert_embedding(text):
    """Embed *text* with BERT by mean-pooling the last hidden state over tokens.

    Uses the module-level ``tokenizer`` and ``model``; inference runs under
    ``torch.no_grad()`` since no gradients are needed for scoring.
    """
    encoded = tokenizer(text, return_tensors='pt', truncation=True, padding=True)
    with torch.no_grad():
        model_output = model(**encoded)
    # Get the embeddings from the last hidden state
    pooled = model_output.last_hidden_state.mean(dim=1)
    return pooled
|
71 |
+
|
72 |
+
# Function to calculate cosine similarity
def calculate_cosine_similarity(embedding1, embedding2):
    """Return the cosine similarity between two embedding tensors as a float.

    Expects batched tensors (shape ``(1, dim)``) so ``F.cosine_similarity``
    yields a single-element tensor whose ``.item()`` is the scalar score.
    """
    return F.cosine_similarity(embedding1, embedding2).item()
|
76 |
+
|
77 |
+
# Function to compare logic of student and teacher answers
def compare_answers(student_answer, teacher_answer):
    """Score how close the student's answer is to the teacher's.

    Both answers are embedded with BERT (``get_bert_embedding``) and compared
    by cosine similarity; the result is a float similarity score.
    """
    emb_student = get_bert_embedding(student_answer)
    emb_teacher = get_bert_embedding(teacher_answer)
    return calculate_cosine_similarity(emb_student, emb_teacher)
|
83 |
|
|
|
|
|
|
|
84 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
85 |
|
86 |
|
87 |
# Assign badges based on the grade
|
|
|
120 |
else:
|
121 |
return 1
|
122 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
123 |
|
124 |
|
125 |
# Function to evaluate student's answer by comparing it to a model answer
|
126 |
def evaluate_answer(image, languages, model_answer):
|
127 |
student_answer = extract_text_from_image(image, languages)
|
128 |
+
similarity_score = compare_answers(student_answer, model_answer)
|
129 |
grade = get_grade(similarity_score)
|
130 |
feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
|
131 |
# visual_feedback = generate_sequence_feedback(student_answer, model_answer)
|