Upload 3 files
- .gitignore +4 -0
- app.py +90 -0
- requirements.txt +8 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
+flagged/
+*.mp4
+*.mkv
+gradio_cached_examples/
app.py ADDED
@@ -0,0 +1,90 @@
+import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+import numpy as np
+import cv2
+from PIL import Image
+import pytesseract
+from sentence_transformers import SentenceTransformer, util
+import io
+
+# Define the model name and path to the saved model
+model_path = "E:/grader app/saved_model"  # Replace with the path to your saved model
+
+# Check if CUDA is available, otherwise fall back to CPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
+
+# Load the tokenizer and model from the local disk
+tokenizer = AutoTokenizer.from_pretrained(model_path)
+model = AutoModelForCausalLM.from_pretrained(
+    model_path,
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+    device_map="auto" if device == "cuda" else None
+)
+if device == "cpu": model.to(device)  # with device_map="auto", the model is already placed on the GPU
+
+# Load a smaller version of Sentence-BERT model
+model1 = SentenceTransformer('all-MiniLM-L6-v2')
+
+def get_embedding(text):
+    return model1.encode(text, convert_to_tensor=True)
+
+def calculate_similarity(text1, text2):
+    embedding1 = get_embedding(text1)
+    embedding2 = get_embedding(text2)
+    similarity = util.pytorch_cos_sim(embedding1, embedding2)
+    return similarity.item()
+
+def get_grade(similarity_score):
+    if similarity_score >= 0.9:
+        return 5
+    elif similarity_score >= 0.8:
+        return 4
+    elif similarity_score >= 0.7:
+        return 3
+    elif similarity_score >= 0.6:
+        return 2
+    else:
+        return 1
+
+def extract_text_from_image(image):
+    # Convert PIL image to RGB format
+    image = image.convert('RGB')
+    # Use pytesseract to extract text from the image
+    text = pytesseract.image_to_string(image)
+    return text.strip()
+
+def evaluate_answer(image):
+    student_answer = extract_text_from_image(image)
+    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
+    similarity_score = calculate_similarity(student_answer, model_answer)
+    grade = get_grade(similarity_score)
+    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
+    return grade, similarity_score * 100, feedback
+
+def generate_response(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+    # Generate response from the model
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_length=150, do_sample=True, temperature=0.7)  # sampling enabled so temperature takes effect
+
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
+
+def gradio_interface(image, prompt):
+    grade, similarity_score, feedback = evaluate_answer(image)
+    response = generate_response(prompt)
+    return grade, similarity_score, feedback, response
+
+# Define Gradio interface
+interface = gr.Interface(
+    fn=gradio_interface,
+    inputs=[gr.Image(type="pil"), gr.Textbox(lines=2, placeholder="Enter your prompt here")],
+    outputs=[gr.Label(), gr.Label(), gr.Textbox(), gr.Textbox()],
+    live=True
+)
+
+if __name__ == "__main__":
+    interface.launch()
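
A quick way to sanity-check the similarity grading outside Gradio is to run the Sentence-BERT comparison on plain strings. This is a minimal sketch, not part of the commit; it mirrors the `calculate_similarity` and `get_grade` logic from `app.py` above and assumes `sentence-transformers` is installed.

```python
# Standalone check of the similarity-based grading used in app.py (sketch, not in the commit).
from sentence_transformers import SentenceTransformer, util

embedder = SentenceTransformer("all-MiniLM-L6-v2")

def grade_answer(student_answer, model_answer):
    # Cosine similarity between sentence embeddings, as in calculate_similarity()
    emb1 = embedder.encode(student_answer, convert_to_tensor=True)
    emb2 = embedder.encode(model_answer, convert_to_tensor=True)
    similarity = util.pytorch_cos_sim(emb1, emb2).item()
    # Same bands as get_grade(): >=0.9 -> 5, >=0.8 -> 4, >=0.7 -> 3, >=0.6 -> 2, else 1
    for threshold, grade in [(0.9, 5), (0.8, 4), (0.7, 3), (0.6, 2)]:
        if similarity >= threshold:
            return grade, similarity * 100
    return 1, similarity * 100

if __name__ == "__main__":
    reference = "The process of photosynthesis helps plants produce glucose using sunlight."
    answer = "Plants use sunlight during photosynthesis to make glucose."
    print(grade_answer(answer, reference))
```
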
requirements.txt ADDED
@@ -0,0 +1,8 @@
+torch
+transformers
+gradio
+pytesseract
+Pillow
+opencv-python
+sentence-transformers
+numpy
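
One dependency is not covered by requirements.txt: pytesseract is only a wrapper and needs the Tesseract OCR binary on the host, otherwise `extract_text_from_image` raises `TesseractNotFoundError`. On a Hugging Face Space the usual fix is a packages.txt next to requirements.txt so the engine is installed via apt; a sketch, assuming the default Debian package name:

```text
tesseract-ocr
```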