# grader/app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import numpy as np
import cv2
from PIL import Image
import pytesseract as tess
from sentence_transformers import SentenceTransformer, util
# Base LLM used for free-form responses
model_name = "eachadea/vicuna-7b-1.1"

# Use CUDA if available; otherwise fall back to the CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Load the model: float16 halves memory usage on GPU; float32 is the safe
# default on CPU
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    device_map="auto" if device == "cuda" else None
)

# device_map="auto" already places the model on the GPU, and calling .to() on
# a dispatched model raises an error; only move it manually on CPU
if device == "cpu":
    model.to(device)
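# On a memory-constrained Space, an 8-bit quantized load can shrink the 7B
# model's footprint further. A sketch assuming bitsandbytes is installed (not
# verified against this Space's requirements):
#
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, load_in_8bit=True, device_map="auto"
# )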
# Point pytesseract at the tesseract binary. The original Windows-style .exe
# path will not resolve in a Linux container, where the binary is typically
# installed at /usr/bin/tesseract
tess.pytesseract.tesseract_cmd = r"/usr/bin/tesseract"
# Load a compact Sentence-BERT model for answer embeddings
embedder = SentenceTransformer('all-MiniLM-L6-v2')

def get_embedding(text):
    # Encode to a tensor so cosine similarity can run directly on it
    return embedder.encode(text, convert_to_tensor=True)
def calculate_similarity(text1, text2):
    # Cosine similarity between the two embeddings: ~1.0 for near-identical
    # answers, near 0.0 for unrelated ones
    embedding1 = get_embedding(text1)
    embedding2 = get_embedding(text2)
    similarity = util.pytorch_cos_sim(embedding1, embedding2)
    return similarity.item()
def get_grade(similarity_score):
    # Map the similarity score onto a 1-5 grade band
    if similarity_score >= 0.9:
        return 5
    elif similarity_score >= 0.8:
        return 4
    elif similarity_score >= 0.7:
        return 3
    elif similarity_score >= 0.6:
        return 2
    else:
        return 1
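# For example, a similarity of 0.85 falls in the [0.8, 0.9) band and maps to
# grade 4; anything below 0.6 bottoms out at grade 1.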
def extract_text_from_image(image):
    # Pytesseract expects RGB; Gradio may hand over RGBA or grayscale images
    image = image.convert('RGB')
    # Use pytesseract to extract text from the image
    text = tess.image_to_string(image)
    return text.strip()
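# Optional: OCR on photographed or low-contrast answers often improves with a
# grayscale + threshold pass, putting the cv2 and numpy imports above to use.
# A minimal sketch; preprocess_for_ocr is an illustrative helper, not part of
# the original app.
def preprocess_for_ocr(image):
    # PIL -> numpy array, then grayscale
    arr = np.array(image.convert('RGB'))
    gray = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
    # Otsu's method picks a global binarization threshold automatically
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return Image.fromarray(binary)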
def evaluate_answer(image):
    student_answer = extract_text_from_image(image)
    # Reference answer the OCR'd response is compared against (hard-coded here)
    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
    return grade, similarity_score * 100, feedback
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Generate a response. Sampling must be enabled for temperature to take
    # effect, and max_new_tokens bounds the reply rather than prompt + reply;
    # passing **inputs also forwards the attention mask
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
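# Vicuna checkpoints are instruction-tuned on a chat template, so raw prompts
# can produce rambling completions. A hedged sketch of the USER/ASSISTANT
# wrapper commonly used with vicuna-7b-1.1 (check the model card before
# relying on it); format_vicuna_prompt is an illustrative helper, not part of
# the original app.
def format_vicuna_prompt(user_message):
    return f"USER: {user_message}\nASSISTANT:"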
def gradio_interface(image, prompt):
grade, similarity_score, feedback = evaluate_answer(image)
response = generate_response(prompt)
return grade, similarity_score, feedback, response
# Define the Gradio interface. Numeric outputs use gr.Number rather than
# gr.Label (which is meant for classification results), and the app runs on
# submit rather than live, since each call invokes the 7B model
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Image(type="pil", label="Student answer (image)"),
        gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
    ],
    outputs=[
        gr.Number(label="Grade (1-5)"),
        gr.Number(label="Similarity (%)"),
        gr.Textbox(label="Feedback"),
        gr.Textbox(label="Model response")
    ]
)
if __name__ == "__main__":
interface.launch()