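"""Automated grading demo: OCR a handwritten answer sheet with Tesseract,
score it against a reference answer using sentence-transformer embeddings,
map the similarity to a 1-5 grade, and ask a hosted GPT-2 model (via the
Hugging Face Inference API) to justify the grade. A Gradio UI ties it together."""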

import os
from typing import List

import gradio as gr
import pytesseract
import requests
from PIL import Image
from sentence_transformers import SentenceTransformer, util

# Sentence-embedding model used to compare the student's answer with the model answer.
model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Hugging Face Inference API endpoint for text generation (GPT-2).
API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
# The API token is read from the environment (HF_API_TOKEN) instead of being hard-coded.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}


def query(payload):
    """POST a payload to the Inference API and return the decoded JSON response."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()


def generate_response(prompt):
    """Generate a grade justification from the hosted GPT-2 model."""
    response = query({"inputs": prompt})

    # A successful call returns a list of dicts containing a 'generated_text' field.
    if isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
        return response[0]['generated_text']
    else:
        print("Unexpected response format:", response)
        return "Sorry, I couldn't generate a response."


def extract_text_from_image(filepath: str, languages: List[str]) -> str:
    """Run Tesseract OCR on the uploaded image using the selected languages."""
    image = Image.open(filepath)
    lang_str = '+'.join(languages)
    return pytesseract.image_to_string(image=image, lang=lang_str)


def get_embedding(text):
    """Encode text into a dense embedding tensor."""
    return model1.encode(text, convert_to_tensor=True)


def calculate_similarity(text1, text2):
    """Return the cosine similarity between two texts."""
    embedding1 = get_embedding(text1)
    embedding2 = get_embedding(text2)
    similarity = util.pytorch_cos_sim(embedding1, embedding2)
    return similarity.item()


def get_grade(similarity_score):
    """Map a cosine similarity score to a grade from 1 to 5."""
    if similarity_score >= 0.9:
        return 5
    elif similarity_score >= 0.8:
        return 4
    elif similarity_score >= 0.7:
        return 3
    elif similarity_score >= 0.6:
        return 2
    else:
        return 1


def evaluate_answer(image, languages):
    """OCR the student's answer, compare it with the model answer, and grade it."""
    student_answer = extract_text_from_image(image, languages)
    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
    prompt = (
        f"The student received a grade of {grade}. Student's answer: {student_answer} "
        f"Teacher's answer: {model_answer}. Justify the grade given to the student."
    )
    return grade, similarity_score * 100, feedback, prompt


def gradio_interface(image, languages: List[str], prompt=""):
    """Gradio callback: grade the uploaded answer and generate a justification.

    The prompt textbox value is currently unused; the prompt built inside
    evaluate_answer() is what gets sent to the language model.
    """
    grade, similarity_score, feedback, prompt = evaluate_answer(image, languages)
    response = generate_response(prompt)
    return grade, similarity_score, feedback, response


# All OCR languages available in the local Tesseract installation.
language_choices = pytesseract.get_languages()

interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Image(type="filepath", label="Input"),
        gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='Language'),
        gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
    ],
    outputs=[
        gr.Text(label="Grade"),
        gr.Number(label="Similarity Score (%)"),
        gr.Text(label="Feedback"),
        gr.Text(label="Generated Response")
    ],
    title="Automated Grading System",
    description="Upload an image of your answer sheet to get a grade from 1 to 5, a similarity score, and feedback based on the model answer.",
    live=True
)
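
# To run locally (assuming this file is saved as app.py and a valid Hugging Face
# token is available):
#   HF_API_TOKEN=hf_xxx python app.py
# Gradio prints a local URL; open it in a browser to use the grading UI.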

if __name__ == "__main__":
    interface.launch()