# grader / app.py — Hugging Face Space
# (web-viewer page chrome below, kept as comments so the file parses)
# Garvitj's picture
# Update app.py
# 2aa09ae verified
# raw / history / blame / 6.97 kB
# import gradio as gr
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# import torch
# import numpy as np
# import cv2
# from PIL import Image
# import pytesseract
# from sentence_transformers import SentenceTransformer, util
# import io
# from typing import List
# def extract_text_from_image(filepath: str, languages: List[str]):
# image = Image.open(filepath)
# return pytesseract.image_to_string(image=image, lang=', '.join(languages))
# # tess.pytesseract.tesseract_cmd = r"tesseract"
# import requests
# API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
# headers = {"Authorization": "hf_TsCTtXxnvpmhFKABqKmcVLyLEhjQPsITSVx"}
# def query(payload):
# response = requests.post(API_URL, headers=headers, json=payload)
# return response.json()
# # output = query({
# # "inputs": "Can you please let us know more details about your ",
# # })
# def generate_response(prompt):
# # Generate response from the API
# response = query({"inputs":prompt})
# return response[0]['generated_text']
# def get_embedding(text):
# return model1.encode(text, convert_to_tensor=True)
# def calculate_similarity(text1, text2):
# embedding1 = get_embedding(text1)
# embedding2 = get_embedding(text2)
# similarity = util.pytorch_cos_sim(embedding1, embedding2)
# return similarity.item()
# def get_grade(similarity_score):
# if similarity_score >= 0.9:
# return 5
# elif similarity_score >= 0.8:
# return 4
# elif similarity_score >= 0.7:
# return 3
# elif similarity_score >= 0.6:
# return 2
# else:
# return 1
# def evaluate_answer(image,languages):
# student_answer = extract_text_from_image(image,languages)
# model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
# similarity_score = calculate_similarity(student_answer, model_answer)
# grade = get_grade(similarity_score)
# feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
# return grade, similarity_score * 100, feedback
# def generate_response(prompt):
# # Generate response from the new model using the pipeline
# response = pipe(prompt, max_length=150, temperature=0.7)
# return response[0]['generated_text']
# def gradio_interface(image, languages: List[str]):
# grade, similarity_score, feedback = evaluate_answer(image,languages)
# response = generate_response(prompt)
# return grade, similarity_score, response
# # # Define Gradio interface
# # interface = gr.Interface(
# # fn=gradio_interface,
# # inputs=[gr.Image(type="pil"), gr.Textbox(lines=2, placeholder="Enter your prompt here")],
# # outputs=[gr.Label(), gr.Label(), gr.Textbox(), gr.Textbox()],
# # live=True
# # )
# language_choices = pytesseract.get_languages()
# interface = gr.Interface(
# fn=gradio_interface,
# inputs=[
# gr.Image(type="filepath", label="Input"),
# gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='language')
# ],
# outputs=[gr.Text(label="Grade"), gr.Number(label="Similarity Score (%)"), gr.Text(label="Feedback")],
# title="Automated Grading System",
# description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, and feedback based on the model answer.",
# live=True
# )
# if __name__ == "__main__":
# interface.launch()
import os
from typing import List

import gradio as gr
import pytesseract
import requests
from PIL import Image
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline
# Sentence-embedding model used for answer-similarity scoring.
model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Hugging Face Inference API endpoint for GPT-2 text generation.
API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
# SECURITY: an API token was previously hard-coded here and is leaked in the
# file's history — revoke it. Read the token from the environment instead.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}
# Function to interact with Hugging Face API for GPT-2
def query(payload):
    """POST *payload* to the Hugging Face Inference API and return parsed JSON.

    Raises:
        requests.Timeout: if the API does not answer within 30 seconds
            (the original call had no timeout and could hang forever).
        requests.HTTPError: on a non-2xx response, instead of silently
            returning an error payload.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    response.raise_for_status()
    return response.json()
# Function to generate text response from GPT-2 model using Hugging Face API
def generate_response(prompt):
    """Generate a GPT-2 continuation of *prompt* via the Inference API.

    Returns the generated text, or a readable error string when the API
    response does not have the expected shape. A success response is a list
    of ``{"generated_text": ...}``; an error response (e.g. model still
    loading) is a dict like ``{"error": ...}`` — the original code raised
    KeyError/TypeError on those.
    """
    response = query({"inputs": prompt})
    if isinstance(response, list) and response and 'generated_text' in response[0]:
        return response[0]['generated_text']
    if isinstance(response, dict) and 'error' in response:
        return f"API error: {response['error']}"
    return "Unexpected API response."
# Extract text from an image using Tesseract
def extract_text_from_image(filepath: str, languages: List[str]) -> str:
    """OCR the image at *filepath* with Tesseract.

    Args:
        filepath: path to the image file.
        languages: Tesseract language codes (e.g. ``['eng', 'hin']``).
            Falls back to English when the list is empty, since an empty
            ``lang`` string makes Tesseract fail.

    Returns:
        The recognized text.
    """
    image = Image.open(filepath)
    # Tesseract expects 'eng+hin'-style joined codes.
    lang_str = '+'.join(languages) if languages else 'eng'
    return pytesseract.image_to_string(image=image, lang=lang_str)
# Function to get embeddings for text using SentenceTransformer
def get_embedding(text):
    """Return the sentence-transformer embedding of *text* as a tensor."""
    embedding = model1.encode(text, convert_to_tensor=True)
    return embedding
# Calculate similarity between two texts using cosine similarity
def calculate_similarity(text1, text2):
    """Return the cosine similarity between *text1* and *text2* as a float."""
    vec_a = get_embedding(text1)
    vec_b = get_embedding(text2)
    return util.pytorch_cos_sim(vec_a, vec_b).item()
# Assign grades based on similarity score
def get_grade(similarity_score):
    """Map a similarity score in [0, 1] to an integer grade from 1 to 5."""
    # Threshold bands, checked highest first: score >= bound -> grade.
    grade_bands = ((0.9, 5), (0.8, 4), (0.7, 3), (0.6, 2))
    for lower_bound, grade in grade_bands:
        if similarity_score >= lower_bound:
            return grade
    return 1
# Function to evaluate student's answer by comparing it to a model answer
def evaluate_answer(image, languages, model_answer=None):
    """Grade a student's handwritten answer against a reference answer.

    Args:
        image: path to the answer-sheet image.
        languages: Tesseract language codes for OCR.
        model_answer: reference answer text; defaults to the built-in
            photosynthesis answer, kept for backward compatibility with
            existing callers (previously this was hard-coded).

    Returns:
        Tuple of (grade 1-5, similarity percentage, feedback string).
    """
    if model_answer is None:
        model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    student_answer = extract_text_from_image(image, languages)
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
    return grade, similarity_score * 100, feedback
# Main interface function for Gradio
def gradio_interface(image, languages: List[str], prompt):
    """Gradio callback: grade the uploaded sheet and generate a GPT-2 reply.

    Returns (grade, similarity percentage, feedback, generated text).
    """
    grade, score_pct, feedback = evaluate_answer(image, languages)
    generated = generate_response(prompt)
    return grade, score_pct, feedback, generated
# Get available Tesseract languages
# All languages the local Tesseract install can OCR (populates the checkbox).
language_choices = pytesseract.get_languages()

# Define Gradio interface
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        # Answer-sheet image is passed to the callback as a file path.
        gr.Image(type="filepath", label="Input"),
        gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='language'),
        gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
    ],
    outputs=[
        gr.Text(label="Grade"),
        gr.Number(label="Similarity Score (%)"),
        gr.Text(label="Feedback"),
        gr.Text(label="Generated Response")
    ],
    title="Automated Grading System",
    description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, and feedback based on the model answer.",
    # NOTE(review): live=True re-runs the whole pipeline (OCR + embedding +
    # remote API call) on every input change, which is slow and costly —
    # consider live=False; confirm before changing, as it alters UI behavior.
    live=True
)

if __name__ == "__main__":
    interface.launch()