File size: 3,273 Bytes
f7dfa37
eada6b0
f7dfa37
 
 
 
d9f22c4
f7dfa37
 
3583331
f7dfa37
3583331
 
 
f7dfa37
3583331
f7dfa37
4e5c44e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f7dfa37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
860f072
f7dfa37
 
 
eada6b0
 
 
f7dfa37
 
 
 
860f072
 
 
 
 
 
 
 
 
54d872b
f7dfa37
 
54d872b
 
 
 
860f072
 
7e73101
f7dfa37
 
860f072
f7dfa37
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import numpy as np
import cv2
from PIL import Image
import pytesseract
from sentence_transformers import SentenceTransformer, util
import io
from typing import List

def extract_text_from_image(filepath: str, languages: List[str] = None) -> str:
    """OCR an image file and return the extracted text.

    Args:
        filepath: Path to an image file readable by PIL.
        languages: Tesseract language codes, e.g. ``['eng', 'fra']``.
            Defaults to ``['eng']`` (the function is called elsewhere in
            this file with no language argument).

    Returns:
        The text Tesseract recognised in the image.
    """
    # None default (not a mutable list default) + fallback keeps the
    # call sites that pass only a filepath working.
    if not languages:
        languages = ['eng']
    image = Image.open(filepath)
    # BUG FIX: Tesseract expects multiple languages joined with '+'
    # (e.g. "eng+fra"); the previous ", " separator produced an invalid
    # lang specification for multi-language input.
    return pytesseract.image_to_string(image=image, lang='+'.join(languages))

# tess.pytesseract.tesseract_cmd = r"tesseract"

import os
import requests

# Hosted inference endpoint for the gpt2 text-generation model.
API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
# SECURITY FIX: never commit API tokens to source control — the token
# previously hard-coded here should be revoked.  Read it from the
# environment instead.  Also, the HF Inference API expects the
# "Bearer <token>" scheme, which the old header omitted.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}

def query(payload):
    """POST *payload* to the HF Inference API and return the parsed JSON.

    Args:
        payload: JSON-serialisable dict, e.g. ``{"inputs": "some text"}``.

    Returns:
        The decoded JSON response — a list of generations on success, or
        a dict with an ``error`` key on failure.
    """
    # Re-indented with 4 spaces (the original used tabs, inconsistent
    # with the rest of the file).  A timeout prevents the app from
    # hanging indefinitely when the remote API is slow or unreachable.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json()
	
# output = query({
# 	"inputs": "Can you please let us know more details about your ",
# })

def generate_response(prompt):
    """Generate a continuation of *prompt* via the HF Inference API.

    NOTE(review): this definition is shadowed by a second
    ``generate_response`` later in the file (the local-pipeline
    variant); only one of the two should be kept.

    Args:
        prompt: Text to continue.

    Returns:
        The generated text from the first candidate.

    Raises:
        RuntimeError: if the API returns an error payload.
    """
    response = query({"inputs": prompt})
    # BUG FIX: on failure the API returns a dict like {"error": "..."}
    # rather than a list, so ``response[0]`` crashed with an opaque
    # KeyError/TypeError.  Surface the API error explicitly.
    if isinstance(response, dict) and "error" in response:
        raise RuntimeError(f"Inference API error: {response['error']}")
    return response[0]['generated_text']


def get_embedding(text):
    """Encode *text* into a sentence-embedding tensor.

    BUG FIX: ``model1`` was referenced but never defined anywhere in
    this file, so the first call raised NameError.  Create and cache a
    SentenceTransformer lazily on first use instead.

    Args:
        text: The string to embed.

    Returns:
        A torch tensor embedding (``convert_to_tensor=True``).
    """
    global model1
    if "model1" not in globals():
        # TODO(review): confirm the intended checkpoint — this is a
        # common lightweight default for sentence similarity.
        model1 = SentenceTransformer("all-MiniLM-L6-v2")
    return model1.encode(text, convert_to_tensor=True)

def calculate_similarity(text1, text2):
    """Return the cosine similarity between the embeddings of two texts.

    Args:
        text1: First text.
        text2: Second text.

    Returns:
        Cosine similarity as a plain Python float.
    """
    vec_a, vec_b = get_embedding(text1), get_embedding(text2)
    return util.pytorch_cos_sim(vec_a, vec_b).item()

def get_grade(similarity_score):
    """Map a similarity score onto a 1-5 grade.

    Args:
        similarity_score: Cosine similarity, nominally in [0, 1].

    Returns:
        An integer grade: 5 for >= 0.9, 4 for >= 0.8, 3 for >= 0.7,
        2 for >= 0.6, otherwise 1.
    """
    # Highest-first cutoff table; first threshold met wins.
    for cutoff, grade in ((0.9, 5), (0.8, 4), (0.7, 3), (0.6, 2)):
        if similarity_score >= cutoff:
            return grade
    return 1


def evaluate_answer(image):
    """Grade a student's answer-sheet image against the model answer.

    Args:
        image: Filepath of the uploaded answer image (the Gradio input
            uses ``type="filepath"``).

    Returns:
        Tuple ``(grade, percentage, feedback)`` where grade is 1-5,
        percentage is similarity * 100, and feedback shows both answers.
    """
    # BUG FIX: extract_text_from_image requires a languages argument;
    # calling it with only the image raised TypeError.  Default to
    # English, matching the interface's default checkbox value.
    student_answer = extract_text_from_image(image, ['eng'])
    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
    return grade, similarity_score * 100, feedback

def generate_response(prompt):
    """Generate a continuation of *prompt* with a local transformers pipeline.

    NOTE(review): this shadows the earlier API-based ``generate_response``;
    only one of the two definitions should be kept.

    BUG FIX: ``pipe`` was referenced but never created anywhere in this
    file, so the first call raised NameError.  Build and cache the
    pipeline lazily on first use.

    Args:
        prompt: Text to continue.

    Returns:
        The generated text from the first candidate.
    """
    global pipe
    if "pipe" not in globals():
        # TODO(review): confirm the intended checkpoint; gpt2 matches
        # the remote model configured in API_URL.
        pipe = pipeline("text-generation", model="gpt2")
    response = pipe(prompt, max_length=150, temperature=0.7)
    return response[0]['generated_text']

def gradio_interface(image, prompt):
    """Gradio callback: grade the uploaded answer-sheet image.

    Args:
        image: Filepath of the uploaded image (first Gradio input).
        prompt: Value of the second Gradio input.  NOTE(review): in the
            interface below the second input is a language CheckboxGroup,
            so this actually receives a list of language codes, not free
            text — feeding it to the text-generation model was a wiring
            bug.

    Returns:
        Tuple ``(grade, similarity_percentage, feedback)`` matching the
        interface's three outputs (Grade, Similarity Score, Feedback).
    """
    grade, similarity_score, feedback = evaluate_answer(image)
    # BUG FIX: the third output component is labelled "Feedback", so
    # return the grading feedback rather than an unrelated generated
    # response driven by the language-checkbox value.
    return grade, similarity_score, feedback

# # Define Gradio interface
# interface = gr.Interface(
#     fn=gradio_interface,
#     inputs=[gr.Image(type="pil"), gr.Textbox(lines=2, placeholder="Enter your prompt here")],
#     outputs=[gr.Label(), gr.Label(), gr.Textbox(), gr.Textbox()],
#     live=True
# )
# Offer every language pack installed with the local Tesseract binary.
language_choices = pytesseract.get_languages()
# NOTE(review): the second input (a list of language codes from the
# CheckboxGroup) is passed to gradio_interface's ``prompt`` parameter —
# verify that wiring is intentional.
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Image(type="filepath", label="Input"), 
        gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='language')
        ],
    outputs=[gr.Text(label="Grade"), gr.Number(label="Similarity Score (%)"), gr.Text(label="Feedback")],
    title="Automated Grading System",
    description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, and feedback based on the model answer.",
    live=True
)
    

# Launch the Gradio app only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()