import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import numpy as np
import cv2
from PIL import Image
import pytesseract as tess
from sentence_transformers import SentenceTransformer, util
import os
import requests

# save_directory = "spaces/Garvitj/grader"
# # Load the tokenizer from the saved directory
# tokenizer = AutoTokenizer.from_pretrained(save_directory)
# # Load the model from the saved directory
# model = AutoModelForCausalLM.from_pretrained(
#     save_directory,
#     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
#     device_map="auto" if torch.cuda.is_available() else None
# )
# # Move model to the appropriate device (CPU or CUDA)
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)
# print(f"Model and tokenizer loaded from {save_directory}")

# Point pytesseract at the tesseract binary (available on PATH in Spaces)
tess.pytesseract.tesseract_cmd = r"tesseract"

# Sentence-embedding model used for answer similarity. The original code
# called `model1` without ever creating it; all-MiniLM-L6-v2 is assumed here
# as a common lightweight sentence-transformers default.
model1 = SentenceTransformer('all-MiniLM-L6-v2')

# Earlier local text-generation pipeline experiment, kept for reference:
# pipe = pipeline("text-generation", model="eachadea/vicuna-7b-1.1")

# Hugging Face Inference API for text generation. Read the token from the
# environment rather than hard-coding it; the API expects a "Bearer" prefix.
API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

def generate_response(prompt):
    # Generate a response from the Inference API
    response = query({"inputs": prompt})
    # While the hosted model is loading (or on auth errors), the API returns
    # a dict with an "error" key instead of a list of generations
    if isinstance(response, list) and response and 'generated_text' in response[0]:
        return response[0]['generated_text']
    return f"Generation failed: {response}"

def get_embedding(text):
    return model1.encode(text, convert_to_tensor=True)

def calculate_similarity(text1, text2):
    embedding1 = get_embedding(text1)
    embedding2 = get_embedding(text2)
    similarity = util.pytorch_cos_sim(embedding1, embedding2)
    return similarity.item()

def get_grade(similarity_score):
    # Map cosine similarity to a 1-5 grade
    if similarity_score >= 0.9:
        return 5
    elif similarity_score >= 0.8:
        return 4
    elif similarity_score >= 0.7:
        return 3
    elif similarity_score >= 0.6:
        return 2
    else:
        return 1

def extract_text_from_image(image):
    # Convert the PIL image to RGB and run Tesseract OCR on it
    image = image.convert('RGB')
    text = tess.image_to_string(image)
    return text.strip()
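# --- Optional OCR preprocessing (illustrative sketch, not wired into the app) ---
# Tesseract generally reads binarized scans better than raw photos. This helper
# shows how the already-imported cv2/numpy could be used for that;
# `preprocess_for_ocr` is a hypothetical name, and extract_text_from_image
# above does not call it. To try it, pass its output to tess.image_to_string.
def preprocess_for_ocr(image):
    rgb = np.array(image.convert('RGB'))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    # Otsu's method picks a global binarization threshold automatically
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return Image.fromarray(binary)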
def evaluate_answer(image):
    student_answer = extract_text_from_image(image)
    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
    return grade, similarity_score * 100, feedback

def gradio_interface(image):
    # The live interface below takes a single image input, so the prompt-based
    # generate_response path (used only by the older, commented-out interface)
    # is not called here.
    grade, similarity_score, feedback = evaluate_answer(image)
    return grade, similarity_score, feedback

# # Older two-input interface (image + prompt), kept for reference; it predates
# # the one-argument gradio_interface signature above:
# interface = gr.Interface(
#     fn=gradio_interface,
#     inputs=[gr.Image(type="pil"), gr.Textbox(lines=2, placeholder="Enter your prompt here")],
#     outputs=[gr.Label(), gr.Label(), gr.Textbox()],
#     live=True
# )

interface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Image(type="pil", label="Upload your answer sheet"),
    outputs=[gr.Text(label="Grade"), gr.Number(label="Similarity Score (%)"), gr.Text(label="Feedback")],
    title="Automated Grading System",
    description="Upload an image of your answer sheet to get a grade from 1 to 5, a similarity score, and feedback based on the model answer.",
    live=True
)

if __name__ == "__main__":
    interface.launch()
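
# --- Local smoke test (hedged sketch; "sample_answer.png" is a hypothetical file) ---
# Uncomment to grade a saved image without the Gradio UI; not part of the
# deployed Space.
# test_image = Image.open("sample_answer.png")
# grade, score, feedback = evaluate_answer(test_image)
# print(f"Grade: {grade}, Similarity: {score:.1f}%\n{feedback}")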