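"""Gradio app for grading a student's answer captured as an image.

Pipeline: extract the answer text with Tesseract OCR, compare it to a reference
answer via sentence-embedding cosine similarity, map the similarity to a 1-5
grade, and request written feedback from a hosted text-generation model.
"""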
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import numpy as np
import cv2
from PIL import Image
import pytesseract as tess
from sentence_transformers import SentenceTransformer, util
import io
import os
import requests
# save_directory = "spaces/Garvitj/grader"
# # Load the tokenizer from the saved directory
# tokenizer = AutoTokenizer.from_pretrained(save_directory)
# # Load the model from the saved directory
# model = AutoModelForCausalLM.from_pretrained(
#     save_directory,
#     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
#     device_map="auto" if torch.cuda.is_available() else None,
# )
# # Move the model to the appropriate device (CPU or CUDA)
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)
# print(f"Model and tokenizer loaded from {save_directory}")
# Point pytesseract at the Tesseract binary (adjust this path for your environment)
tess.pytesseract.tesseract_cmd = r"Garvitj/grader/tesseract.exe"
# Use a pipeline as a high-level helper
# pipe = pipeline("text-generation", model="eachadea/vicuna-7b-1.1")
# Initialize the pipeline with the Hugging Face API
# pipe = pipeline("text-generation", model="eachadea/vicuna-7b-1.1", api_key="your_api_key")
API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
# Read the API token from an environment variable (e.g. a Space secret) rather than hard-coding it
HF_TOKEN = os.environ.get("HF_TOKEN", "")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
# output = query({
#     "inputs": "Can you please let us know more details about your ",
# })
def generate_response(prompt):
    # Generate a feedback response from the hosted inference API
    response = query({"inputs": prompt})
    return response[0]['generated_text']
# Sentence-embedding model used for answer similarity
# ("all-MiniLM-L6-v2" is an example checkpoint; swap in your own)
model1 = SentenceTransformer("all-MiniLM-L6-v2")

def get_embedding(text):
    return model1.encode(text, convert_to_tensor=True)

def calculate_similarity(text1, text2):
    embedding1 = get_embedding(text1)
    embedding2 = get_embedding(text2)
    similarity = util.pytorch_cos_sim(embedding1, embedding2)
    return similarity.item()
def get_grade(similarity_score):
    if similarity_score >= 0.9:
        return 5
    elif similarity_score >= 0.8:
        return 4
    elif similarity_score >= 0.7:
        return 3
    elif similarity_score >= 0.6:
        return 2
    else:
        return 1
def extract_text_from_image(image):
    # Convert PIL image to RGB format
    image = image.convert('RGB')
    # Use pytesseract to extract text from the image
    text = tess.image_to_string(image)
    return text.strip()
def evaluate_answer(image):
    student_answer = extract_text_from_image(image)
    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = generate_response(f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}")
    return grade, similarity_score * 100, feedback
# Alternative: generate feedback with a local pipeline instead of the hosted API
# (requires initialising `pipe` first, e.g. pipe = pipeline("text-generation", model=...)):
# def generate_response(prompt):
#     response = pipe(prompt, max_length=150, temperature=0.7)
#     return response[0]['generated_text']
def gradio_interface(image, prompt):
    grade, similarity_score, feedback = evaluate_answer(image)
    response = generate_response(prompt)
    return grade, similarity_score, feedback, response
# Define the Gradio interface
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[gr.Image(type="pil"), gr.Textbox(lines=2, placeholder="Enter your prompt here")],
    outputs=[
        gr.Label(label="Grade"),
        gr.Label(label="Similarity (%)"),
        gr.Textbox(label="Feedback"),
        gr.Textbox(label="Model response"),
    ],
    live=True
)
if __name__ == "__main__":
    interface.launch()