import os
from typing import List

import gradio as gr
import pytesseract
from groq import Groq
from PIL import Image
from sentence_transformers import SentenceTransformer, util
# Initialize the sentence transformer model used for answer similarity
model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Initialize the Groq client
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
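# Note: GROQ_API_KEY must be set in the environment (e.g. `export GROQ_API_KEY=...`)
# before launching the app; without it the Groq client cannot authenticate.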
# System prompt for Groq
system_prompt = {
    "role": "system",
    "content": "You are a helpful assistant. Reply with concise, accurate answers."
}

# Stream a chat completion from Groq for the given message and chat history
async def chat_groq(message, history):
    messages = [system_prompt]
    for msg in history:
        messages.append({"role": "user", "content": str(msg[0])})
        messages.append({"role": "assistant", "content": str(msg[1])})
    messages.append({"role": "user", "content": str(message)})

    response_content = ''
    stream = client.chat.completions.create(
        model="llama3-70b-8192",
        messages=messages,
        max_tokens=1024,
        temperature=1.3,
        stream=True
    )
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            response_content += content
        yield response_content
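# A minimal sketch of consuming the stream outside Gradio (assuming GROQ_API_KEY is
# set and the "llama3-70b-8192" model is available to your account):
#
#   import asyncio
#
#   async def _demo():
#       reply = ""
#       async for partial in chat_groq("Summarize photosynthesis in one sentence.", []):
#           reply = partial  # each yield is the accumulated text so far
#       return reply
#
#   print(asyncio.run(_demo()))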
# Extract text from an image using Tesseract OCR
def extract_text_from_image(filepath: str, languages: List[str]) -> str:
    image = Image.open(filepath)
    lang_str = '+'.join(languages)  # Tesseract expects multiple languages joined with '+'
    return pytesseract.image_to_string(image=image, lang=lang_str)
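# Usage sketch (hypothetical file path): extract_text_from_image("answer_sheet.png", ["eng"])
# requires the Tesseract binary and matching language data to be installed locally.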
# Get a sentence embedding for a piece of text using the SentenceTransformer model
def get_embedding(text):
    return model1.encode(text, convert_to_tensor=True)

# Calculate the cosine similarity between two texts
def calculate_similarity(text1, text2):
    embedding1 = get_embedding(text1)
    embedding2 = get_embedding(text2)
    similarity = util.pytorch_cos_sim(embedding1, embedding2)
    return similarity.item()
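# The score is the cosine similarity of the two embeddings, roughly in [-1, 1]:
# near-paraphrases (e.g. "Plants make glucose." vs. "Plants produce glucose.") score
# close to 1.0, while unrelated sentences score much lower. Exact values depend on
# the embedding model.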
# Map a similarity score to a grade from 1 to 5
def get_grade(similarity_score):
    if similarity_score >= 0.9:
        return 5
    elif similarity_score >= 0.8:
        return 4
    elif similarity_score >= 0.7:
        return 3
    elif similarity_score >= 0.6:
        return 2
    else:
        return 1
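# For example, get_grade(0.83) returns 4 and get_grade(0.42) returns 1.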
# Evaluate a student's answer by comparing it to the reference answer
def evaluate_answer(image, languages):
    student_answer = extract_text_from_image(image, languages)
    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."  # reference answer (currently hard-coded)
    similarity_score = calculate_similarity(student_answer, model_answer)
    grade = get_grade(similarity_score)
    feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
    prompt = (
        f"The student received a grade of {grade}. The student's answer was: {student_answer} "
        f"The teacher's reference answer was: {model_answer} Justify the grade given to the student."
    )
    return grade, similarity_score * 100, feedback, prompt
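# Note: the similarity score is returned as a percentage (0-100) for display, while
# get_grade above works on the raw 0-1 cosine similarity.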
# Main interface function for Gradio
async def gradio_interface(image, languages: List[str], prompt="", history=None):
    history = history or []  # avoid a mutable default argument
    # The generated justification prompt overrides any text typed into the Prompt box.
    grade, similarity_score, feedback, prompt = evaluate_answer(image, languages)
    response = ""
    async for result in chat_groq(prompt, history):
        response = result  # keep the latest (fully accumulated) Groq response
    return grade, similarity_score, feedback, response
# Get the Tesseract languages available on this machine
language_choices = pytesseract.get_languages()

# Define the Gradio interface
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Image(type="filepath", label="Answer Sheet Image"),
        gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='Language'),
        gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
    ],
    outputs=[
        gr.Text(label="Grade"),
        gr.Number(label="Similarity Score (%)"),
        gr.Text(label="Feedback"),
        gr.Text(label="Generated Response")
    ],
    title="Automated Grading System",
    description="Upload an image of your answer sheet to get a grade from 1 to 5, a similarity score, and feedback based on the model answer.",
    live=True
)

if __name__ == "__main__":
    interface.queue()
    interface.launch()
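# To run locally (assuming Tesseract, the selected language packs, and GROQ_API_KEY
# are all configured), execute this file with Python; Gradio serves the UI at
# http://127.0.0.1:7860 by default.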