import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification
from transformers import AutoModelForSequenceClassification
# Load only the model; no tokenizer is needed because clients send
# precomputed embeddings instead of raw text.
model = AutoModelForSequenceClassification.from_pretrained(
    "Kevintu/Engessay_grading_ML")
def process_embeddings(embeddings_array):
    # Convert the received JSON array to a float tensor; add a batch
    # dimension if the client sent a single (seq_len, hidden_size) matrix.
    embeddings_tensor = torch.tensor(embeddings_array, dtype=torch.float32)
    if embeddings_tensor.dim() == 2:
        embeddings_tensor = embeddings_tensor.unsqueeze(0)

    # Run the model with gradients disabled. Passing `inputs_embeds`
    # bypasses the embedding lookup, so `input_ids` and `attention_mask`
    # are not needed.
    model.eval()
    with torch.no_grad():
        outputs = model(inputs_embeds=embeddings_tensor)

    predictions = outputs.logits.squeeze()
    item_names = ["cohesion", "syntax", "vocabulary",
                  "phraseology", "grammar", "conventions"]

    # Rescale the raw outputs to essay-score values and round each
    # score to the nearest half point.
    scaled_scores = 2.25 * predictions.numpy() - 1.25
    rounded_scores = [round(score * 2) / 2 for score in scaled_scores]

    return {item: f"{score:.1f}"
            for item, score in zip(item_names, rounded_scores)}
# Create Gradio interface for embeddings input
demo = gr.Interface(
    fn=process_embeddings,
    inputs=gr.JSON(label="Embeddings"),
    outputs=gr.JSON(label="Scores"),
    title="Essay Grading API (Embeddings Only)",
    description="Grade essays based on precomputed embeddings",
)
demo.queue()
demo.launch()
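
A minimal client-side sketch of how this API could be called, under two assumptions: that the checkpoint ships a matching tokenizer (the Space itself never loads one), and that the Space is published under the placeholder id "your-username/essay-grading-api". It builds the (seq_len, hidden_size) matrix with the base model's input embedding layer and sends it via gradio_client.

    from gradio_client import Client
    import torch
    from transformers import AutoModel, AutoTokenizer

    checkpoint = "Kevintu/Engessay_grading_ML"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)  # assumed to exist
    encoder = AutoModel.from_pretrained(checkpoint)

    text = "Writing essays helps students organize their thoughts."
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        # Turn token ids into the (seq_len, hidden_size) embedding matrix
        # that the Space's `inputs_embeds` path expects.
        embeddings = encoder.get_input_embeddings()(inputs["input_ids"])[0]

    client = Client("your-username/essay-grading-api")  # hypothetical Space id
    scores = client.predict(embeddings.tolist(), api_name="/predict")
    print(scores)

Note that only the word embeddings are computed client-side; position and token-type embeddings are added by the model itself when it receives `inputs_embeds`.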