import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the fine-tuned classifier and its tokenizer from the Hugging Face Hub.
model_name = "mjpsm/recommendation-overview-classification-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
id2label = model.config.id2label


def predict_tag(text):
    """Classify a student reflection and return its predicted recommendation tag."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class_id = torch.argmax(logits, dim=1).item()
    predicted_label = id2label[predicted_class_id]
    return predicted_label


demo = gr.Interface(
    fn=predict_tag,
    inputs=gr.Textbox(lines=4, placeholder="Enter student reflection..."),
    outputs="text",
    title="🧠 Recommendation Overview Classifier",
    description="Enter a student's reflection after a math game. The model will return a motivational recommendation tag.",
    examples=[
        "I got frustrated when I made a mistake but I didn’t give up.",
        "I asked my classmate for help and it finally made sense.",
        "It felt like budgeting in real life when I played that part of the game.",
        "Even though I was confused, I tried a new strategy and it worked.",
    ],
)

if __name__ == "__main__":
    demo.launch()