import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification


def predict_rooms(new_students, new_temperature):
    # NOTE: "AI" is a placeholder model name; point this at a real
    # sequence-classification checkpoint from the Hugging Face Hub.
    # (Loading it inside the function reloads it on every call; in practice
    # you would load the tokenizer and model once at module level.)
    model_name = "AI"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)

    # Encode the two numeric inputs as a single text prompt for the classifier.
    inputs = tokenizer.encode_plus(
        "Number of students: {}, Temperature: {}".format(new_students, new_temperature),
        padding="max_length",
        truncation=True,
        max_length=64,
        return_tensors="pt"
    )

    # Run inference without gradient tracking and take the highest-scoring
    # class index as the predicted number of rooms.
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_rooms = torch.argmax(logits, dim=1).item()

    return predicted_rooms


def greet(name):
    return "Hello " + name + "!"


# gr.Interface takes a single callable, so wrap both functions in one helper
# that returns the room prediction and the greeting together.
def predict_and_greet(new_students, new_temperature, name):
    return predict_rooms(new_students, new_temperature), greet(name)


iface = gr.Interface(
    fn=predict_and_greet,
    inputs=["number", "number", "text"],
    outputs=["number", "text"],
    title="Room Prediction",
    description="Predict the number of rooms based on the number of students and temperature, and greet the user."
)

iface.launch()