# app.py — Gradio demo: room-count prediction from student count + temperature,
# plus a simple greeting tab. (Header reconstructed from Hugging Face Space
# file-viewer chrome that was accidentally captured into the source.)
from functools import lru_cache

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
def predict_rooms(new_students, new_temperature):
    """Predict the number of rooms for a given student count and temperature.

    Args:
        new_students: Number of students (anything with a sensible ``str``).
        new_temperature: Temperature reading (anything with a sensible ``str``).

    Returns:
        int: Index of the highest-scoring class from the sequence classifier,
        interpreted by this app as the predicted room count.
    """
    # Load once and reuse — the original reloaded tokenizer + model from the
    # Hub on every single call, which is very slow.
    tokenizer, model = _load_predictor()

    # Encode the two numeric features as a natural-language prompt, which is
    # what a text sequence-classification model expects as input.
    inputs = tokenizer.encode_plus(
        "Number of students: {}, Temperature: {}".format(new_students, new_temperature),
        padding="max_length",
        truncation=True,
        max_length=64,
        return_tensors="pt",
    )

    # Inference only: disable gradient tracking to save memory and compute.
    with torch.no_grad():
        outputs = model(**inputs)

    # argmax over the class dimension -> predicted label index.
    logits = outputs.logits
    return torch.argmax(logits, dim=1).item()


@lru_cache(maxsize=1)
def _load_predictor(model_name="AI"):
    """Load and cache the (tokenizer, model) pair for ``model_name``.

    ``lru_cache`` ensures the expensive download/load happens only on the
    first prediction; subsequent calls reuse the same objects.

    NOTE(review): "AI" looks like a placeholder model id — confirm the real
    Hub repo name or local path before deploying.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tokenizer, model
def greet(name):
    """Return a friendly greeting for *name*."""
    return f"Hello {name}!"
# gr.Interface takes a single callable — the original fn=[predict_rooms, greet]
# with nested input specs is not a supported API and fails at construction.
# Build one Interface per function and expose both as tabs of a single app.
room_iface = gr.Interface(
    fn=predict_rooms,
    inputs=["number", "number"],
    outputs="number",
    title="Room Prediction",
    description="Predict the number of rooms based on the number of students and temperature.",
)

greet_iface = gr.Interface(
    fn=greet,
    inputs="text",
    outputs="text",
    title="Greeting",
    description="Greet the user.",
)

iface = gr.TabbedInterface(
    [room_iface, greet_iface],
    tab_names=["Room Prediction", "Greet"],
)

# Guard the launch so importing this module (e.g. for tests) doesn't start a server.
if __name__ == "__main__":
    iface.launch()