AImodel / app.py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the model and tokenizer
model_name = "AI"  # Replace with the name or path of the model you want to use
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
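# A minimal inference-mode sketch: eval() disables dropout and other
# training-only behavior; this demo assumes CPU execution is sufficient.
model.eval()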
# Values for the new scenario
new_students = int(input("Enter the number of students in the new scenario: "))
new_temperature = int(input("Enter the temperature in the new scenario: "))
# Convert the input to tokens
inputs = tokenizer.encode_plus(
    "Number of students: {}, Temperature: {}".format(new_students, new_temperature),
    padding="max_length",
    truncation=True,
    max_length=64,
    return_tensors="pt",
)
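# `inputs` is a BatchEncoding of tensors (input_ids, attention_mask, and, for
# some tokenizers, token_type_ids) that the model accepts as keyword arguments.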
# Make the prediction
with torch.no_grad():
    outputs = model(**inputs)
logits = outputs.logits
predicted_rooms = torch.argmax(logits, dim=1).item()
# Print the results
print("Number of students:", new_students)
print("Temperature:", new_temperature)
print("Predicted label for number of rooms:", predicted_rooms)