import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import streamlit as st
# Set device (GPU if available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("mavinsao/mi-roberta-classification")
model = AutoModelForSequenceClassification.from_pretrained("mavinsao/mi-roberta-classification").to(device)
# Streamlit app
st.title('Mental Illness Prediction')
# Input text area for user input
sentence = st.text_area("Enter text to predict the mental illness state:")
# Prediction button
if st.button('Predict'):
    # Tokenize the input sentence
    inputs = tokenizer(sentence, return_tensors="pt", padding=True, truncation=True).to(device)
    # Forward pass (no gradients needed for inference)
    with torch.no_grad():
        outputs = model(**inputs)
    # Get predicted probabilities for each label
    probabilities = torch.sigmoid(outputs.logits).squeeze(dim=0)
    # Keep labels whose probability exceeds 0.5; label names come from the model config
    predicted_labels = [model.config.id2label[i] for i, prob in enumerate(probabilities) if prob > 0.5]
    st.write("Predicted labels:", predicted_labels)