import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import streamlit as st

# Use a GPU if one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the fine-tuned RoBERTa classifier and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("mavinsao/mi-roberta-classification")
model = AutoModelForSequenceClassification.from_pretrained("mavinsao/mi-roberta-classification").to(device)

st.title('Mental Illness Prediction')

sentence = st.text_area("Enter the sentence to predict your mental illness state:")

if st.button('Predict'):
    # Tokenize the input and move the tensors to the same device as the model.
    inputs = tokenizer(sentence, return_tensors="pt", padding=True, truncation=True).to(device)

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)

    # Apply a sigmoid to turn the logits into independent per-label probabilities
    # (multi-label setup), then drop the batch dimension.
    probabilities = torch.sigmoid(outputs.logits).squeeze(dim=0)

    # Report every label whose probability exceeds 0.5. Label names come from the
    # model config; tokenizers do not carry a `labels` attribute.
    predicted_labels = [model.config.id2label[i] for i, p in enumerate(probabilities.tolist()) if p > 0.5]

    st.write("Predicted labels:", predicted_labels)
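To try the app locally, save the script (assumed here to be named `app.py`) and launch it with `streamlit run app.py`; Streamlit will serve the page in your browser, and the first prediction will be slower while the model weights are downloaded from the Hugging Face Hub.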