mavinsao committed
Commit ff694a9 · verified · 1 Parent(s): 1346a67
Files changed (1)
  1. app.py +15 -15
app.py CHANGED
@@ -1,8 +1,8 @@
-import streamlit as st
 import torch
 import torch.nn as nn
 from transformers import RobertaTokenizer, RobertaModel
 import json
+import streamlit as st
 
 # Set device (GPU if available, otherwise CPU)
 device = torch.device("cpu")
@@ -56,8 +56,6 @@ roberta_model = MIRobertaClassifier(num_classes=num_classes).to(device)
 # Load the state dictionary into the model
 roberta_model.load_state_dict(roberta_loaded_model_state['state_dict'])
 
-
-
 # Load the state dictionary into the model
 mi_loaded_model_state = torch.load('reddit_miroberta_state.pth', map_location=device)
 # Create an instance of your model
@@ -67,7 +65,6 @@ mi_model = MIRobertaClassifier(num_classes=num_classes).to(device)
 mi_model.load_state_dict(mi_loaded_model_state['state_dict'])
 
 
-
 def predict_label(sentence, tokenizer, model1, model2, device):
     # Tokenize the sentence and create attention mask
     tokenized_input = tokenizer(
@@ -96,28 +93,31 @@ def predict_label(sentence, tokenizer, model1, model2, device):
     ensemble_outputs = (outputs1 + outputs2) / 2
 
     # Apply softmax to get probabilities
-    probabilities = torch.max(ensemble_outputs, dim=1)[0].tolist()
+    probabilities = torch.softmax(ensemble_outputs, dim=1)[0].tolist()
 
     # Map the predicted index back to the original class label using class_names
     class_names = list(common_label_map.keys())
-
-    predicted_index = torch.argmax(ensemble_outputs, dim=1)
-    predicted_label = class_names[predicted_index.item()]
 
-    # Create JSON response
-    response = {
-        "label": predicted_label,
-        "score": probabilities[common_label_map[predicted_label]]
-    }
+    # Get predicted index and score for each label
+    label_scores = {}
+    for i, label in enumerate(class_names):
+        label_index = common_label_map[label]
+        label_scores[label] = probabilities[label_index]
+
+    # Sort label scores by score values in descending order
+    sorted_label_scores = {k: v for k, v in sorted(label_scores.items(), key=lambda item: item[1], reverse=True)}
+
+    # Get the predicted label
+    predicted_index = torch.argmax(ensemble_outputs, dim=1)
 
-    return response
+    return sorted_label_scores
 
 
 # Streamlit app
 st.title('Mental Illness Prediction')
 
 # Input text area for user input
-sentence = st.text_area("Enter the sentence to predict your mental illness state:")
+sentence = st.text_area("Enter the long sentence to predict your mental illness state:")
 
 # Prediction button
 if st.button('Predict'):
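
For context, the scoring change in this commit averages the two classifiers' logits, converts them to probabilities with torch.softmax (replacing the earlier torch.max, which returned raw maximum logits rather than probabilities), and returns a per-label score dictionary sorted in descending order instead of a single top label. Below is a minimal sketch of that step in isolation; the dummy logits and the three-entry common_label_map are illustrative assumptions, since the real label map is defined elsewhere in app.py.

import torch

# Illustrative stand-in for the label map defined elsewhere in app.py
common_label_map = {"adhd": 0, "anxiety": 1, "depression": 2}

# Dummy logits standing in for outputs1/outputs2 from the two RoBERTa classifiers
outputs1 = torch.tensor([[0.2, 1.5, 0.3]])
outputs2 = torch.tensor([[0.1, 1.2, 0.6]])

# Average the logits and apply softmax to turn them into probabilities (the commit's fix)
ensemble_outputs = (outputs1 + outputs2) / 2
probabilities = torch.softmax(ensemble_outputs, dim=1)[0].tolist()

# Build and sort the per-label score dictionary, as predict_label now returns
label_scores = {label: probabilities[idx] for label, idx in common_label_map.items()}
sorted_label_scores = dict(sorted(label_scores.items(), key=lambda item: item[1], reverse=True))
print(sorted_label_scores)  # "anxiety" ranks first for these dummy logits

Returning all label scores sorted, rather than only the argmax label, lets the caller display a ranked list; note that the predicted_index computed in the new code is not part of the returned value.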