Sumon670 committed on
Commit · 9432e38 · 1 Parent(s): c91521b
Adding LSTM model
app_main_hf.py
CHANGED
@@ -15,7 +15,7 @@ import shutil
 import gc
 from transformers.utils.hub import TRANSFORMERS_CACHE
 
-torch.classes.__path__ = []
+torch.classes.__path__ = []
 
 try:
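For context, `torch.classes.__path__ = []` is a commonly seen workaround for Streamlit's module watcher, which fails when it tries to enumerate submodule paths of `torch.classes`. A minimal sketch of the apparent intent (my reading, not documented in this repo):

# Assumed intent of the changed line (illustration, not from this commit):
# Streamlit's source watcher walks each imported module's __path__, and
# torch.classes raises when introspected that way. Emptying __path__ gives
# the watcher nothing to walk, sidestepping the error.
import torch
torch.classes.__path__ = []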
sentiment_analysis/config/stage1_models.json
CHANGED
@@ -43,5 +43,20 @@
       "device": "cpu",
       "load_function": "load_model",
       "predict_function": "predict"
-    }
+    },
+    "4": {
+      "name": "LSTM Custom Model",
+      "type": "lstm_uncased_custom",
+      "module_path": "hmv_cfg_base_stage1.model4",
+      "hf_location": "tachygraphy-microtrext-norm-org/LSTM-LV1-SentimentPolarities",
+      "tokenizer_class": "",
+      "model_class": "",
+      "problem_type": "multi_label_classification",
+      "base_model": "",
+      "base_model_class": "",
+      "num_labels": 3,
+      "device": "cpu",
+      "load_function": "load_model",
+      "predict_function": "predict"
+    }
 }
sentiment_analysis/hmv_cfg_base_stage1/model4.py
ADDED
@@ -0,0 +1,146 @@
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+from imports import *  # expected to provide st (streamlit), among others
+
+import importlib.util
+import joblib
+import json
+import pickle
+
+import numpy as np  # used below; imported explicitly in case `imports` does not re-export it
+import torch
+import torch.nn as nn
+import torch.nn.functional as F  # fixed: `torch.functional` is not the nn functional module
+from transformers import DebertaV2Model, DebertaV2Tokenizer, AutoModel, AutoTokenizer
+import safetensors
+# from safetensors import load_file, save_file
+from safetensors.torch import save_file, safe_open
+from huggingface_hub import hf_hub_download
+
+import tensorflow as tf
+# from tensorflow.keras.preprocessing.sequence import pad_sequences
+# from keras.preprocessing.sequence import pad_sequences
+# from keras_preprocessing.sequence import pad_sequences
+
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+CONFIG_STAGE1 = os.path.join(BASE_DIR, "..", "config", "stage1_models.json")
+
+MODEL_OPTIONS = {
+    "4": {
+        "name": "LSTM Custom Model",
+        "type": "lstm_uncased_custom",
+        "module_path": "hmv_cfg_base_stage1.model4",
+        "hf_location": "tachygraphy-microtrext-norm-org/LSTM-LV1-SentimentPolarities",
+        "tokenizer_class": "",
+        "model_class": "",
+        "problem_type": "multi_label_classification",
+        "base_model": "",
+        "base_model_class": "",
+        "num_labels": 3,
+        "device": "cpu",
+        "load_function": "load_model",
+        "predict_function": "predict"
+    }
+}
+
+model_key = "4"
+model_info = MODEL_OPTIONS[model_key]
+hf_location = model_info["hf_location"]
+
+
+@st.cache_resource
+def load_model():
+    repo_id = hf_location
+    print("Loading model 4")
+    # Fetch the saved Keras LSTM and its fitted tokenizer from the Hub.
+    model_path = hf_hub_download(repo_id=repo_id, filename="lstm.h5")
+    tokenizer_path = hf_hub_download(repo_id=repo_id, filename="tokenizer.pickle")
+
+    lstm_model = tf.keras.models.load_model(model_path)
+
+    with open(tokenizer_path, "rb") as handle:
+        tokenizer = pickle.load(handle)
+    print("Model 4 loaded")
+
+    return lstm_model, tokenizer
+
+
+def pad_sequences_custom(sequences, maxlen, dtype="int32", padding="pre", truncating="pre", value=0):
+    """
+    Pads each sequence to the same length (maxlen).
+
+    Args:
+        sequences (list of list of int): A list where each element is a sequence (list of integers).
+        maxlen (int): Maximum length of all sequences.
+        dtype (str): Data type of the output (default "int32").
+        padding (str): 'pre' or 'post': add padding before or after the sequence.
+        truncating (str): 'pre' or 'post': drop values from the beginning or the end of sequences longer than maxlen.
+        value (int): The padding value.
+
+    Returns:
+        numpy.ndarray: 2D array of shape (number of sequences, maxlen)
+    """
+    # Initialize a numpy array filled with the pad value.
+    num_samples = len(sequences)
+    padded = np.full((num_samples, maxlen), value, dtype=dtype)
+
+    for i, seq in enumerate(sequences):
+        if not seq:
+            continue  # skip empty sequences
+        if len(seq) > maxlen:
+            if truncating == "pre":
+                trunc = seq[-maxlen:]
+            elif truncating == "post":
+                trunc = seq[:maxlen]
+            else:
+                raise ValueError("Invalid truncating type: choose 'pre' or 'post'.")
+        else:
+            trunc = seq
+        if padding == "post":
+            padded[i, :len(trunc)] = trunc
+        elif padding == "pre":
+            padded[i, -len(trunc):] = trunc
+        else:
+            raise ValueError("Invalid padding type: choose 'pre' or 'post'.")
+
+    return padded
+
+
+def predict(text, model, tokenizer, device, max_len=128):
+    # `device` is unused for this Keras model; it is kept so every model module
+    # exposes the same predict(text, model, tokenizer, device) signature.
+    # Convert the text to a sequence of integer token ids.
+    sequences = tokenizer.texts_to_sequences([text])
+    # Pad the sequence with the custom padding function above.
+    padded_sequences = pad_sequences_custom(sequences, maxlen=max_len, dtype="int32", value=0)
+    # Get the model's output; assumed shape is (1, num_classes).
+    logits = model.predict(padded_sequences, batch_size=1, verbose=0)[0]
+    print(logits)  # debug
+    # Rescale the raw outputs so they sum to 1. Note this is plain normalization,
+    # not a softmax; it assumes the output layer already yields non-negative scores.
+    # exp_logits = np.exp(logits)
+    # probabilities = exp_logits / np.sum(exp_logits)
+    probabilities = logits / logits.sum()
+    print(probabilities)  # debug
+    # Ensure the output is a 2D array of shape (1, 3).
+    probabilities = np.atleast_2d(probabilities)
+    print(probabilities)  # debug
+    return probabilities
+
+
+# def predict(text, model, tokenizer, max_len=128):
+#     sequences = tokenizer.texts_to_sequences([text])
+#     # Use the custom pad_sequences function:
+#     padded_sequences = pad_sequences_custom(sequences, maxlen=max_len, dtype="int32", value=0)
+#     prediction = model.predict(padded_sequences, batch_size=1, verbose=0)[0]
+#     pred_class = np.argmax(prediction)
+#     sentiment_labels = ["Negative", "Neutral", "Positive"]
+#     probabilities = prediction / prediction.sum()
+#     return sentiment_labels[pred_class], pred_class, probabilities
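Taken together, load_model and predict give the LSTM entry the same two-step interface as the transformer models. A minimal usage sketch (not part of the commit; assumes the Hub repo above is reachable and the module import path matches this repo's layout):

# Hypothetical driver for model4.py (illustration only).
from hmv_cfg_base_stage1.model4 import load_model, predict  # adjust to your sys.path

model, tokenizer = load_model()  # downloads lstm.h5 + tokenizer.pickle on first call
probs = predict("loving this tiny keyboard!!", model, tokenizer, device="cpu")
print(probs.shape)     # (1, 3): [NEGATIVE, NEUTRAL, POSITIVE]
print(probs.round(3))  # each row sums to 1 after the rescaling in predict()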
sentiment_analysis/sentiment_analysis_main.py
CHANGED
@@ -195,7 +195,7 @@ if "model_changed" not in st.session_state:
     st.session_state.model_changed = False
 if "text_changed" not in st.session_state:
     st.session_state.text_changed = False
-if "
+if "disabled" not in st.session_state:
     st.session_state.disabled = False
 
 
@@ -249,7 +249,9 @@ def show_sentiment_analysis():
         "⚠️ Error: Model failed to load! Check model selection or configuration.")
         st.stop()
 
-    model.to(device)
+    # model.to(device)
+    if hasattr(model, "to"):
+        model.to(device)
 
     # predictions = predict(user_input, model, tokenizer, device)
 
@@ -270,7 +272,7 @@ def show_sentiment_analysis():
     st.write(f"**Predicted Sentiment Scores:** {predictions_array}")
 
     # enable_ui()
-
+    ##
     # Display binary classification result
     st.write(f"**Predicted Sentiment:**")
     st.write(f"**NEGATIVE:** {binary_predictions[0]}, **NEUTRAL:** {binary_predictions[1]}, **POSITIVE:** {binary_predictions[2]}")
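The `hasattr(model, "to")` guard is needed because the new LSTM entry returns a tf.keras model, which has no PyTorch-style `.to()` method, while the existing entries return torch modules. A small illustration of the dispatch (hypothetical helper, not from the repo):

# Why the guard helps: PyTorch modules expose .to(device); tf.keras models do not,
# so calling model.to("cpu") on the LSTM entry would raise AttributeError.
import torch.nn as nn

def move_if_torch(model, device="cpu"):
    # Mirrors the guard added above: only PyTorch-style models are moved.
    if hasattr(model, "to"):
        model = model.to(device)
    return model

move_if_torch(nn.Linear(4, 2))     # moved to the requested device
# move_if_torch(keras_lstm_model)  # returned unchanged; Keras manages its own placement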