Archisman Karmakar committed · Commit 2d6564f · Parent(s): 4822903

2025.03.21.post1
Browse files:
- app_main_hf.py +24 -7
- emotionMoodtag_analysis/emotion_analysis_main.py +2 -2
- pyproject.toml +1 -1
- sentimentPolarity_analysis/sentiment_analysis_main.py +2 -2
- transformation_and_Normalization/config/stage3_models.json +30 -0
- transformation_and_Normalization/hmv_cfg_base_stage3/model1.py +1 -1
- transformation_and_Normalization/hmv_cfg_base_stage3/model2.py +117 -0
- transformation_and_Normalization/hmv_cfg_base_stage3/model3.py +117 -0
- transformation_and_Normalization/transformationNormalization_main.py +12 -6
app_main_hf.py CHANGED

@@ -31,6 +31,12 @@ else:
     except RuntimeError:
         asyncio.set_event_loop(asyncio.new_event_loop())
 
+st.set_page_config(
+    # page_title="Tachygraphy Microtext Analysis & Normalization",
+    layout="wide"
+)
+
+
 
 import joblib
 import importlib
@@ -47,10 +53,6 @@ from dashboard import show_dashboard
 
 # from text_transformation import show_text_transformation
 
-st.set_page_config(
-    page_title="Tachygraphy Microtext Analysis & Normalization",
-    # layout="wide"
-)
 
 
 def free_memory():
@@ -112,10 +114,21 @@ def main():
 
     st.sidebar.title("Navigation")
     with st.sidebar:
+
+        # selected = option_menu("Main Menu", ["Home", 'Settings'],
+        #     icons=['house', 'gear'], menu_icon="cast", default_index=1)
+        # selected
+
+        # # 2. horizontal menu
+        # selected2 = option_menu(None, ["Home", "Upload", "Tasks", 'Settings'],
+        #     icons=['house', 'cloud-upload', "list-task", 'gear'],
+        #     menu_icon="cast", default_index=0, orientation="horizontal")
+        # selected2
+
         selection = option_menu(
             menu_title=None,  # No title for a sleek look
             options=["Dashboard", "Stage 1: Sentiment Polarity Analysis", "Stage 2: Emotion Mood-tag Analysis", "Stage 3: Text Transformation & Normalization"],
-            icons=
+            icons=['house', 'diagram-3', "snow", 'activity'],
             menu_icon="cast",  # Main menu icon
             default_index=0,  # Highlight the first option
             orientation="vertical",
@@ -126,11 +139,11 @@ def main():
                 "font-size": "16px",
                 "text-align": "left",
                 "margin": "0px",
-                "color": "#
+                "color": "#000000",
                 "transition": "0.3s",
             },
             "nav-link-selected": {
-                "background-color": "#
+                "background-color": "#020045",
                 "color": "white",
                 "font-weight": "bold",
                 "border-radius": "8px",
@@ -160,22 +173,26 @@ def main():
         st.session_state.current_page = selection
 
     if selection == "Dashboard":
+        # st.title("Tachygraphy Micro-text Analysis & Normalization")
         # st.cache_resource.clear()
         # free_memory()
         show_dashboard()
 
     elif selection == "Stage 1: Sentiment Polarity Analysis":
+        # st.title("Sentiment Polarity Analysis")
        # st.cache_resource.clear()
        # free_memory()
        show_sentiment_analysis()
 
     elif selection == "Stage 2: Emotion Mood-tag Analysis":
+        # st.title("Emotion Mood-tag Analysis")
         # st.cache_resource.clear()
         # free_memory()
         show_emotion_analysis()
         # st.write("This section is under development.")
 
     elif selection == "Stage 3: Text Transformation & Normalization":
+        # st.title("Text Transformation & Normalization")
         # st.cache_resource.clear()
         # free_memory()
         transform_and_normalize()
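The notable change above: `st.set_page_config(...)` moves from below the stage-module imports to immediately after the asyncio event-loop guard, and the app switches to `layout="wide"`. Streamlit requires `set_page_config()` to be called once, as the first Streamlit command of a script run, before any imported module can issue `st.*` calls at import time; leaving it after the imports risks a `StreamlitAPIException`. A minimal sketch of the required ordering (module names below are placeholders, not the app's exact imports):

import asyncio
import streamlit as st

# set_page_config must be the first Streamlit command executed in the script,
# and it may only be called once per run.
st.set_page_config(layout="wide")

# Heavy imports and app modules (which may call st.* at import time) follow:
import joblib
import importlib

st.sidebar.title("Navigation")  # any other st.* call is safe from here on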
emotionMoodtag_analysis/emotion_analysis_main.py CHANGED

@@ -217,12 +217,12 @@ def show_emotion_analysis():
 
     # Model selection with change detection
     selected_model = st.selectbox(
-        "Choose a model:", list(MODEL_OPTIONS.keys()), key="
+        "Choose a model:", list(MODEL_OPTIONS.keys()), key="selected_model_stage2", on_change=on_model_change
     )
 
     # Text input with change detection
     user_input = st.text_input(
-        "Enter text for emotions mood-tag analysis:", key="
+        "Enter text for emotions mood-tag analysis:", key="user_input_stage2", on_change=on_text_change
     )
     user_input_copy = user_input
 
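Both widgets gain stage-scoped keys (`selected_model_stage2`, `user_input_stage2`) so the three stages no longer collide in `st.session_state`, plus `on_change` callbacks. The callbacks are defined elsewhere in the file and are not shown in this diff; the sketch below assumes plausible bodies for them and is not the committed implementation:

import time
import streamlit as st

def on_model_change():
    # Assumed: drop cached models/outputs when the user switches models.
    st.cache_resource.clear()

def on_text_change():
    # Assumed: stamp the edit time so prediction can be debounced.
    st.session_state.last_change = time.time()

selected_model = st.selectbox(
    "Choose a model:", ["Model A", "Model B"],  # placeholder options
    key="selected_model_stage2", on_change=on_model_change
)
user_input = st.text_input(
    "Enter text for emotions mood-tag analysis:",
    key="user_input_stage2", on_change=on_text_change
)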
pyproject.toml CHANGED

@@ -1,6 +1,6 @@
 [project]
 name = "tachygraphy-microtext-analysis-and-normalization"
-version = "2025.03.
+version = "2025.03.22.post1"
 description = ""
 authors = [
     { name = "Archisman Karmakar", email = "[email protected]" },
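The project versions by date with PEP 440 post-release suffixes, so `2025.03.22.post1` sorts after `2025.03.22` but before the next day's release. A quick check with the third-party `packaging` library (an illustration, not a dependency shown in this diff):

from packaging.version import Version

assert Version("2025.03.22.post1") > Version("2025.03.22")
assert Version("2025.03.22.post1") < Version("2025.03.23")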
sentimentPolarity_analysis/sentiment_analysis_main.py CHANGED

@@ -215,12 +215,12 @@ def show_sentiment_analysis():
 
     # Model selection with change detection
     selected_model = st.selectbox(
-        "Choose a model:", list(MODEL_OPTIONS.keys()), key="
+        "Choose a model:", list(MODEL_OPTIONS.keys()), key="selected_model_stage1", on_change=on_model_change
     )
 
     # Text input with change detection
     user_input = st.text_input(
-        "Enter text for sentiment analysis:", key="
+        "Enter text for sentiment analysis:", key="user_input_stage1", on_change=on_text_change
     )
     user_input_copy = user_input
 
transformation_and_Normalization/config/stage3_models.json CHANGED

@@ -13,5 +13,35 @@
     "max_top_k": 50265,
     "load_function": "load_model",
     "predict_function": "predict"
+  },
+  "2": {
+    "name": "Microsoft Prophet Net Uncased Large for Conditional Text Generation",
+    "type": "hf_automodel_finetuned_mstctg",
+    "module_path": "hmv_cfg_base_stage3.model2",
+    "hf_location": "tachygraphy-microtrext-norm-org/ProphetNet_ForCondGen_Uncased_Large_HFTSeq2Seq_Batch4_ngram3",
+    "tokenizer_class": "ProphetNetTokenizer",
+    "model_class": "ProphetNetForConditionalGeneration",
+    "problem_type": "text_transformamtion_and_normalization",
+    "base_model": "microsoft/prophetnet-large-uncased",
+    "base_model_class": "ProphetNetForConditionalGeneration",
+    "device": "cpu",
+    "max_top_k": 32128,
+    "load_function": "load_model",
+    "predict_function": "predict"
+  },
+  "3": {
+    "name": "Google T5 v1.1 Base for Conditional Text Generation",
+    "type": "hf_automodel_finetuned_gt5tctg",
+    "module_path": "hmv_cfg_base_stage3.model3",
+    "hf_location": "tachygraphy-microtrext-norm-org/T5-1.1-HF-seq2seq-Trainer-Batch4",
+    "tokenizer_class": "T5Tokenizer",
+    "model_class": "T5ForConditionalGeneration",
+    "problem_type": "text_transformamtion_and_normalization",
+    "base_model": "google/t5-v1_1-base",
+    "base_model_class": "T5ForConditionalGeneration",
+    "device": "cpu",
+    "max_top_k": 32128,
+    "load_function": "load_model",
+    "predict_function": "predict"
   }
 }
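Each entry carries a `module_path`, `load_function`, and `predict_function`, which lets the Stage 3 page resolve models at runtime instead of hard-coding imports (`app_main_hf.py` already imports `importlib`). A hedged sketch of one way such config-driven dispatch can work; the actual loader in `transformationNormalization_main.py` is not shown in this diff and may differ:

import importlib
import json

with open("transformation_and_Normalization/config/stage3_models.json") as f:
    STAGE3_MODELS = json.load(f)

def resolve(model_key):
    # Assumes the hmv_cfg_base_stage3 package's parent directory is importable,
    # which the model modules arrange via sys.path.append(...).
    info = STAGE3_MODELS[model_key]
    module = importlib.import_module(info["module_path"])
    load_fn = getattr(module, info["load_function"])        # e.g. load_model
    predict_fn = getattr(module, info["predict_function"])  # e.g. predict
    return info, load_fn, predict_fn

info, load_fn, predict_fn = resolve("2")
model, tokenizer = load_fn()
print(predict_fn(model, tokenizer, "gm hbu", info["device"], beams=3))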
transformation_and_Normalization/hmv_cfg_base_stage3/model1.py CHANGED

@@ -9,7 +9,7 @@ import sys
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), )))
 
 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-
+CONFIG_STAGE3 = os.path.join(BASE_DIR, "..", "config", "stage3_models.json")
 
 
 MODEL_OPTIONS = {
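With this line, model1.py gains the same `CONFIG_STAGE3` constant that the new model2.py and model3.py define: a path to the shared stage-3 config one directory above the module. A small illustration of how the relative path resolves (the absolute prefix is hypothetical):

import os

BASE_DIR = "/app/transformation_and_Normalization/hmv_cfg_base_stage3"  # hypothetical
CONFIG_STAGE3 = os.path.join(BASE_DIR, "..", "config", "stage3_models.json")

# os.path.normpath collapses the ".." segment:
print(os.path.normpath(CONFIG_STAGE3))
# /app/transformation_and_Normalization/config/stage3_models.json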
transformation_and_Normalization/hmv_cfg_base_stage3/model2.py ADDED

@@ -0,0 +1,117 @@
+from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration, AutoTokenizer, AutoModelForSequenceClassification, AutoModel
+import torch.nn.functional as F
+from imports import *
+import torch.nn as nn
+import torch
+import os
+import sys
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), )))
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+CONFIG_STAGE3 = os.path.join(BASE_DIR, "..", "config", "stage3_models.json")
+
+
+MODEL_OPTIONS = {
+    "2": {
+        "name": "Microsoft Prophet Net Uncased Large for Conditional Text Generation",
+        "type": "hf_automodel_finetuned_mstctg",
+        "module_path": "hmv_cfg_base_stage3.model2",
+        "hf_location": "tachygraphy-microtrext-norm-org/ProphetNet_ForCondGen_Uncased_Large_HFTSeq2Seq_Batch4_ngram3",
+        "tokenizer_class": "ProphetNetTokenizer",
+        "model_class": "ProphetNetForConditionalGeneration",
+        "problem_type": "text_transformamtion_and_normalization",
+        "base_model": "microsoft/prophetnet-large-uncased",
+        "base_model_class": "ProphetNetForConditionalGeneration",
+        "device": "cpu",
+        "max_top_k": 32128,
+        "load_function": "load_model",
+        "predict_function": "predict"
+    }
+}
+
+model_key = "2"
+model_info = MODEL_OPTIONS[model_key]
+hf_location = model_info["hf_location"]
+
+tokenizer_class = globals()[model_info["tokenizer_class"]]
+model_class = globals()[model_info["model_class"]]
+
+
+@st.cache_resource
+def load_model():
+    tokenizer = tokenizer_class.from_pretrained(hf_location)
+    print("Loading model 2")
+    model = model_class.from_pretrained(hf_location,
+                                        # device_map=torch.device(
+                                        #     "cuda" if torch.cuda.is_available() else "cpu")
+                                        )
+    print("Model 2 loaded")
+
+    return model, tokenizer
+
+
+def predict(
+    model, tokenizer, text, device,
+    num_return_sequences=1,
+    beams=None,  # Beam search
+    do_sample=False,  # Sampling flag
+    temp=None,  # Temperature (only for sampling)
+    top_p=None,
+    top_k=None,
+    max_new_tokens=1024,
+    early_stopping=True
+):
+    # Tokenize input
+    padded = tokenizer(text, return_tensors='pt', truncation=False, padding=True).to(device)
+    input_ids = padded['input_ids'].to(device)
+    attention_mask = padded['attention_mask'].to(device)
+
+    # Validate arguments
+    if beams is not None and do_sample:
+        raise ValueError("Cannot use `beams` and `do_sample=True` together. Choose either beam search (`beams=5`) or sampling (`do_sample=True, temp=0.7`).")
+
+    if temp is not None and not do_sample:
+        raise ValueError("`temp` (temperature) can only be used in sampling mode (`do_sample=True`).")
+
+    if (top_p is not None or top_k is not None) and not do_sample:
+        raise ValueError("`top_p` and `top_k` can only be used in sampling mode (`do_sample=True`).")
+
+    # Beam search (Deterministic)
+    if beams is not None:
+        outputs = model.generate(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            max_new_tokens=max_new_tokens,
+            num_return_sequences=num_return_sequences,
+            num_beams=beams,
+            early_stopping=early_stopping,
+            do_sample=False  # No randomness
+        )
+
+    # Sampling Cases
+    else:
+        generate_args = {
+            "input_ids": input_ids,
+            "attention_mask": attention_mask,
+            "max_new_tokens": max_new_tokens,
+            "num_return_sequences": num_return_sequences,
+            "do_sample": True,  # Enable stochastic sampling
+            "temperature": temp if temp is not None else 0.7,  # Default temp if not passed
+        }
+
+        # Add `top_p` if set
+        if top_p is not None:
+            generate_args["top_p"] = top_p
+
+        # Add `top_k` if set
+        if top_k is not None:
+            generate_args["top_k"] = top_k
+
+        # Generate
+        outputs = model.generate(**generate_args)
+
+    # Decode predictions into human-readable text
+    predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+    return predictions
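A hedged usage sketch for the `load_model`/`predict` pair above, assuming it runs inside a Streamlit session (so `@st.cache_resource` applies) on the config's "cpu" device; the input text and generation parameters are illustrative only:

model, tokenizer = load_model()

# Deterministic decoding: beam search, do_sample left at False.
best = predict(model, tokenizer, "gm! hru? lol", device="cpu",
               beams=5, num_return_sequences=1)

# Stochastic decoding: sampling with temperature / nucleus / top-k settings.
variants = predict(model, tokenizer, "gm! hru? lol", device="cpu",
                   do_sample=True, temp=0.7, top_p=0.9, top_k=50,
                   num_return_sequences=3)

print(best[0])
print(variants)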
transformation_and_Normalization/hmv_cfg_base_stage3/model3.py ADDED

@@ -0,0 +1,117 @@
+from transformers import T5Tokenizer, T5ForConditionalGeneration, AutoTokenizer, AutoModelForSequenceClassification, AutoModel
+import torch.nn.functional as F
+from imports import *
+import torch.nn as nn
+import torch
+import os
+import sys
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), )))
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+CONFIG_STAGE3 = os.path.join(BASE_DIR, "..", "config", "stage3_models.json")
+
+
+MODEL_OPTIONS = {
+    "3": {
+        "name": "Google T5 v1.1 Base for Conditional Text Generation",
+        "type": "hf_automodel_finetuned_gt5tctg",
+        "module_path": "hmv_cfg_base_stage3.model3",
+        "hf_location": "tachygraphy-microtrext-norm-org/T5-1.1-HF-seq2seq-Trainer-Batch4",
+        "tokenizer_class": "T5Tokenizer",
+        "model_class": "T5ForConditionalGeneration",
+        "problem_type": "text_transformamtion_and_normalization",
+        "base_model": "google/t5-v1_1-base",
+        "base_model_class": "T5ForConditionalGeneration",
+        "device": "cpu",
+        "max_top_k": 32128,
+        "load_function": "load_model",
+        "predict_function": "predict"
+    }
+}
+
+model_key = "3"
+model_info = MODEL_OPTIONS[model_key]
+hf_location = model_info["hf_location"]
+
+tokenizer_class = globals()[model_info["tokenizer_class"]]
+model_class = globals()[model_info["model_class"]]
+
+
+@st.cache_resource
+def load_model():
+    tokenizer = tokenizer_class.from_pretrained(hf_location)
+    print("Loading model 3")
+    model = model_class.from_pretrained(hf_location,
+                                        # device_map=torch.device(
+                                        #     "cuda" if torch.cuda.is_available() else "cpu")
+                                        )
+    print("Model 3 loaded")
+
+    return model, tokenizer
+
+
+def predict(
+    model, tokenizer, text, device,
+    num_return_sequences=1,
+    beams=None,  # Beam search
+    do_sample=False,  # Sampling flag
+    temp=None,  # Temperature (only for sampling)
+    top_p=None,
+    top_k=None,
+    max_new_tokens=1024,
+    early_stopping=True
+):
+    # Tokenize input
+    padded = tokenizer(text, return_tensors='pt', truncation=False, padding=True).to(device)
+    input_ids = padded['input_ids'].to(device)
+    attention_mask = padded['attention_mask'].to(device)
+
+    # Validate arguments
+    if beams is not None and do_sample:
+        raise ValueError("Cannot use `beams` and `do_sample=True` together. Choose either beam search (`beams=5`) or sampling (`do_sample=True, temp=0.7`).")
+
+    if temp is not None and not do_sample:
+        raise ValueError("`temp` (temperature) can only be used in sampling mode (`do_sample=True`).")
+
+    if (top_p is not None or top_k is not None) and not do_sample:
+        raise ValueError("`top_p` and `top_k` can only be used in sampling mode (`do_sample=True`).")
+
+    # Beam search (Deterministic)
+    if beams is not None:
+        outputs = model.generate(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            max_new_tokens=max_new_tokens,
+            num_return_sequences=num_return_sequences,
+            num_beams=beams,
+            early_stopping=early_stopping,
+            do_sample=False  # No randomness
+        )
+
+    # Sampling Cases
+    else:
+        generate_args = {
+            "input_ids": input_ids,
+            "attention_mask": attention_mask,
+            "max_new_tokens": max_new_tokens,
+            "num_return_sequences": num_return_sequences,
+            "do_sample": True,  # Enable stochastic sampling
+            "temperature": temp if temp is not None else 0.7,  # Default temp if not passed
+        }
+
+        # Add `top_p` if set
+        if top_p is not None:
+            generate_args["top_p"] = top_p
+
+        # Add `top_k` if set
+        if top_k is not None:
+            generate_args["top_k"] = top_k
+
+        # Generate
+        outputs = model.generate(**generate_args)
+
+    # Decode predictions into human-readable text
+    predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+    return predictions
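model3.py mirrors model2.py line for line, swapping in the "3" config entry and the T5 classes. One environment note: the slow `T5Tokenizer` used here depends on the `sentencepiece` package, so the Space's dependencies presumably include it (an assumption; the lockfile is not part of this diff):

# Hypothetical preflight check before loading model 3:
try:
    import sentencepiece  # required by the slow T5Tokenizer
except ImportError as err:
    raise SystemExit("Install `sentencepiece` to use T5Tokenizer") from err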
transformation_and_Normalization/transformationNormalization_main.py CHANGED

@@ -224,6 +224,12 @@ def transform_and_normalize():
     # No cache clearing here—only in the model change callback!
 
     # st.write(st.session_state)
+
+    if "last_change" not in st.session_state:
+        st.session_state.last_change = time.time()
+    if "auto_predict_triggered" not in st.session_state:
+        st.session_state.auto_predict_triggered = False
+
 
     if "top_k" not in st.session_state:
         st.session_state.top_k = 50
@@ -242,12 +248,12 @@ def transform_and_normalize():
 
     # Model selection with change detection; clearing cache happens in on_model_change()
     selected_model = st.selectbox(
-        "Choose a model:", model_names, key="
+        "Choose a model:", model_names, key="selected_model_stage3", on_change=on_model_change
     )
 
     # Text input with change detection
     user_input = st.text_input(
-        "Enter text for emotions mood-tag analysis:", key="
+        "Enter text for emotions mood-tag analysis:", key="user_input_stage3", on_change=on_text_change
     )
 
     st.markdown("#### Generation Parameters")
@@ -321,7 +327,7 @@ def transform_and_normalize():
     user_input_copy = user_input
 
     current_time = time.time()
-    if user_input.strip() and (current_time - st.session_state.last_change >= 1.
+    if user_input.strip() and (current_time - st.session_state.last_change >= 1.25):
         st.session_state.last_processed_input = user_input
 
         progress_bar = st.progress(0)
@@ -348,11 +354,11 @@ def transform_and_normalize():
         update_progress(progress_bar, 10, 100)
 
         if len(predictions) > 1:
-            st.write("###
+            st.write("### Predictions:")
             for i, pred in enumerate(predictions, start=1):
-                st.markdown(f"**Sequence {i}:** {pred}")
+                st.markdown(f"**Prediction Sequence {i}:** {pred}")
         else:
-            st.write("###
+            st.write("### Predicted Sequence:")
             st.write(predictions[0])
         progress_bar.empty()
         # else:
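The new `last_change` / `auto_predict_triggered` session keys and the `>= 1.25` comparison implement a debounce: the model only runs once the input text has been stable for 1.25 seconds. A minimal sketch of the pattern as this diff suggests it works; the surrounding rerun and trigger logic in the file is not fully shown here:

import time
import streamlit as st

DEBOUNCE_SECONDS = 1.25  # threshold used in this commit

def on_text_change():
    # Stamp every edit; the main body measures idle time against it.
    st.session_state.last_change = time.time()

if "last_change" not in st.session_state:
    st.session_state.last_change = time.time()

user_input = st.text_input("Enter text:", key="user_input_stage3",
                           on_change=on_text_change)

if user_input.strip() and (time.time() - st.session_state.last_change >= DEBOUNCE_SECONDS):
    st.write("Input stable; running prediction on:", user_input)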