Archisman Karmakar committed
Commit: 912c816 · Parent(s): ff1e899
URL Endpoint update
Files changed:
- .github/workflows/deploy_to_HF_space_DIRECT.yml (+5 -5)
- .github/workflows/dfploy_to_HF_space_DOCKER (+2 -2)
- dashboard.py (+2 -2)
- emotionMoodtag_analysis/config/stage2_models.json (+2 -2)
- emotionMoodtag_analysis/hmv_cfg_base_stage2/model1.py (+1 -1)
- emotionMoodtag_analysis/hmv_cfg_base_stage2/model2.py (+1 -1)
- sentimentPolarity_analysis/config/stage1_models.json (+4 -4)
- sentimentPolarity_analysis/hmv_cfg_base_stage1/model1.py (+1 -1)
- sentimentPolarity_analysis/hmv_cfg_base_stage1/model2.py (+1 -1)
- sentimentPolarity_analysis/hmv_cfg_base_stage1/model3.py (+1 -1)
- sentimentPolarity_analysis/hmv_cfg_base_stage1/model4.py (+1 -1)
- transformation_and_Normalization/config/stage3_models.json (+3 -3)
- transformation_and_Normalization/hmv_cfg_base_stage3/model1.py (+1 -1)
- transformation_and_Normalization/hmv_cfg_base_stage3/model2.py (+1 -1)
- transformation_and_Normalization/hmv_cfg_base_stage3/model3.py (+1 -1)
.github/workflows/deploy_to_HF_space_DIRECT.yml
CHANGED

@@ -76,8 +76,8 @@ jobs:
       env:
         HF_READ_WRITE_TOKEN: ${{ secrets.HF_READ_WRITE_TOKEN }}
       run: |
-        git remote add space https://huggingface.co/spaces/tachygraphy-
-        git push --force https://${{ secrets.HF_USERNAME }}:${{ secrets.HF_READ_WRITE_TOKEN }}@huggingface.co/spaces/tachygraphy-
+        git remote add space https://huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder
+        git push --force https://${{ secrets.HF_USERNAME }}:${{ secrets.HF_READ_WRITE_TOKEN }}@huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder

@@ -214,7 +214,7 @@ jobs:
 # - name: Clone Hugging Face Space repository
 #   run: |
-#     git clone https://HF_USERNAME:${{ secrets.HF_TOKEN }}@huggingface.co/spaces/tachygraphy-
+#     git clone https://HF_USERNAME:${{ secrets.HF_TOKEN }}@huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder hf-space
 # - name: Copy repository files to HF Space
 #   run: |

@@ -227,7 +227,7 @@ jobs:
 # # run: |
 # #   cd hf-space
 # #   git init
-# #   git remote add origin https://huggingface.co/spaces/tachygraphy-
+# #   git remote add origin https://huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder
 # #   git checkout -b main
 # #   git add .
 # #   git commit -m "Update deployment via GitHub Actions"

@@ -240,7 +240,7 @@ jobs:
 #     git init
 #     # Remove existing origin if it exists
 #     git remote remove origin || true
-#     git remote add origin https://huggingface.co/spaces/tachygraphy-
+#     git remote add origin https://huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder
 #     git checkout -b main
 #     git add .
 #     git commit -m "Update deployment via GitHub Actions"
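For reference, the step above wires the checkout to the Space remote and force-pushes it over git using HF_USERNAME and HF_READ_WRITE_TOKEN. Below is a minimal alternative sketch using the huggingface_hub Python client; this is not what the workflow does, and the token handling and paths are assumptions for illustration only.

import os
from huggingface_hub import HfApi

# Sketch only: upload the checked-out repository to the Space via the HF Hub client
# instead of the workflow's raw git push. Assumes HF_READ_WRITE_TOKEN is set in the
# job environment; folder_path is illustrative.
api = HfApi(token=os.environ["HF_READ_WRITE_TOKEN"])
api.upload_folder(
    folder_path=".",
    repo_id="tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder",
    repo_type="space",
)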
.github/workflows/dfploy_to_HF_space_DOCKER
CHANGED

@@ -28,7 +28,7 @@ jobs:
       - name: Build the Docker image
-        run: docker build -t huggingface.co/spaces/tachygraphy-
+        run: docker build -t huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder .

       - name: Push the Docker image to Hugging Face
-        run: docker push huggingface.co/spaces/tachygraphy-
+        run: docker push huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder
dashboard.py
CHANGED

@@ -90,10 +90,10 @@ def show_dashboard():
     st.write("""
     - Training Source: [GitHub @ Tachygraphy Micro-text Analysis & Normalization](https://github.com/ArchismanKarmakar/Tachygraphy-Microtext-Analysis-And-Normalization)
     - Kaggle Collections: [Kaggle @ Tachygraphy Micro-text Analysis & Normalization](https://www.kaggle.com/datasets/archismancoder/dataset-tachygraphy/data?select=Tachygraphy_MicroText-AIO-V3.xlsx)
-    - Hugging Face Org: [Hugging Face @ Tachygraphy Micro-text Analysis & Normalization](https://huggingface.co/tachygraphy-
+    - Hugging Face Org: [Hugging Face @ Tachygraphy Micro-text Analysis & Normalization](https://huggingface.co/tachygraphy-microtext-normalization-iemk)
     - Deployment Source: [GitHub](https://github.com/ArchismanKarmakar/Tachygraphy-Microtext-Analysis-And-Normalization-Deployment-Source-HuggingFace_Streamlit_JPX14032025)
     - Streamlit Deployemnt: [Streamlit](https://tachygraphy-microtext.streamlit.app/)
-    - Hugging Face Space Deployment: [Hugging Face Space](https://huggingface.co/spaces/tachygraphy-
+    - Hugging Face Space Deployment: [Hugging Face Space](https://huggingface.co/spaces/tachygraphy-microtext-normalization-iemk/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder)
     """)

     create_footer()
emotionMoodtag_analysis/config/stage2_models.json
CHANGED

@@ -3,7 +3,7 @@
     "name": "DeBERTa v3 Base for Sequence Classification",
     "type": "hf_automodel_finetuned_dbt3",
     "module_path": "hmv_cfg_base_stage2.model1",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-seqClassfication-LV2-EmotionMoodtags-Batch8",
     "tokenizer_class": "DebertaV2Tokenizer",
     "model_class": "DebertaV2ForSequenceClassification",
     "problem_type": "regression",

@@ -18,7 +18,7 @@
     "name": "DeBERTa v3 Base Custom Model with minimal Regularized Loss",
     "type": "db3_base_custom",
     "module_path": "hmv_cfg_base_stage2.model2",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-Base-Cust-LV2-EmotionMoodtags-minRegLoss",
     "tokenizer_class": "DebertaV2Tokenizer",
     "model_class": "EmotionModel",
     "problem_type": "regression",
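Each stage-2 entry pairs an hf_location repository id with tokenizer_class and model_class names. The sketch below shows one way such an entry could be resolved at runtime; the load_hf_entry helper is hypothetical, and the project's real loaders (in hmv_cfg_base_stage2, referenced by module_path) are not part of this diff.

import transformers

def load_hf_entry(entry: dict):
    # Resolve class names against the transformers namespace. This covers entries
    # like DebertaV2Tokenizer / DebertaV2ForSequenceClassification; custom classes
    # such as EmotionModel would instead need to be imported from entry["module_path"].
    tokenizer_cls = getattr(transformers, entry["tokenizer_class"])
    model_cls = getattr(transformers, entry["model_class"])
    tokenizer = tokenizer_cls.from_pretrained(entry["hf_location"])
    model = model_cls.from_pretrained(
        entry["hf_location"], problem_type=entry["problem_type"]
    )
    return tokenizer, model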
emotionMoodtag_analysis/hmv_cfg_base_stage2/model1.py
CHANGED

@@ -19,7 +19,7 @@ MODEL_OPTIONS = {
         "name": "DeBERTa v3 Base for Sequence Classification",
         "type": "hf_automodel_finetuned_dbt3",
         "module_path": "hmv_cfg_base_stage2.model1",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-seqClassfication-LV2-EmotionMoodtags-Batch8",
         "tokenizer_class": "DebertaV2Tokenizer",
         "model_class": "DebertaV2ForSequenceClassification",
         "problem_type": "regression",
emotionMoodtag_analysis/hmv_cfg_base_stage2/model2.py
CHANGED

@@ -25,7 +25,7 @@ MODEL_OPTIONS = {
         "name": "DeBERTa v3 Base Custom Model with minimal Regularized Loss",
         "type": "db3_base_custom",
         "module_path": "hmv_cfg_base_stage2.model2",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-Base-Cust-LV2-EmotionMoodtags-minRegLoss",
         "tokenizer_class": "DebertaV2Tokenizer",
         "model_class": "EmotionModel",
         "problem_type": "regression",
sentimentPolarity_analysis/config/stage1_models.json
CHANGED

@@ -3,7 +3,7 @@
     "name": "DeBERTa v3 Base for Sequence Classification",
     "type": "hf_automodel_finetuned_dbt3",
     "module_path": "hmv_cfg_base_stage1.model1",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-seqClassfication-LV1-SentimentPolarities-Batch8",
     "tokenizer_class": "DebertaV2Tokenizer",
     "model_class": "DebertaV2ForSequenceClassification",
     "problem_type": "multi_label_classification",

@@ -18,7 +18,7 @@
     "name": "DeBERTa v3 Base Custom Model with minimal Regularized Loss",
     "type": "db3_base_custom",
     "module_path": "hmv_cfg_base_stage1.model2",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-Base-Cust-LV1-SentimentPolarities-minRegLoss",
     "tokenizer_class": "DebertaV2Tokenizer",
     "model_class": "SentimentModel",
     "problem_type": "multi_label_classification",

@@ -33,7 +33,7 @@
     "name": "BERT Base Uncased Custom Model",
     "type": "bert_base_uncased_custom",
     "module_path": "hmv_cfg_base_stage1.model3",
-    "hf_location": "https://huggingface.co/tachygraphy-
+    "hf_location": "https://huggingface.co/tachygraphy-microtext-normalization-iemk/BERT-LV1-SentimentPolarities/resolve/main/saved_weights.pt",
     "tokenizer_class": "AutoTokenizer",
     "model_class": "BERT_architecture",
     "problem_type": "multi_label_classification",

@@ -48,7 +48,7 @@
     "name": "LSTM Custom Model",
     "type": "lstm_uncased_custom",
     "module_path": "hmv_cfg_base_stage1.model4",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/LSTM-LV1-SentimentPolarities",
     "tokenizer_class": "",
     "model_class": "",
     "problem_type": "multi_label_classification",
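Unlike the other entries, the BERT entry stores a full resolve/main/saved_weights.pt URL, i.e. a raw checkpoint file rather than a from_pretrained-style repository id. A hedged sketch of fetching such a file follows; the BERT_architecture class and its constructor live in hmv_cfg_base_stage1.model3 and are not shown in this commit.

import torch

# Sketch only: download the raw state_dict referenced by the "hf_location" URL.
# torch.hub is used here purely to illustrate the fetch; the project's own loading
# code may differ.
url = ("https://huggingface.co/tachygraphy-microtext-normalization-iemk/"
       "BERT-LV1-SentimentPolarities/resolve/main/saved_weights.pt")
state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
# model = BERT_architecture(...)   # defined in hmv_cfg_base_stage1.model3
# model.load_state_dict(state_dict)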
sentimentPolarity_analysis/hmv_cfg_base_stage1/model1.py
CHANGED

@@ -19,7 +19,7 @@ MODEL_OPTIONS = {
         "name": "DeBERTa v3 Base for Sequence Classification",
         "type": "hf_automodel_finetuned_dbt3",
         "module_path": "hmv_cfg_base_stage1.model1",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-seqClassfication-LV1-SentimentPolarities-Batch8",
         "tokenizer_class": "DebertaV2Tokenizer",
         "model_class": "DebertaV2ForSequenceClassification",
         "problem_type": "multi_label_classification",
sentimentPolarity_analysis/hmv_cfg_base_stage1/model2.py
CHANGED

@@ -27,7 +27,7 @@ MODEL_OPTIONS = {
         "name": "DeBERTa v3 Base Custom Model with minimal Regularized Loss",
         "type": "db3_base_custom",
         "module_path": "hmv_cfg_base_stage1.model2",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/DeBERTa-v3-Base-Cust-LV1-SentimentPolarities-minRegLoss",
         "tokenizer_class": "DebertaV2Tokenizer",
         "model_class": "SentimentModel",
         "problem_type": "multi_label_classification",
sentimentPolarity_analysis/hmv_cfg_base_stage1/model3.py
CHANGED

@@ -29,7 +29,7 @@ MODEL_OPTIONS = {
         "name": "BERT Base Uncased Custom Model",
         "type": "bert_base_uncased_custom",
         "module_path": "hmv_cfg_base_stage1.model3",
-        "hf_location": "https://huggingface.co/tachygraphy-
+        "hf_location": "https://huggingface.co/tachygraphy-microtext-normalization-iemk/BERT-LV1-SentimentPolarities/resolve/main/saved_weights.pt",
         "tokenizer_class": "AutoTokenizer",
         "model_class": "BERT_architecture",
         "problem_type": "multi_label_classification",
sentimentPolarity_analysis/hmv_cfg_base_stage1/model4.py
CHANGED

@@ -37,7 +37,7 @@ MODEL_OPTIONS = {
         "name": "LSTM Custom Model",
         "type": "lstm_uncased_custom",
         "module_path": "hmv_cfg_base_stage1.model4",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/LSTM-LV1-SentimentPolarities",
         "tokenizer_class": "",
         "model_class": "",
         "problem_type": "multi_label_classification",
transformation_and_Normalization/config/stage3_models.json
CHANGED

@@ -3,7 +3,7 @@
     "name": "Facebook BART Base for Conditional Text Generation",
     "type": "hf_automodel_finetuned_fbtctg",
     "module_path": "hmv_cfg_base_stage3.model1",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/BART-base-HF-Seq2Seq-Trainer-Batch4",
     "tokenizer_class": "BartTokenizer",
     "model_class": "BartForConditionalGeneration",
     "problem_type": "text_transformamtion_and_normalization",

@@ -18,7 +18,7 @@
     "name": "Microsoft Prophet Net Uncased Large for Conditional Text Generation",
     "type": "hf_automodel_finetuned_mstctg",
     "module_path": "hmv_cfg_base_stage3.model2",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/ProphetNet_ForCondGen_Uncased_Large_HFTSeq2Seq_Batch4_ngram3",
     "tokenizer_class": "ProphetNetTokenizer",
     "model_class": "ProphetNetForConditionalGeneration",
     "problem_type": "text_transformamtion_and_normalization",

@@ -33,7 +33,7 @@
     "name": "Google T5 v1.1 Base for Conditional Text Generation",
     "type": "hf_automodel_finetuned_gt5tctg",
     "module_path": "hmv_cfg_base_stage3.model3",
-    "hf_location": "tachygraphy-
+    "hf_location": "tachygraphy-microtext-normalization-iemk/T5-1.1-HF-seq2seq-Trainer-Batch4",
     "tokenizer_class": "T5Tokenizer",
     "model_class": "T5ForConditionalGeneration",
     "problem_type": "text_transformamtion_and_normalization",
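The stage-3 entries point at seq2seq checkpoints used for transformation and normalization. Below is a minimal usage sketch for the BART entry; the input text and generation settings are illustrative placeholders, not the project's tuned inference code from hmv_cfg_base_stage3.

from transformers import BartTokenizer, BartForConditionalGeneration

# Illustrative sketch: load the stage-3 BART checkpoint and normalize one microtext.
repo = "tachygraphy-microtext-normalization-iemk/BART-base-HF-Seq2Seq-Trainer-Batch4"
tokenizer = BartTokenizer.from_pretrained(repo)
model = BartForConditionalGeneration.from_pretrained(repo)

inputs = tokenizer("gm hru?", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=32, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))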
transformation_and_Normalization/hmv_cfg_base_stage3/model1.py
CHANGED

@@ -17,7 +17,7 @@ MODEL_OPTIONS = {
         "name": "Facebook BART Base for Conditional Text Generation",
         "type": "hf_automodel_finetuned_fbtctg",
         "module_path": "hmv_cfg_base_stage3.model1",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/BART-base-HF-Seq2Seq-Trainer-Batch4",
         "tokenizer_class": "BartTokenizer",
         "model_class": "BartForConditionalGeneration",
         "problem_type": "text_transformamtion_and_normalization",
transformation_and_Normalization/hmv_cfg_base_stage3/model2.py
CHANGED

@@ -17,7 +17,7 @@ MODEL_OPTIONS = {
         "name": "Microsoft Prophet Net Uncased Large for Conditional Text Generation",
         "type": "hf_automodel_finetuned_mstctg",
         "module_path": "hmv_cfg_base_stage3.model2",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/ProphetNet_ForCondGen_Uncased_Large_HFTSeq2Seq_Batch4_ngram3",
         "tokenizer_class": "ProphetNetTokenizer",
         "model_class": "ProphetNetForConditionalGeneration",
         "problem_type": "text_transformamtion_and_normalization",
transformation_and_Normalization/hmv_cfg_base_stage3/model3.py
CHANGED

@@ -17,7 +17,7 @@ MODEL_OPTIONS = {
         "name": "Google T5 v1.1 Base for Conditional Text Generation",
         "type": "hf_automodel_finetuned_gt5tctg",
         "module_path": "hmv_cfg_base_stage3.model3",
-        "hf_location": "tachygraphy-
+        "hf_location": "tachygraphy-microtext-normalization-iemk/T5-1.1-HF-seq2seq-Trainer-Batch4",
         "tokenizer_class": "T5Tokenizer",
         "model_class": "T5ForConditionalGeneration",
         "problem_type": "text_transformamtion_and_normalization",