livinNector committed
Commit ae899ff · verified · 1 Parent(s): 90ab499

End of training

README.md ADDED
@@ -0,0 +1,89 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: microsoft/Multilingual-MiniLM-L12-H384
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: m-minilm-l12-h384-data-augumented-dra-tam-mal-aw-classification-finetune
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # m-minilm-l12-h384-data-augumented-dra-tam-mal-aw-classification-finetune
+
+ This model is a fine-tuned version of [microsoft/Multilingual-MiniLM-L12-H384](https://huggingface.co/microsoft/Multilingual-MiniLM-L12-H384) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6411
+ - Accuracy: 0.7702
+ - F1: 0.8164
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 128
+ - eval_batch_size: 128
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 6
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:------:|
+ | 0.6713 | 0.2222 | 20 | 0.6683 | 0.5998 | 0.7499 |
+ | 0.6494 | 0.4444 | 40 | 0.6559 | 0.6019 | 0.6459 |
+ | 0.6431 | 0.6667 | 60 | 0.6389 | 0.6508 | 0.7346 |
+ | 0.6079 | 0.8889 | 80 | 0.6318 | 0.6720 | 0.7166 |
+ | 0.5667 | 1.1111 | 100 | 0.5755 | 0.7021 | 0.7401 |
+ | 0.5353 | 1.3333 | 120 | 0.5437 | 0.7213 | 0.7927 |
+ | 0.5345 | 1.5556 | 140 | 0.5306 | 0.7482 | 0.7964 |
+ | 0.5178 | 1.7778 | 160 | 0.5366 | 0.7184 | 0.8031 |
+ | 0.4952 | 2.0 | 180 | 0.5046 | 0.7543 | 0.8050 |
+ | 0.4183 | 2.2222 | 200 | 0.5798 | 0.7278 | 0.7466 |
+ | 0.4257 | 2.4444 | 220 | 0.5373 | 0.7673 | 0.8075 |
+ | 0.3932 | 2.6667 | 240 | 0.5214 | 0.7665 | 0.8093 |
+ | 0.3914 | 2.8889 | 260 | 0.5125 | 0.7616 | 0.8133 |
+ | 0.3447 | 3.1111 | 280 | 0.5534 | 0.7653 | 0.8076 |
+ | 0.3122 | 3.3333 | 300 | 0.5874 | 0.7543 | 0.7901 |
+ | 0.3116 | 3.5556 | 320 | 0.5594 | 0.7649 | 0.8003 |
+ | 0.326 | 3.7778 | 340 | 0.5446 | 0.7661 | 0.8158 |
+ | 0.2979 | 4.0 | 360 | 0.5750 | 0.7681 | 0.8145 |
+ | 0.2457 | 4.2222 | 380 | 0.6121 | 0.7677 | 0.8140 |
+ | 0.2383 | 4.4444 | 400 | 0.5861 | 0.7689 | 0.8118 |
+ | 0.2396 | 4.6667 | 420 | 0.6161 | 0.7734 | 0.8156 |
+ | 0.2311 | 4.8889 | 440 | 0.5909 | 0.7751 | 0.8121 |
+ | 0.2139 | 5.1111 | 460 | 0.6411 | 0.7702 | 0.8164 |
+ | 0.2038 | 5.3333 | 480 | 0.6462 | 0.7718 | 0.8154 |
+ | 0.1884 | 5.5556 | 500 | 0.6443 | 0.7645 | 0.8043 |
+ | 0.1889 | 5.7778 | 520 | 0.6588 | 0.7665 | 0.8064 |
+ | 0.2081 | 6.0 | 540 | 0.6581 | 0.7665 | 0.8054 |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - Pytorch 2.5.1+cu121
+ - Datasets 3.2.0
+ - Tokenizers 0.20.3
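
For quick experimentation, here is a minimal inference sketch using the transformers pipeline API. The repo id is an assumption inferred from the commit author and model name; since config.json (below) defines no id2label mapping, predictions surface as LABEL_0/LABEL_1, which the classification head config later in this commit suggests correspond to Non-Abusive and Abusive respectively.

```python
from transformers import pipeline

# Assumed Hub repo id (commit author + model name); adjust if it differs.
repo_id = "livinNector/m-minilm-l12-h384-data-augumented-dra-tam-mal-aw-classification-finetune"

# Resolves to BertForSequenceClassification with the XLM-R sentencepiece
# tokenizer declared via tokenizer_class in config.json.
classifier = pipeline("text-classification", model=repo_id)

# Hypothetical Tamil input; the model is a binary abusive-text classifier.
# Returns a list of {'label': ..., 'score': ...} dicts; with no id2label in
# config.json, the labels read LABEL_0 / LABEL_1.
print(classifier("இது ஒரு எடுத்துக்காட்டு வாக்கியம்"))
```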
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "microsoft/Multilingual-MiniLM-L12-H384",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 384,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 250037
+ }
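
A small sanity-check sketch for this config (same assumed repo id as above): because tokenizer_class is set to XLMRobertaTokenizer, AutoTokenizer returns the XLM-R sentencepiece tokenizer even though model_type is bert, which is why vocab_size is 250037 rather than a BERT WordPiece vocabulary size.

```python
from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification

repo_id = "livinNector/m-minilm-l12-h384-data-augumented-dra-tam-mal-aw-classification-finetune"  # assumed

config = AutoConfig.from_pretrained(repo_id)
print(config.model_type, config.hidden_size)  # bert 384

# tokenizer_class in config.json overrides the model_type default, so this
# yields an XLM-R (sentencepiece) tokenizer, not a BERT WordPiece one.
tokenizer = AutoTokenizer.from_pretrained(repo_id)

model = AutoModelForSequenceClassification.from_pretrained(repo_id)
assert model.config.vocab_size == 250037
```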
default/head_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "config": {
+     "activation_function": "gelu",
+     "bias": true,
+     "embedding_size": 768,
+     "head_type": "masked_lm",
+     "label2id": null,
+     "layer_norm": true,
+     "layers": 2,
+     "shift_labels": false,
+     "vocab_size": 250000
+   },
+   "hidden_size": 768,
+   "model_class": "BertAdapterModel",
+   "model_name": "ai4bharat/IndicBERTv2-MLM-only",
+   "model_type": "bert",
+   "name": "default",
+   "version": "adapters.1.0.1"
+ }
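
Note that this file (and the adapter files below) reference ai4bharat/IndicBERTv2-MLM-only with hidden_size 768, not the MiniLM base (hidden_size 384) described in the README, so they appear to come from a separate adapters-based training run bundled into the same commit. A sketch of how the adapters library would declare an equivalent default masked-lm head; the exact keyword defaults are assumptions against adapters 1.0.x:

```python
from adapters import AutoAdapterModel

# Flexible-heads model matching model_class / model_name in this config.
model = AutoAdapterModel.from_pretrained("ai4bharat/IndicBERTv2-MLM-only")

# Mirrors the saved head config: 2 layers, gelu activation, layer norm,
# bias, vocab_size inherited from the base model.
model.add_masked_lm_head("default", activation_function="gelu")
```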
default/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81ade58fc9df9dbd31f6b4dfb0ec3bcafffbd5f20b8f19438fbadbddf6688352
+ size 771371254
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6a794d71ca6987b3aa2eb0fdec40bcef5f06a41eefc3f2010930ea9b7556022
+ size 470641664
tam_mal_ai_aw_classification_adapter/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 8,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.1,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 12,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "a0c8452a4cfb970e",
+   "hidden_size": 768,
+   "model_class": "BertAdapterModel",
+   "model_name": "ai4bharat/IndicBERTv2-MLM-only",
+   "model_type": "bert",
+   "name": "tam_mal_ai_aw_classification_adapter",
+   "version": "adapters.1.0.1"
+ }
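
This config corresponds to a rank-12 LoRA applied only to the self-attention query and value projections. A minimal sketch of declaring (or loading) the same adapter with the adapters library; the local paths are assumptions based on the directory names in this commit:

```python
from adapters import AutoAdapterModel, LoRAConfig

model = AutoAdapterModel.from_pretrained("ai4bharat/IndicBERTv2-MLM-only")

# Equivalent of the saved config: r=12, alpha=8, dropout 0.1, LoRA on the
# q and v attention matrices only.
lora = LoRAConfig(r=12, alpha=8, dropout=0.1, attn_matrices=["q", "v"])
model.add_adapter("tam_mal_ai_aw_classification_adapter", config=lora)

# Or load the trained weights from a local checkout of this repo:
# model.load_adapter("tam_mal_ai_aw_classification_adapter/")
model.set_active_adapters("tam_mal_ai_aw_classification_adapter")
```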
tam_mal_ai_aw_classification_adapter/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b924cc8d8ea3bb7be73459cf3bc217f1dda9e5607a9be79faa037f4ca483f54
+ size 1788390
tam_mal_ai_aw_classification_head/head_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "config": {
+     "activation_function": "ReLU",
+     "bias": true,
+     "dropout_prob": null,
+     "head_type": "classification",
+     "label2id": {
+       "Abusive": 1,
+       "Non-Abusive": 0
+     },
+     "layers": 2,
+     "num_labels": 2,
+     "use_pooler": false
+   },
+   "hidden_size": 768,
+   "model_class": "BertAdapterModel",
+   "model_name": "ai4bharat/IndicBERTv2-MLM-only",
+   "model_type": "bert",
+   "name": "tam_mal_ai_aw_classification_head",
+   "version": "adapters.1.0.1"
+ }
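
To run the adapter end to end, both the LoRA adapter and this classification head need to be loaded and activated. A sketch assuming a local checkout with the directory layout of this commit:

```python
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("ai4bharat/IndicBERTv2-MLM-only")

# Paths match the directories added in this commit (local checkout assumed).
model.load_adapter("tam_mal_ai_aw_classification_adapter/")
model.load_head("tam_mal_ai_aw_classification_head/")

model.set_active_adapters("tam_mal_ai_aw_classification_adapter")
model.active_head = "tam_mal_ai_aw_classification_head"
# Per label2id above, output index 0 is Non-Abusive and 1 is Abusive.
```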
tam_mal_ai_aw_classification_head/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e7d8c1397bae7f2d2c0043913999e01f2c1a6cf0c1f88a37c663f5ab7f3ae94
+ size 2370792
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94c38db30681f6d72f206d289bb9adf2bbabcf01b017565da663fe1deaa5e7ee
+ size 5368
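
training_args.bin is the pickled TrainingArguments object the Trainer saves alongside checkpoints; it can be inspected to confirm the hyperparameters listed in the README (only unpickle files you trust):

```python
import torch

# Pickled TrainingArguments; weights_only=False is required on newer PyTorch
# because this is an arbitrary Python object, so only load trusted files.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)                # 0.0001 per the README
print(args.per_device_train_batch_size)  # 128 (assuming single-device training)
print(args.num_train_epochs)             # 6
```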