Text Classification
Transformers
Safetensors
xlm-roberta
dardem committed (verified) · Commit 4fb9074 · 1 Parent(s): 926ad04

Upload 9 files
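The files uploaded here complete an XLM-RoBERTa sequence-classification checkpoint (Safetensors weights plus the tokenizer and trainer artifacts listed below). A minimal usage sketch with the transformers pipeline API; the repository id is an assumption inferred from `best_model_checkpoint` in trainer_state.json, not stated in this commit:

```python
# Minimal sketch, not part of the commit. The repo id below is an assumption
# inferred from best_model_checkpoint in trainer_state.json; swap in the
# actual repository path if it differs.
from transformers import pipeline

repo_id = "dardem/xlm-roberta-large-all-full-finetuned-toxicity-classification"  # assumption
classifier = pipeline("text-classification", model=repo_id)

print(classifier("This is a perfectly friendly sentence."))
# -> [{'label': ..., 'score': ...}], with label names defined by the model's config.json
```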
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
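The new rule routes the large tokenizer.json (about 17 MB, see its pointer below) through Git LFS. Files stored this way still resolve to their real contents when fetched through huggingface_hub; a small sketch, reusing the assumed repository id from above:

```python
# Sketch: LFS-backed files resolve transparently via huggingface_hub.
# The repo id is the same assumption as in the pipeline example above.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="dardem/xlm-roberta-large-all-full-finetuned-toxicity-classification",  # assumption
    filename="tokenizer.json",
)
print(path)  # local cache path to the downloaded tokenizer file
```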
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ddda8a979a91c4d27e59e463f912e505efb5da2207b8b538231704aeff1f02f
+ size 4479472785
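optimizer.pt, like the other binary artifacts in this commit, is committed as a Git LFS pointer rather than the file itself: three `key value` lines naming the spec version, the SHA-256 of the stored object, and its size in bytes. A small sketch that parses such a pointer, using the optimizer.pt entry above as input:

```python
# Sketch: parse a Git LFS pointer file (three "key value" lines).
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:3ddda8a979a91c4d27e59e463f912e505efb5da2207b8b538231704aeff1f02f
size 4479472785"""

print(parse_lfs_pointer(pointer))  # optimizer.pt resolves to a ~4.5 GB object
```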
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b806d207a5e890dc2d4793ef8cb15b9980241c2a6a5833cc6c7199dab90289d4
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3d6b57964600db6fd639f5946057da47504071ec873597fb6423e307b0b4e9e
+ size 1064
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
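special_tokens_map.json maps each special-token role to either a plain token string or, for `<mask>`, an AddedToken-style dict whose `lstrip: true` lets the mask token absorb the whitespace to its left. A quick sketch that reads the file directly, assuming a local copy in the working directory:

```python
# Sketch: inspect special_tokens_map.json as plain JSON (local copy assumed).
import json

with open("special_tokens_map.json") as f:
    special_tokens = json.load(f)

# Most roles map to a bare token string ...
assert special_tokens["bos_token"] == "<s>"
assert special_tokens["sep_token"] == "</s>"

# ... while the mask token carries AddedToken options: lstrip=True means
# "<mask>" swallows the space before it when the text is tokenized.
mask = special_tokens["mask_token"]
assert mask["content"] == "<mask>" and mask["lstrip"] is True
```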
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ffb37461c391f096759f4a9bbbc329da0f36952f88bab061fcf84940c022e98
+ size 17082999
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
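tokenizer_config.json selects XLMRobertaTokenizer, caps inputs at model_max_length 512, and registers `<mask>` at id 250001 in added_tokens_decoder. A sketch of loading the uploaded tokenizer files with transformers; the checkpoint path is a placeholder, not a path from this commit:

```python
# Sketch: load the tokenizer from this checkpoint (path is a placeholder).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")

print(type(tok).__name__)                   # XLMRobertaTokenizer (or the Fast variant)
print(tok.model_max_length)                 # 512, per tokenizer_config.json
print(tok.convert_tokens_to_ids("<mask>"))  # 250001, per added_tokens_decoder

# Inputs longer than model_max_length are cut off when truncation is enabled.
enc = tok("An example sentence.", truncation=True)
print(enc["input_ids"])
```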
trainer_state.json ADDED
@@ -0,0 +1,141 @@
+ {
+   "best_metric": 0.2617274224758148,
+   "best_model_checkpoint": "xlm-roberta-large-all-full-finetuned-toxicity-classification/checkpoint-4016",
+   "epoch": 2.9987546699875467,
+   "eval_steps": 500,
+   "global_step": 6021,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24906600249066002,
+       "grad_norm": 13.982502937316895,
+       "learning_rate": 1.83391463212091e-05,
+       "loss": 0.4913,
+       "step": 500
+     },
+     {
+       "epoch": 0.49813200498132004,
+       "grad_norm": 9.81984806060791,
+       "learning_rate": 1.6678292642418204e-05,
+       "loss": 0.3674,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7471980074719801,
+       "grad_norm": 9.316424369812012,
+       "learning_rate": 1.5017438963627307e-05,
+       "loss": 0.3317,
+       "step": 1500
+     },
+     {
+       "epoch": 0.9962640099626401,
+       "grad_norm": 13.302652359008789,
+       "learning_rate": 1.3356585284836408e-05,
+       "loss": 0.3147,
+       "step": 2000
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.2968192994594574,
+       "eval_runtime": 142.521,
+       "eval_samples_per_second": 50.091,
+       "eval_steps_per_second": 3.136,
+       "step": 2008
+     },
+     {
+       "epoch": 1.2450809464508095,
+       "grad_norm": 16.878368377685547,
+       "learning_rate": 1.1695731606045508e-05,
+       "loss": 0.2468,
+       "step": 2500
+     },
+     {
+       "epoch": 1.4941469489414696,
+       "grad_norm": 10.831304550170898,
+       "learning_rate": 1.0034877927254609e-05,
+       "loss": 0.237,
+       "step": 3000
+     },
+     {
+       "epoch": 1.7432129514321295,
+       "grad_norm": 21.390304565429688,
+       "learning_rate": 8.374024248463712e-06,
+       "loss": 0.2271,
+       "step": 3500
+     },
+     {
+       "epoch": 1.9922789539227894,
+       "grad_norm": 10.786124229431152,
+       "learning_rate": 6.7131705696728125e-06,
+       "loss": 0.2278,
+       "step": 4000
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.2617274224758148,
+       "eval_runtime": 142.583,
+       "eval_samples_per_second": 50.069,
+       "eval_steps_per_second": 3.135,
+       "step": 4016
+     },
+     {
+       "epoch": 2.241095890410959,
+       "grad_norm": 10.759682655334473,
+       "learning_rate": 5.0523168908819146e-06,
+       "loss": 0.1553,
+       "step": 4500
+     },
+     {
+       "epoch": 2.490161892901619,
+       "grad_norm": 7.604818344116211,
+       "learning_rate": 3.391463212091015e-06,
+       "loss": 0.1573,
+       "step": 5000
+     },
+     {
+       "epoch": 2.739227895392279,
+       "grad_norm": 5.243312358856201,
+       "learning_rate": 1.7306095333001162e-06,
+       "loss": 0.1422,
+       "step": 5500
+     },
+     {
+       "epoch": 2.988293897882939,
+       "grad_norm": 11.100104331970215,
+       "learning_rate": 6.975585450921775e-08,
+       "loss": 0.1439,
+       "step": 6000
+     },
+     {
+       "epoch": 2.9987546699875467,
+       "eval_loss": 0.31470784544944763,
+       "eval_runtime": 142.6105,
+       "eval_samples_per_second": 50.059,
+       "eval_steps_per_second": 3.134,
+       "step": 6021
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6021,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.7951793851787264e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
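trainer_state.json records three epochs of fine-tuning (6021 steps, batch size 16): eval loss drops from 0.297 at epoch 1 to 0.262 at epoch 2, then rises to 0.315 at epoch 3, so checkpoint-4016 is kept as best_model_checkpoint. A sketch that recovers those numbers from the file, assuming a local copy:

```python
# Sketch: replay the eval curve from trainer_state.json (local copy assumed).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.2617..., the eval_loss at epoch 2
print(state["best_model_checkpoint"])  # .../checkpoint-4016

evals = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for epoch, loss in evals:
    print(f"epoch {epoch:.2f}: eval_loss {loss:.4f}")
```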
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff0521c1a232f96fab6509f0075c296345eb44eeadab1b4bb03817792d3caa6
+ size 5432
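training_args.bin is the pickled TrainingArguments object the Trainer was run with. A sketch of restoring it; unpickling requires weights_only=False on recent PyTorch, needs transformers installed so the class can be resolved, and should only be done for files you trust:

```python
# Sketch: restore the TrainingArguments saved by the Trainer (local copy
# assumed; unpickling arbitrary files is unsafe, so only load trusted ones).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```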