Text Classification
Transformers
Safetensors
xlm-roberta
dardem committed on
Commit 71494ff · verified · 1 Parent(s): 77e6890

Upload 2 files

Files changed (2)
  1. trainer_state.json +105 -0
  2. training_args.bin +3 -0
trainer_state.json ADDED
@@ -0,0 +1,105 @@
+ {
+   "best_metric": 0.2607206702232361,
+   "best_model_checkpoint": "tweet-xlmr-finetuned-toxicity-classification/checkpoint-2008",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 4016,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.24906600249066002,
+       "grad_norm": 22.069509506225586,
+       "learning_rate": 1.9003487792725463e-05,
+       "loss": 0.4381,
+       "step": 500
+     },
+     {
+       "epoch": 0.49813200498132004,
+       "grad_norm": 8.925381660461426,
+       "learning_rate": 1.8006975585450922e-05,
+       "loss": 0.3312,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7471980074719801,
+       "grad_norm": 17.554454803466797,
+       "learning_rate": 1.7010463378176384e-05,
+       "loss": 0.3061,
+       "step": 1500
+     },
+     {
+       "epoch": 0.9962640099626401,
+       "grad_norm": 13.063820838928223,
+       "learning_rate": 1.6013951170901845e-05,
+       "loss": 0.2846,
+       "step": 2000
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.2607206702232361,
+       "eval_runtime": 142.7454,
+       "eval_samples_per_second": 50.012,
+       "eval_steps_per_second": 3.131,
+       "step": 2008
+     },
+     {
+       "epoch": 1.2450809464508095,
+       "grad_norm": 22.06833267211914,
+       "learning_rate": 1.5017438963627307e-05,
+       "loss": 0.1974,
+       "step": 2500
+     },
+     {
+       "epoch": 1.4941469489414696,
+       "grad_norm": 10.912453651428223,
+       "learning_rate": 1.4020926756352765e-05,
+       "loss": 0.2038,
+       "step": 3000
+     },
+     {
+       "epoch": 1.7432129514321295,
+       "grad_norm": 18.099023818969727,
+       "learning_rate": 1.3024414549078229e-05,
+       "loss": 0.1937,
+       "step": 3500
+     },
+     {
+       "epoch": 1.9922789539227894,
+       "grad_norm": 8.781684875488281,
+       "learning_rate": 1.2027902341803687e-05,
+       "loss": 0.1929,
+       "step": 4000
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.2738674283027649,
+       "eval_runtime": 142.6259,
+       "eval_samples_per_second": 50.054,
+       "eval_steps_per_second": 3.134,
+       "step": 4016
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 10035,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.1972522224674816e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
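For quick inspection, the state above can be read back as plain JSON. The snippet below is a minimal sketch, assuming the file has been downloaded locally (the bare filename is a placeholder path); it prints the best metric, the corresponding checkpoint, and the per-epoch evaluation losses recorded in log_history.

import json

# Minimal sketch: read the uploaded trainer_state.json from a local path
# (the bare filename is a placeholder; adjust it to your download location).
with open("trainer_state.json") as f:
    state = json.load(f)

print("best eval_loss:", state["best_metric"])             # 0.2607...
print("best checkpoint:", state["best_model_checkpoint"])  # .../checkpoint-2008
print("progress:", state["global_step"], "of", state["max_steps"], "steps")

# Split the log history into periodic training logs and per-epoch evaluations.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]
for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss = {e['eval_loss']:.4f}")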
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e9045e261f431f3e8a4fe87aecf5eb7bb75f2822ded05da045c9997972441de
+ size 5368
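training_args.bin is the TrainingArguments object that transformers.Trainer serializes with torch.save alongside its checkpoints, so it can be unpickled to recover the run configuration. The sketch below is an assumption, not part of the commit: the local path is a placeholder, transformers must be installed so the pickled class resolves, and weights_only=False is passed because recent PyTorch versions refuse to unpickle arbitrary objects by default; only load files from sources you trust.

import torch  # transformers must also be installed for unpickling to resolve the class

# Minimal sketch, assuming the file was downloaded next to this script.
args = torch.load("training_args.bin", weights_only=False)

print(args.num_train_epochs)             # expected 5, matching trainer_state.json
print(args.per_device_train_batch_size)  # expected 16 (train_batch_size above)
print(args.learning_rate)
print(args.output_dir)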