great0001 committed on
Commit 7b0fb63 · verified · 1 Parent(s): ab12263

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c95cd537b014738234d9c92874ac9ba1dd950b6079118241be47b0f10d27ce84
+oid sha256:81ca70464c6891920e1f1c6fe83db22bc0a91f7c7af8769d6b2121524c5f10b7
  size 156926880
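
Like the other checkpoint files below, adapter_model.safetensors is tracked with Git LFS, so the repository stores only a pointer: a SHA-256 oid and a byte size. A minimal sketch, assuming the blob has already been downloaded into a local last-checkpoint/ directory (the path and the helper name are illustrative, not part of this repo), for checking a downloaded file against the pointer recorded in this commit:

import hashlib
import os

def matches_lfs_pointer(blob_path, expected_oid, expected_size):
    # Compare the on-disk size, then the SHA-256 digest, to the fields in the pointer file.
    if os.path.getsize(blob_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size copied from the adapter_model.safetensors pointer above;
# the local path is an assumption.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "81ca70464c6891920e1f1c6fe83db22bc0a91f7c7af8769d6b2121524c5f10b7",
    156926880,
))

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt with their respective oids and sizes.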
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a1450084527cc85189315e4b94acaff8b5eb1b93aa0a96f5c2a280cc26886b99
+oid sha256:051d2b675f138dbfb7f1bacb7a389b85aedf2fb7a36e0e277db4b2e4320710ee
 size 79968772
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0327c646fbcebdbfe58bb39ffa0aa64a521ffb921c8df37b9e57436d246f1866
+oid sha256:caa173be0daf3b70291aa54f3fc172b70d25960de7e9a07e7f57c5f27ae513c9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8148b27980adb9b0bdd5c04049e531bfefcc05c06612ee72169cfb9b9fd7ee7
+oid sha256:0fd8212ec5ec3406d74a7f927b717dd30ea8a06115ee6582e14976f7b84b4b58
  size 1064
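
The remaining change, shown in the trainer_state.json diff below, advances the state from step 50 to step 100 and appends log_history entries for the training losses at steps 60 through 100 plus the step-100 evaluation. A short sketch, assuming the checkpoint has been downloaded locally (the path is an assumption), for reading those logged values back out:

import json

# Path is an assumption: the checkpoint directory as downloaded from this repo.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"], "epoch:", state["epoch"])

# Each log_history record is either a training log (has "loss")
# or an evaluation log (has "eval_loss").
for record in state["log_history"]:
    if "loss" in record:
        print(f"step {record['step']:>4}  train loss {record['loss']:.4f}")
    elif "eval_loss" in record:
        print(f"step {record['step']:>4}  eval loss  {record['eval_loss']:.4f}")

For this checkpoint the output would end with a training loss of 1.2282 and an eval loss of about 1.19 at step 100, matching the values in the diff below.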
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.06591957811470006,
+  "epoch": 0.13183915622940012,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 36.684,
       "eval_steps_per_second": 18.342,
       "step": 50
+    },
+    {
+      "epoch": 0.07910349373764008,
+      "grad_norm": 2.1022636890411377,
+      "learning_rate": 0.00016324453755953773,
+      "loss": 1.2797,
+      "step": 60
+    },
+    {
+      "epoch": 0.0922874093605801,
+      "grad_norm": 2.0848679542541504,
+      "learning_rate": 0.00015000000000000001,
+      "loss": 1.3117,
+      "step": 70
+    },
+    {
+      "epoch": 0.1054713249835201,
+      "grad_norm": 2.1038565635681152,
+      "learning_rate": 0.00013546048870425356,
+      "loss": 1.2998,
+      "step": 80
+    },
+    {
+      "epoch": 0.11865524060646011,
+      "grad_norm": 2.0592269897460938,
+      "learning_rate": 0.00012000256937760445,
+      "loss": 1.3859,
+      "step": 90
+    },
+    {
+      "epoch": 0.13183915622940012,
+      "grad_norm": 1.9211629629135132,
+      "learning_rate": 0.00010402659401094152,
+      "loss": 1.2282,
+      "step": 100
+    },
+    {
+      "epoch": 0.13183915622940012,
+      "eval_loss": 1.1935606002807617,
+      "eval_runtime": 4.3651,
+      "eval_samples_per_second": 36.654,
+      "eval_steps_per_second": 18.327,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2483537156505600.0,
+  "total_flos": 4967074313011200.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null