kostiantynk1205 committed
Commit 8eb2abb · verified · 1 parent: 3dfbc2c

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c99a17e61167bdd22027d5e74d6950bc7fccc49a5763828724aea074a8d7b1d
+oid sha256:188eeb86ef22fe0a859a7a70627a7f2148904fbb59b0e35a1ecb549e6efe65ee
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:235d71225018e2bbba6d0edf1a30d2d5fbbf49bed103f705d28c8cf7fc0e6120
+oid sha256:c980c4ec7c96f1817eb0e5876cdb98423414ae5a1f74709f44120e3ff20823fb
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c143614aa475513a4db1c2c5778c359a2fbe173dda8313ed145dc946741a2fbe
+oid sha256:6782a17cd94b88584cdd39e48a03aa9ee53aa99ebb01a7ca50bd04caffebba8e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8148b27980adb9b0bdd5c04049e531bfefcc05c06612ee72169cfb9b9fd7ee7
+oid sha256:0fd8212ec5ec3406d74a7f927b717dd30ea8a06115ee6582e14976f7b84b4b58
 size 1064
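Each binary checkpoint file above is stored as a Git LFS pointer (a `version` line, the `oid sha256:` of the actual content, and its `size` in bytes), so these diffs only swap the content hash while the pointer format stays the same. As an illustration only (not part of this repository), a minimal sketch of verifying a locally downloaded file against such a pointer, assuming hypothetical local paths:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check that a local file matches a Git LFS pointer (spec v1): same sha256 oid and size."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            sha.update(chunk)

    return (sha.hexdigest() == expected_oid
            and os.path.getsize(blob_path) == expected_size)

# Hypothetical usage:
# verify_lfs_pointer("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")
```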
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.00046687629412272777,
+  "epoch": 0.0009337525882454555,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 32.104,
       "eval_steps_per_second": 16.052,
       "step": 50
+    },
+    {
+      "epoch": 0.0005602515529472734,
+      "grad_norm": 6.204837799072266,
+      "learning_rate": 0.00016324453755953773,
+      "loss": 0.8176,
+      "step": 60
+    },
+    {
+      "epoch": 0.0006536268117718189,
+      "grad_norm": 4.421334743499756,
+      "learning_rate": 0.00015000000000000001,
+      "loss": 0.6348,
+      "step": 70
+    },
+    {
+      "epoch": 0.0007470020705963644,
+      "grad_norm": 5.26912260055542,
+      "learning_rate": 0.00013546048870425356,
+      "loss": 0.5701,
+      "step": 80
+    },
+    {
+      "epoch": 0.0008403773294209099,
+      "grad_norm": 12.316139221191406,
+      "learning_rate": 0.00012000256937760445,
+      "loss": 0.4992,
+      "step": 90
+    },
+    {
+      "epoch": 0.0009337525882454555,
+      "grad_norm": 7.5016350746154785,
+      "learning_rate": 0.00010402659401094152,
+      "loss": 0.4787,
+      "step": 100
+    },
+    {
+      "epoch": 0.0009337525882454555,
+      "eval_loss": 0.13443197309970856,
+      "eval_runtime": 1402.4134,
+      "eval_samples_per_second": 32.154,
+      "eval_steps_per_second": 16.077,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8763353962905600.0,
+  "total_flos": 1.75267079258112e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null