rakhman-llm committed
Commit 0591982 · verified · 1 Parent(s): 902c94d

Training in progress, step 43000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a9ad8913737016dba8650f3d7e7a145a4ce8913d458d7301634297efd00278c
+oid sha256:c0ecd85397c9873033c8fc72b8b958ac68dabe4479b23d12cd143f2bdaec2e49
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:674d8858e4474544083140dd69e34c045fc6b88c296cb2ac0d991075b6a9ebd1
+oid sha256:ba0de08f17477673f2db27ad78dc1cb0fb353da11bb179b4a06c1a9b0f4eb722
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07401c37c6a1bb4042ae60703b7c654f256d7be062ccd8d53ae2bf95d64aab34
+oid sha256:9125611692f04b4e4f7334517ac150a7a29269d10071636ff9110a9581f5f331
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:03af6009394f4a2fb32c0cb40ddd5c5c4f50ce08dbb2d5a4f3d15eba8d0516e9
+oid sha256:f0ef3fe9ebcf9f8f4fee3c065efe25035b81435ae8780b056e116dcb1a8a428f
 size 1064
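
The four files above are Git LFS pointer files rather than the artifacts themselves: each records the LFS spec version, the SHA-256 object id (oid) of the stored blob, and its size in bytes. Only the oid changes between steps 42500 and 43000 because the tensor contents differ while the serialized size stays the same. A minimal sketch for checking a downloaded artifact against the new pointer; the local path and the helper name are assumptions for illustration, not part of this commit:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks and return its hex SHA-256 digest,
    # which is what the LFS pointer's "oid sha256:..." field records.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid taken from the new model.safetensors pointer above.
expected = "c0ecd85397c9873033c8fc72b8b958ac68dabe4479b23d12cd143f2bdaec2e49"
actual = sha256_of("last-checkpoint/model.safetensors")  # assumed local path
print("match" if actual == expected else f"mismatch: {actual}")
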
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.8333333333333335,
+  "epoch": 2.8666666666666667,
   "eval_steps": 500,
-  "global_step": 42500,
+  "global_step": 43000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2998,6 +2998,41 @@
       "learning_rate": 1.1182222222222224e-06,
       "loss": 0.0549,
       "step": 42500
+    },
+    {
+      "epoch": 2.84,
+      "grad_norm": 0.13869111239910126,
+      "learning_rate": 1.0737777777777778e-06,
+      "loss": 0.0543,
+      "step": 42600
+    },
+    {
+      "epoch": 2.8466666666666667,
+      "grad_norm": 0.12573951482772827,
+      "learning_rate": 1.0293333333333334e-06,
+      "loss": 0.0499,
+      "step": 42700
+    },
+    {
+      "epoch": 2.8533333333333335,
+      "grad_norm": 0.23486433923244476,
+      "learning_rate": 9.848888888888889e-07,
+      "loss": 0.0533,
+      "step": 42800
+    },
+    {
+      "epoch": 2.86,
+      "grad_norm": 0.20186831057071686,
+      "learning_rate": 9.404444444444445e-07,
+      "loss": 0.0452,
+      "step": 42900
+    },
+    {
+      "epoch": 2.8666666666666667,
+      "grad_norm": 0.047486595809459686,
+      "learning_rate": 8.960000000000001e-07,
+      "loss": 0.0534,
+      "step": 43000
     }
   ],
   "logging_steps": 100,
@@ -3017,7 +3052,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.035228413952e+17,
+  "total_flos": 1.0474075717632e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null