masatochi committed on
Commit
3a93ec6
·
verified ·
1 Parent(s): 6025050

Training in progress, step 115, checkpoint

Browse files
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:797691a5c1bede0a3013242e12ad082cf5b59954a8a0fc0dd7a75224b4282d66
3
  size 83945296
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c302e89b218f98577e052fd6f9ae6583d00c39f84fff987a701313ab02e477ae
3
  size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1c6da235d2be338d88881dc48554c3fe142c62bdbac18c080cd1bbe05a538c5e
3
  size 43122580
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fef6524a33a4c5d88382f225ea87d038cc2a45b3239e2c50653ef19cfd0fcfeb
3
  size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3a4992a0a541377e74f550ef72fa9af6f8fee0ce175cbdc61ec06b94024e86a4
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4e6470455f66907a19173307202d571c6c3f1f8cd4ddcc5b663dc15013ec0cd
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:802e09b6cc63e64e726d0b68ba37b81d6a6fcf54cdf00e4821b3e38426a8a5c4
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d445cf5235925cca1a6d5e57200162b082b481744bf739a511d532ac296ab841
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.05379301913319885,
5
  "eval_steps": 34,
6
- "global_step": 110,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -809,6 +809,41 @@
809
  "learning_rate": 0.00010922683594633021,
810
  "loss": 1.11,
811
  "step": 110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
812
  }
813
  ],
814
  "logging_steps": 1,
@@ -828,7 +863,7 @@
828
  "attributes": {}
829
  }
830
  },
831
- "total_flos": 4.882849615105229e+17,
832
  "train_batch_size": 3,
833
  "trial_name": null,
834
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.05623815636652607,
5
  "eval_steps": 34,
6
+ "global_step": 115,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
809
  "learning_rate": 0.00010922683594633021,
810
  "loss": 1.11,
811
  "step": 110
812
+ },
813
+ {
814
+ "epoch": 0.054282046579864296,
815
+ "grad_norm": 1.154991865158081,
816
+ "learning_rate": 0.00010738525274748741,
817
+ "loss": 0.9469,
818
+ "step": 111
819
+ },
820
+ {
821
+ "epoch": 0.05477107402652974,
822
+ "grad_norm": 1.1687790155410767,
823
+ "learning_rate": 0.000105541147491597,
824
+ "loss": 1.1575,
825
+ "step": 112
826
+ },
827
+ {
828
+ "epoch": 0.05526010147319518,
829
+ "grad_norm": 1.1781808137893677,
830
+ "learning_rate": 0.00010369514993891452,
831
+ "loss": 1.0453,
832
+ "step": 113
833
+ },
834
+ {
835
+ "epoch": 0.05574912891986063,
836
+ "grad_norm": 1.1359920501708984,
837
+ "learning_rate": 0.00010184789049591299,
838
+ "loss": 1.072,
839
+ "step": 114
840
+ },
841
+ {
842
+ "epoch": 0.05623815636652607,
843
+ "grad_norm": 1.2046093940734863,
844
+ "learning_rate": 0.0001,
845
+ "loss": 0.921,
846
+ "step": 115
847
  }
848
  ],
849
  "logging_steps": 1,
 
863
  "attributes": {}
864
  }
865
  },
866
+ "total_flos": 5.104797324882739e+17,
867
  "train_batch_size": 3,
868
  "trial_name": null,
869
  "trial_params": null