infogep committed
Commit ccab7b4 · verified · 1 Parent(s): fc823ff

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c2a0e9101060b204be6a16b690e019d834fa0766aaf2299391407aa97665276a
+oid sha256:7b62b8b91228dc9599bcfac12a5216824474c0d9793c439f02ec32148339bd4c
 size 800116456
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1aa1a207edd26c3614c57078c6097a507caa520dacb367243d9f7e96c7767541
+oid sha256:7e313f2d8aa14524c2687300de8ec529b28ece9360c7093726a63f2b8eb7bd43
 size 406743412
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a1afc67d8443c7d735bd225b28b031da4967c4f9edd23293f4559934baf5bea
+oid sha256:b1851d1505f0cd3a6fd1c87c8576b78d87bc7cae7f240dc9cd241fbfe1d155fe
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:152cf5e1f9adf3dc2c608dbb3e394e09940ab42688cdb5a07d89f1cccb7f89a5
+oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.4909262657165527,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.028409090909090908,
+  "best_metric": 2.180443048477173,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 0.056818181818181816,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 15.588,
       "eval_steps_per_second": 3.913,
       "step": 50
+    },
+    {
+      "epoch": 0.03409090909090909,
+      "grad_norm": 6.00586462020874,
+      "learning_rate": 6.992307692307692e-06,
+      "loss": 2.9064,
+      "step": 60
+    },
+    {
+      "epoch": 0.03977272727272727,
+      "grad_norm": 6.616552352905273,
+      "learning_rate": 6.215384615384615e-06,
+      "loss": 2.5966,
+      "step": 70
+    },
+    {
+      "epoch": 0.045454545454545456,
+      "grad_norm": 7.654419898986816,
+      "learning_rate": 5.438461538461538e-06,
+      "loss": 2.2871,
+      "step": 80
+    },
+    {
+      "epoch": 0.05113636363636364,
+      "grad_norm": 6.925200939178467,
+      "learning_rate": 4.661538461538462e-06,
+      "loss": 1.8554,
+      "step": 90
+    },
+    {
+      "epoch": 0.056818181818181816,
+      "grad_norm": 13.769781112670898,
+      "learning_rate": 3.884615384615385e-06,
+      "loss": 1.2905,
+      "step": 100
+    },
+    {
+      "epoch": 0.056818181818181816,
+      "eval_loss": 2.180443048477173,
+      "eval_runtime": 47.2352,
+      "eval_samples_per_second": 15.687,
+      "eval_steps_per_second": 3.938,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -86,7 +129,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.9931848859713536e+16,
+  "total_flos": 3.986369771942707e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null