Mel-Iza0 committed · Commit 7687a53 · verified · 1 Parent(s): 73dba7b

Training in progress, step 10, checkpoint

checkpoint-10/adapter_config.json CHANGED
@@ -19,9 +19,9 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "q_proj",
     "o_proj",
     "k_proj",
-    "q_proj",
     "v_proj"
   ],
   "task_type": "CAUSAL_LM",
checkpoint-10/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec2ae84c83281c2be94195f6460ed91b8dc598ab2bc5a97a9d7a410b064ede57
+oid sha256:d46729a49bc65312125c027d4dfd17938a0e34784daa55c4274e81b99761d8eb
 size 27297032
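
The weight file itself is stored through Git LFS, so the diff only shows the pointer: the spec version, a sha256 object id, and the byte size. After downloading the resolved file, the digest can be recomputed as an integrity check — a minimal sketch, assuming the real adapter_model.safetensors sits at the path below and should match the pointer values from this commit:

```python
import hashlib

# Recompute the sha256 of the downloaded LFS object and compare it with
# the oid and size recorded in the pointer file from this commit.
EXPECTED_OID = "d46729a49bc65312125c027d4dfd17938a0e34784daa55c4274e81b99761d8eb"
EXPECTED_SIZE = 27297032

def verify(path: str) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify("checkpoint-10/adapter_model.safetensors"))
```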
checkpoint-10/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4c625e2f8ffef89aab694b1276bbf6dd0892425f211ad0a759c9926c15065fb
+oid sha256:e407504c0274504ea8ae1da85d4ff8aa72e90e9e5f40bbcacd01385ae49f44fc
 size 54678010
checkpoint-10/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43ca330165ba8d7437a8214e8d593473913e426042eb1309ff957f321c801eaf
+oid sha256:1f446d1d665fe499a7e8afff583956eaaf2eb2926c47d7d56c7673b47ebeae0f
 size 14512
checkpoint-10/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8349807745c80d91093c27c78b812eff8d79a14aaa3ebc250fab32e1eba091f2
+oid sha256:23aa51841e50368d5625a032a866966dc4cb2985a862cc82a8208ccc23da1821
 size 14512
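
Two RNG state files (rng_state_0.pth and rng_state_1.pth) suggest the run used two processes, with one saved generator state per rank so shuffling and dropout can be reproduced on resume. A minimal inspection sketch, assuming the files are ordinary torch.save dictionaries as written by the Hugging Face Trainer:

```python
import torch

# Load the per-rank RNG snapshots saved alongside this checkpoint and list
# what each one contains (typically Python, NumPy, CPU, and CUDA generator
# states; the exact keys depend on the Trainer version).
for rank in (0, 1):
    state = torch.load(f"checkpoint-10/rng_state_{rank}.pth")
    print(rank, sorted(state.keys()))
```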
checkpoint-10/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 1.2503728866577148,
+  "best_metric": 1.2690961360931396,
   "best_model_checkpoint": "./mistral/29-02-24-Weni-test-folder-upload_Zeroshot-2_max_steps-30_batch_8_2024-02-29_ppid_7/checkpoint-10",
   "epoch": 0.006199628022318661,
   "eval_steps": 10,
@@ -10,10 +10,10 @@
   "log_history": [
     {
       "epoch": 0.01,
-      "eval_loss": 1.2503728866577148,
-      "eval_runtime": 207.1384,
-      "eval_samples_per_second": 13.841,
-      "eval_steps_per_second": 3.461,
+      "eval_loss": 1.2690961360931396,
+      "eval_runtime": 206.4706,
+      "eval_samples_per_second": 13.886,
+      "eval_steps_per_second": 3.473,
       "step": 10
     }
   ],
@@ -22,7 +22,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 10,
-  "total_flos": 5259188527693824.0,
+  "total_flos": 5359972082778112.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
checkpoint-10/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00a49a41db2912f3869dfbf7750585d354fbdd598aed12c4aa8d7c648e1e617b
+oid sha256:c8fa4a6d3049983810523685fa90b9ab4fc3fd64b76b897e665e803a3e9ada3b
 size 5112
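
training_args.bin is the pickled TrainingArguments object for the run; together with optimizer.pt, the RNG snapshots, and trainer_state.json it is what lets the job pick up where it stopped. A minimal resumption sketch, assuming a Trainer has already been constructed with the same model, datasets, and arguments (the trainer variable is hypothetical):

```python
# Continue the run from this directory: the Trainer reloads the optimizer
# state, RNG snapshots, and step counter before training toward max_steps.
trainer.train(resume_from_checkpoint="checkpoint-10")
```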