MohamedAhmedAE committed on
Commit 748f987 · verified · 1 Parent(s): ff696d9

Training in progress, step 85400

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b6804e9f66223b09bb658d657fc6e13aa50694672ebb634d1c800766bc2fa7b9
+ oid sha256:c827c6acb286eef9eb5d9fab2316b7545ab03a9b49ef673a99c9760af01f486c
  size 2684416208
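
Note: the file above is a Git LFS pointer, not the adapter weights themselves; this commit only swaps the sha256 oid while the size stays at 2684416208 bytes. A minimal sketch for checking a locally downloaded blob against the oid recorded in the pointer, assuming Python's standard hashlib and a hypothetical local path:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so a multi-GB blob never has to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local path; the expected value is the new oid from this diff.
expected = "c827c6acb286eef9eb5d9fab2316b7545ab03a9b49ef673a99c9760af01f486c"
print(sha256_of("adapter_model.safetensors") == expected)
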
last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "down_proj",
- "q_proj",
- "up_proj",
+ "k_proj",
  "gate_proj",
+ "o_proj",
+ "down_proj",
  "v_proj",
- "k_proj",
- "o_proj"
+ "q_proj",
+ "up_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b6804e9f66223b09bb658d657fc6e13aa50694672ebb634d1c800766bc2fa7b9
+ oid sha256:9c35a93f0fbc67dcff98e79a028ed18bce041ca6c504534cfeba1ffe113541a6
  size 2684416208
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:356a70458aa868105f82ab5806ad493a4b7a2b1a5f4f312d5fad9f6e2d84bd5f
+ oid sha256:630b4e45f445964eef38461a3e815468bcb4d79c516277156cdd1f608dc6fd5f
  size 1364844242
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:146268fced5a4a83c4515dbc3b480e5b723bda9119bcafea221f6ab6c5493a08
+ oid sha256:9729d7d0dcfc381ff20b9d9582e8dcd5a65ff6a31a9fa0a4ab53b8e8735d6817
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f11268aacf97b04105141009394a27dfe91653458e6c3f073d55a42d01c2ddb3
+ oid sha256:998cab517918ddd7b7621f6bc4d2103805ef631de606bc987cf4431529e437c9
  size 1064
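
optimizer.pt, scheduler.pt, and rng_state.pth hold the torch-serialized optimizer, learning-rate scheduler, and RNG states that make an exact training resume possible; here only their LFS oids change. A rough way to inspect them locally, assuming PyTorch is installed and the last-checkpoint directory has been pulled from LFS:

import torch

# map_location="cpu" avoids needing a GPU; weights_only=False may be needed on
# newer PyTorch versions because these files contain arbitrary pickled state.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)
print(type(optimizer_state), type(scheduler_state), type(rng_state))
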
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.11851011091544887,
+ "epoch": 0.11628456892642636,
  "eval_steps": 200,
- "global_step": 85200,
+ "global_step": 83600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2317,62 +2317,6 @@
  "learning_rate": 1.9340168348018822e-05,
  "loss": 1.7144,
  "step": 83600
- },
- {
- "epoch": 0.11656276167505418,
- "grad_norm": 0.6080924272537231,
- "learning_rate": 1.933704269142008e-05,
- "loss": 1.6828,
- "step": 83800
- },
- {
- "epoch": 0.11684095442368199,
- "grad_norm": 0.3429834246635437,
- "learning_rate": 1.9333909902897212e-05,
- "loss": 1.7374,
- "step": 84000
- },
- {
- "epoch": 0.11711914717230981,
- "grad_norm": 0.34908148646354675,
- "learning_rate": 1.9330769984843144e-05,
- "loss": 1.7273,
- "step": 84200
- },
- {
- "epoch": 0.11739733992093762,
- "grad_norm": 0.47220101952552795,
- "learning_rate": 1.932762293965624e-05,
- "loss": 1.6758,
- "step": 84400
- },
- {
- "epoch": 0.11767553266956543,
- "grad_norm": 0.5649632215499878,
- "learning_rate": 1.9324468769740307e-05,
- "loss": 1.6967,
- "step": 84600
- },
- {
- "epoch": 0.11795372541819325,
- "grad_norm": 0.3771503269672394,
- "learning_rate": 1.932130747750461e-05,
- "loss": 1.7156,
- "step": 84800
- },
- {
- "epoch": 0.11823191816682106,
- "grad_norm": 0.3423559367656708,
- "learning_rate": 1.9318139065363826e-05,
- "loss": 1.6854,
- "step": 85000
- },
- {
- "epoch": 0.11851011091544887,
- "grad_norm": 0.4594859182834625,
- "learning_rate": 1.93149635357381e-05,
- "loss": 1.7195,
- "step": 85200
  }
  ],
  "logging_steps": 200,
@@ -2392,7 +2336,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.6558717875730022e+18,
+ "total_flos": 1.596842508550865e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null