felipeoes committed Commit b05e6b8 · verified · 1 Parent(s): c6d94cb

Model save
README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/felipealumni-usp/huggingface/runs/jjfyallp)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/felipealumni-usp/huggingface/runs/14hhk8v2)
 
 This model was trained with SFT.
 
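The hunk context ends at print(output["generated_text"]), which matches the usage snippet that TRL writes into auto-generated SFT model cards. A minimal sketch of that pattern, assuming a standard transformers text-generation pipeline; the model id is a hypothetical placeholder, since the actual repo name is not visible in this diff:

# Minimal usage sketch, assuming the TRL-style README snippet this
# context line comes from. The model id is a hypothetical placeholder.
from transformers import pipeline

generator = pipeline("text-generation", model="felipeoes/MODEL_ID")  # placeholder id
output = generator("Your prompt here", max_new_tokens=64)[0]  # pipeline returns a list of dicts
print(output["generated_text"])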
adapter_config.json CHANGED
@@ -23,13 +23,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
-    "k_proj",
+    "up_proj",
     "v_proj",
-    "down_proj",
+    "gate_proj",
     "q_proj",
+    "down_proj",
     "o_proj",
-    "up_proj"
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc77982bfe49132e805495731a85eaf071bf4c83503fed5ff6644bed6b449105
+oid sha256:ee6f852710431925d2a1f6c53b6eee2ab41a88503b8e68e942b7141261fe3378
 size 2185294624
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34a6ae75d47d454f0101c9ca7b608e588594fc7b30eb17f2d31d88aa58e3bdf8
+oid sha256:64a1648b309c8fd7212c79a531682373982e420f4d0a03ccde70dcf3209f0fd4
 size 6904
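Both adapter_model.safetensors and training_args.bin are tracked with Git LFS, so the repo stores only a three-line pointer: a spec version, the SHA-256 of the real content (oid), and its size in bytes. In both hunks only the oid changes while the size is identical, consistent with retrained weights of the same shape. A small stdlib-only sketch for checking a downloaded file against its pointer's oid; the path assumes the file sits in the current directory:

# Sketch: verify a downloaded LFS file against the oid in its pointer.
import hashlib

def sha256_of(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

expected = "ee6f852710431925d2a1f6c53b6eee2ab41a88503b8e68e942b7141261fe3378"
print(sha256_of("adapter_model.safetensors") == expected)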