AlyGreo committed · Commit ccba40c (verified) · Parent: 91c23f7

End of training

README.md CHANGED
@@ -27,15 +27,15 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/agreu77/huggingface/runs/64jcqo2e)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/agreu77/huggingface/runs/t5row45h)
 
 This model was trained with SFT.
 
 ### Framework versions
 
 - TRL: 0.12.1
-- Transformers: 4.46.2
-- Pytorch: 2.5.1+cu121
+- Transformers: 4.46.3
+- Pytorch: 2.4.0
 - Datasets: 3.1.0
 - Tokenizers: 0.20.3
 
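The README states the model was trained with SFT under TRL 0.12.1, with logging to the Weights & Biases run linked above. A minimal sketch of what such a run can look like follows; the dataset, output directory, and training arguments are illustrative assumptions, not the author's actual script, while the LoRA settings mirror the adapter_config.json added in this commit.

```python
# Illustrative sketch only: a TRL 0.12-style SFT run that would produce a
# LoRA adapter like the one in this commit. Dataset and output_dir are
# placeholders, not taken from this repo.
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("stanfordnlp/imdb", split="train")  # placeholder dataset

# LoRA settings matching the adapter_config.json added below (r=8, alpha=8).
peft_config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.0,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)

training_args = SFTConfig(
    output_dir="gemma-2-2b-sft",  # placeholder
    dataset_text_field="text",
    report_to="wandb",            # matches the W&B badge in the README
)

trainer = SFTTrainer(
    model="google/gemma-2-2b",    # base model named in adapter_config.json
    args=training_args,
    train_dataset=dataset,
    peft_config=peft_config,
)
trainer.train()
```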
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "google/gemma-2-2b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 8,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "o_proj",
+    "down_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
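This adapter_config.json describes a PEFT LoRA adapter (r=8, lora_alpha=8) over all attention and MLP projections of google/gemma-2-2b. A minimal loading sketch follows; the adapter path is a placeholder for this repo's id or a local checkout.

```python
# Sketch of loading this LoRA adapter with transformers + peft.
# "path/to/this-adapter" is a placeholder, not a real repo id.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b",            # base_model_name_or_path in the config
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")

# Wraps the frozen base model with the trained LoRA weights.
model = PeftModel.from_pretrained(base, "path/to/this-adapter")

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```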
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dde6aa0684cf6e909cf723211010ee20106a9fa669ef078f3e3a37dd73207e69
+size 41581360
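The three lines above are a Git LFS pointer: the oid field is the SHA-256 of the actual adapter_model.safetensors blob (41,581,360 bytes). If the file is fetched outside of git-lfs, it can be checked against the pointer, for example:

```python
# Sketch: verify a downloaded LFS object against the pointer's sha256 oid.
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expect: dde6aa0684cf6e909cf723211010ee20106a9fa669ef078f3e3a37dd73207e69
print(file_sha256("adapter_model.safetensors"))
```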
runs/Dec06_11-14-13_b78e070a1243/events.out.tfevents.1733483662.b78e070a1243.506.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcda08bf4f1464fe767262059f7309077fd08f8815ae1aea058da1b93a18edb7
+size 7720
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0abe8d78075218fb2f0e35fe2ca31576d3241bac60637fd1c3274c4ee9dd486
+oid sha256:e6517df23e72e8234c910f907d37c8ac95e3ffd452fce6d20d4a1d55c93bcc74
 size 5560