secemp9 committed on
Commit a0e37ea · verified · 1 Parent(s): 6e8b76b

Upload folder using huggingface_hub

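The commit message indicates the files were pushed with the `huggingface_hub` client. As a rough sketch (not the author's exact command), an upload like this can be reproduced with `HfApi.upload_folder`; the local folder path below is a placeholder:

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Push every file in the local folder to the Hub as a single commit.
# "./outputs_solution_to_thought/merged" is a hypothetical local path.
api.upload_folder(
    folder_path="./outputs_solution_to_thought/merged",
    repo_id="secemp9/TraceBack-12b",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```

Large files (such as `tokenizer.json` and the `*.safetensors` shards) are stored via Git LFS, which is consistent with the `.gitattributes` change below.
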
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,127 @@
---
library_name: peft
license: apache-2.0
base_model: unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit
tags:
- generated_from_trainer
datasets:
- instruction_solution_to_thought_dataset.jsonl
model-index:
- name: outputs_solution_to_thought
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.7.0`
```yaml
# Base model configuration
base_model: unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit
load_in_4bit: true

# Dataset configuration
datasets:
  - path: instruction_solution_to_thought_dataset.jsonl
    type: chat_template

# Chat template
chat_template: chatml

# LoRA adapter configuration
adapter: lora
lora_r: 16
lora_alpha: 16
lora_dropout: 0
lora_target_modules:
  - q_proj
  - k_proj
  - v_proj
  - o_proj
  - gate_proj
  - up_proj
  - down_proj

# Training hyperparameters
max_seq_length: 128000
micro_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 3e-5
num_epochs: 2
warmup_steps: 100
optimizer: adamw_8bit
weight_decay: 0.01
lr_scheduler_type: cosine
max_grad_norm: 1.0
output_dir: ./outputs_solution_to_thought
seed: 3407
merge_lora: true
hf_upload: true
hf_repo: secemp9/TraceBack-12b
xformers_attention:
flash_attention: True
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true
#fp16: true
#load_in_8bit: true  # Enable 8-bit loading for LoRA finetuning
bf16: true  # Enable BF16 mixed precision
# Multi-GPU training with DeepSpeed
deepspeed: deepspeed_configs/zero2.json

# Optional: Enable gradient checkpointing
gradient_checkpointing: true

```

</details><br>
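
For readers more familiar with PEFT than with Axolotl, the adapter settings above correspond to a standard LoRA configuration. A minimal sketch of an equivalent `peft.LoraConfig`, assuming you wanted to reproduce the same adapter shape directly (an illustration, not the training script used here):

```python
from peft import LoraConfig

# Mirrors the axolotl settings: r=16, alpha=16, no dropout,
# adapters on all attention and MLP projection layers.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.0,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    task_type="CAUSAL_LM",
)
```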

# outputs_solution_to_thought

This model is a fine-tuned version of [unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit](https://huggingface.co/unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit) on the instruction_solution_to_thought_dataset.jsonl dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 3407
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 8
- total_train_batch_size: 128 (see the note after this list)
- total_eval_batch_size: 16
- optimizer: adamw_8bit with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 100
- num_epochs: 2.0

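The reported total_train_batch_size follows from the per-device settings: micro_batch_size × gradient_accumulation_steps × num_devices = 2 × 8 × 8 = 128 sequences per optimizer step.
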
### Training results

### Framework versions

- PEFT 0.14.0
- Transformers 4.48.3
- Pytorch 2.5.1+cu124
- Datasets 3.2.0
- Tokenizers 0.21.0
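
Since the axolotl config sets `merge_lora: true` and `hf_upload: true`, the safetensors shards committed below appear to be merged full-model weights rather than a bare adapter. A minimal inference sketch, assuming the merged checkpoint loads as a regular `transformers` causal LM and that the ChatML template ships with the tokenizer (both are assumptions, not guarantees from the model card):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "secemp9/TraceBack-12b"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")

# Build a ChatML-style prompt via the tokenizer's chat template.
# The prompt text is purely illustrative.
messages = [
    {"role": "user", "content": "Here is a problem and its solution: ..."},
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=512)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```
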
config.json ADDED
@@ -0,0 +1,50 @@
{
  "_attn_implementation_autoset": true,
  "_name_or_path": "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit",
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 131072,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 40,
  "num_key_value_heads": 8,
  "pad_token_id": 10,
  "quantization_config": {
    "_load_in_4bit": true,
    "_load_in_8bit": false,
    "bnb_4bit_compute_dtype": "bfloat16",
    "bnb_4bit_quant_storage": "uint8",
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_use_double_quant": true,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": [
      "lm_head",
      "multi_modal_projector",
      "merger",
      "modality_projection"
    ],
    "llm_int8_threshold": 6.0,
    "load_in_4bit": true,
    "load_in_8bit": false,
    "quant_method": "bitsandbytes"
  },
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.3",
  "unsloth_fixed": true,
  "use_cache": false,
  "vocab_size": 131072
}
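
The quantization_config block records the bitsandbytes settings the checkpoint was saved with. A minimal sketch of an equivalent `transformers` BitsAndBytesConfig, if you wanted to apply the same 4-bit NF4 settings when loading (illustrative only; the values are copied from the JSON above):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 with double quantization and bfloat16 compute,
# matching the quantization_config in config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "secemp9/TraceBack-12b",
    quantization_config=bnb_config,
    device_map="auto",
)
```
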
generation_config.json ADDED
@@ -0,0 +1,8 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "max_length": 131072,
  "pad_token_id": 10,
  "transformers_version": "4.48.3"
}
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ef793aab7f7093b35bc89285e1e118ed8caaf3d549a884b4080115609d36044
size 4999749144
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28d0a9815ee07ce4289084930adc2d22c31dfa33af59c4bc3140632625b9813a
size 3311649399
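
The two entries above are Git LFS pointer files: the actual shard bytes live in LFS storage, and the pointer records only the SHA-256 digest and size. A small sketch for checking a downloaded shard against its pointer (the local filename is whatever your download tool saved it as):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-gigabyte shards don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("model-00001-of-00002.safetensors"))
# Expected (from the pointer above):
# 6ef793aab7f7093b35bc89285e1e118ed8caaf3d549a884b4080115609d36044
```
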
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
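
These special tokens, together with pad_token_id: 10 in config.json, determine how padding and sequence boundaries are handled. A quick sanity check, assuming the tokenizer loads directly from this repository:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("secemp9/TraceBack-12b")

# Should print <s>, </s>, <pad>, <unk> per special_tokens_map.json.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
print(tokenizer.pad_token_id)  # expected to match pad_token_id (10) in config.json
```
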
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0240ce510f08e6c2041724e9043e33be9d251d1e4a4d94eb68cd47b954b61d2
size 17078292
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff