Tohrumi committed
Commit 96b31b4 · verified · 1 Parent(s): 92bba5e

Model save

README.md CHANGED
@@ -2,11 +2,11 @@
 license: apache-2.0
 library_name: peft
 tags:
+- unsloth
 - trl
 - sft
-- translation
 - generated_from_trainer
-base_model: mistralai/Mistral-7B-v0.1
+base_model: unsloth/mistral-7b-bnb-4bit
 model-index:
 - name: MistralAI_iwslt15_en_vi_manual
   results: []
@@ -17,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # MistralAI_iwslt15_en_vi_manual
 
-This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unknown dataset.
+This model is a fine-tuned version of [unsloth/mistral-7b-bnb-4bit](https://huggingface.co/unsloth/mistral-7b-bnb-4bit) on an unknown dataset.
 
 ## Model description
 
@@ -40,20 +40,18 @@ The following hyperparameters were used during training:
 - train_batch_size: 8
 - eval_batch_size: 8
 - seed: 4269
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 1
 - num_epochs: 1
 - mixed_precision_training: Native AMP
 
-### Training results
-
-
-
 ### Framework versions
 
 - PEFT 0.10.0
-- Transformers 4.39.3
-- Pytorch 2.2.1
-- Datasets 2.18.0
+- Transformers 4.38.2
+- Pytorch 2.2.1+cu121
+- Datasets 2.19.0
 - Tokenizers 0.15.2
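
The two added hyperparameters are mutually consistent: 8 per-device examples × 4 accumulation steps = 32, the reported total_train_batch_size. A minimal sketch of `TrainingArguments` matching the listed values follows; the training script itself is not part of this commit, so `output_dir` is a placeholder and anything not listed in the README is a library default, not a known fact.

```python
from transformers import TrainingArguments

# Sketch reconstructed from the README hunk above; output_dir is a placeholder
# and unlisted values are library defaults, not facts from this commit.
args = TrainingArguments(
    output_dir="MistralAI_iwslt15_en_vi_manual",
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,   # 8 * 4 = 32 = total_train_batch_size
    seed=4269,
    lr_scheduler_type="linear",      # the Adam betas/epsilon above are the defaults
    warmup_steps=1,
    num_train_epochs=1,
    fp16=True,                       # "Native AMP" mixed-precision training
)
```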
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+  "base_model_name_or_path": "unsloth/mistral-7b-bnb-4bit",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -11,17 +11,22 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 16,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 64,
+  "r": 16,
   "rank_pattern": {},
-  "revision": null,
+  "revision": "unsloth",
   "target_modules": [
+    "k_proj",
+    "down_proj",
+    "gate_proj",
+    "v_proj",
     "q_proj",
-    "v_proj"
+    "up_proj",
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:79cf4849132a2098f10c59f47222c8f12ad2fdd76c6f3bd311012f57446bf5bf
-size 109069176
+oid sha256:0846dd5dccaf1e9939a67a154d9c63c75124da79e549fe9a005c6fd04d028b54
+size 167832240
runs/Apr22_03-08-14_b3b625229fbf/events.out.tfevents.1713755394.b3b625229fbf.943.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d81e1e6a186ded33d58914dbdaa058912ec1ab638b56e637a2abe5b1a24f1710
+size 12489
special_tokens_map.json CHANGED
@@ -13,7 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "</s>",
+  "pad_token": "<s>",
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -62,12 +62,6 @@
           "id": "A",
           "type_id": 0
         }
-      },
-      {
-        "SpecialToken": {
-          "id": "</s>",
-          "type_id": 0
-        }
       }
     ],
     "pair": [
@@ -83,12 +77,6 @@
           "type_id": 0
         }
       },
-      {
-        "SpecialToken": {
-          "id": "</s>",
-          "type_id": 0
-        }
-      },
       {
         "SpecialToken": {
           "id": "<s>",
@@ -100,24 +88,9 @@
           "id": "B",
           "type_id": 1
         }
-      },
-      {
-        "SpecialToken": {
-          "id": "</s>",
-          "type_id": 1
-        }
       }
     ],
     "special_tokens": {
-      "</s>": {
-        "id": "</s>",
-        "ids": [
-          2
-        ],
-        "tokens": [
-          "</s>"
-        ]
-      },
       "<s>": {
         "id": "<s>",
         "ids": [
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "add_bos_token": true,
-  "add_eos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -32,9 +32,9 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
-  "max_seq_length": 1024,
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "</s>",
+  "model_max_length": 32768,
+  "pad_token": "<s>",
+  "padding_side": "right",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ef14cbbaa82c9476964a862acb20d4f9512f4fdd2dd3044e9cf6352408a41d8
-size 4984
+oid sha256:e77d288f2f7e79efe7562cb0c176791526cf3d81156530170345f861e1f4c4d2
+size 4920