Khetterman committed on
Commit 7d88fb8 · verified · 1 Parent(s): 482d91f

Update README.md

Files changed (1): README.md (+33, -58)
README.md CHANGED
 
---
base_model:
- Bacon666/Phenom-12B-0.1
- benhaotang/nemo-math-science-philosophy-12B
- FallenMerick/MN-Chunky-Lotus-12B
- GalrionSoftworks/Canidori-12B-v1
- GalrionSoftworks/Pleiades-12B-v1
- Luni/StarDust-12b-v2
- Nohobby/InsanityB
- Nohobby/MN-12B-Siskin-v0.2
- ProdeusUnity/Stellar-Odyssey-12b-v0.0
- Pyroserenus/Orthrus-12b-v0.8
- rityak/MN-Maghin-12B
- rityak/MN-RocinanteCelestar-12B
- royallab/MN-LooseCannon-12B-v2
- spow12/ChatWaifu_12B_v2.0
- Svak/MN-12B-Inferor-v0.0
- ThijsL202/MadMix-Unleashed-12B
- Trappu/Abomination-merge-attempt-12B
- VongolaChouko/Starcannon-Unleashed-12B-v1.0
library_name: transformers
tags:
- mergekit
 
---
# DarkAtom-12B-v3
> *Something that shouldn't exist*

![DarkAtomLogo256.png](https://cdn-uploads.huggingface.co/production/uploads/673125091920e70ac26c8a2e/mPixgwI3P4oONLCKATqmd.png)

This is an interesting merge of **18 cool models**, created using [mergekit](https://github.com/arcee-ai/mergekit).

It took quite a bit of my time, mostly due to the limitations of my old hardware, but I think it was definitely worth it.

My thanks to the authors of the original models; your work is incredible.

Enjoy exploring :)
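
If you want to try it, here is a minimal loading sketch using transformers. The repo id is assumed from this card's author and title, and the generation settings are illustrative, not the author's recommendation:

```python
# Minimal usage sketch; "Khetterman/DarkAtom-12B-v3" is an assumed repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Khetterman/DarkAtom-12B-v3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # the merge itself was done in bfloat16
    device_map="auto",           # requires the accelerate package
)

prompt = "Write a short scene aboard a derelict starship."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```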

## Merge Details
This model was merged using a multistep (Slerp | Model Stock | Ties) process and remerged with some model variations.

### Models

The following models were included in the merge:

* [Bacon666/Phenom-12B-0.1](https://huggingface.co/Bacon666/Phenom-12B-0.1)
* [benhaotang/nemo-math-science-philosophy-12B](https://huggingface.co/benhaotang/nemo-math-science-philosophy-12B)
* [FallenMerick/MN-Chunky-Lotus-12B](https://huggingface.co/FallenMerick/MN-Chunky-Lotus-12B)
* [GalrionSoftworks/Canidori-12B-v1](https://huggingface.co/GalrionSoftworks/Canidori-12B-v1)
* [GalrionSoftworks/Pleiades-12B-v1](https://huggingface.co/GalrionSoftworks/Pleiades-12B-v1)
* [Luni/StarDust-12b-v2](https://huggingface.co/Luni/StarDust-12b-v2)
* [Nohobby/InsanityB](https://huggingface.co/Nohobby/InsanityB)
* [Nohobby/MN-12B-Siskin-v0.2](https://huggingface.co/Nohobby/MN-12B-Siskin-v0.2)
* [ProdeusUnity/Stellar-Odyssey-12b-v0.0](https://huggingface.co/ProdeusUnity/Stellar-Odyssey-12b-v0.0)
* [Pyroserenus/Orthrus-12b-v0.8](https://huggingface.co/Pyroserenus/Orthrus-12b-v0.8)
* [rityak/MN-Maghin-12B](https://huggingface.co/rityak/MN-Maghin-12B)
* [rityak/MN-RocinanteCelestar-12B](https://huggingface.co/rityak/MN-RocinanteCelestar-12B)
* [royallab/MN-LooseCannon-12B-v2](https://huggingface.co/royallab/MN-LooseCannon-12B-v2)
* [spow12/ChatWaifu_12B_v2.0](https://huggingface.co/spow12/ChatWaifu_12B_v2.0)
* [Svak/MN-12B-Inferor-v0.0](https://huggingface.co/Svak/MN-12B-Inferor-v0.0)
* [ThijsL202/MadMix-Unleashed-12B](https://huggingface.co/ThijsL202/MadMix-Unleashed-12B)
* [Trappu/Abomination-merge-attempt-12B](https://huggingface.co/Trappu/Abomination-merge-attempt-12B)
* [VongolaChouko/Starcannon-Unleashed-12B-v1.0](https://huggingface.co/VongolaChouko/Starcannon-Unleashed-12B-v1.0)

### Configuration

The following YAML configurations were used to produce this model. Some parameters may follow a different pattern, but that is not essential to understanding my workflow.

  ```yaml
# Generation_1 from 18 original models:
models:
  - model: Original_Model_M
  - model: Original_Model_K
merge_method: slerp           # assumed: the per-layer t list below is slerp's parameter
base_model: Original_Model_M  # assumed: this line was elided in the diff
dtype: bfloat16
parameters:
  t: [0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9]
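# Note: in a slerp merge, t is the interpolation factor between the two models;
# t = 0.0 keeps the base model's weights and t = 1.0 takes the other model's,
# so an alternating 0.1/0.9 list swings each successive layer block between parents.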
 
# Variant_N from Generation_1 and AlphaMerge:
models:
  - model: SecretModel_A
    parameters:
      density: [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
      weight: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
  - model: SecretModel_B
    parameters:
      density: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2]
      weight: [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8]
  - model: SecretModel_C
    parameters:
      density: [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3]
      weight: [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7]
  - model: SecretModel_D
    parameters:
      density: [0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4]
      weight: [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6]
  - model: SecretModel_E
    parameters:
      density: [0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5]
      weight: [0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5]
  - model: SecretModel_F
    parameters:
      density: [0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
      weight: [0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
  - model: SecretModel_G
    parameters:
      density: [0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
      weight: [0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]
  - model: SecretModel_H
    parameters:
      density: [0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
      weight: [0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
  - model: SecretModel_I
    parameters:
      density: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
      weight: [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
merge_method: ties
base_model: AlphaMerge
dtype: bfloat16
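# Note: in a ties merge, density is the fraction of each model's delta from the
# base that survives pruning, and weight scales what remains before the
# sign-consensus merge; the 25-entry lists vary both values layer by layer.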
 

# Model Stock merges were used to create:
#   + Generation_2 from the SecretModels
#   + Variant_M from Generation_2
#   + AlphaMerge from intuitively selected (and since forgotten) models
models:
  - model: SecretModel_A
  - model: SecretModel_B
# ...(further SecretModels elided in the diff)
merge_method: model_stock
base_model: SecretModel_A
dtype: bfloat16
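# Note: model_stock derives each model's interpolation weight from the geometry
# (angles) of its weights relative to the base model, which is why no per-model
# density/weight lists are needed here.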
 
# Final Variant from Variant_N, Variant_M, and one good model from Generation_1:
models:
  - model: Variant_N
    parameters:
      density: [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
      weight: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
  - model: Good_G1_Model
    parameters:
      density: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2]
      weight: [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8]
merge_method: ties
base_model: Variant_M
dtype: bfloat16
```
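
Any stage above can be run with mergekit's `mergekit-yaml` CLI (e.g. `mergekit-yaml config.yaml ./output`), or from Python. A rough sketch of the latter follows; the file and output names are hypothetical, and the API calls follow mergekit's README, so verify them against your installed version:

```python
# Sketch: run one merge stage from Python; paths here are hypothetical.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("generation1_step.yaml", encoding="utf-8") as fp:  # hypothetical config file
    config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    config,
    out_path="./merged-step",  # hypothetical output directory
    options=MergeOptions(
        cuda=False,            # set True if a GPU is available
        copy_tokenizer=True,   # carry a tokenizer into the output
        lazy_unpickle=True,    # lower peak RAM while loading shards
    ),
)
```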

> Have a good time 🖤