---
base_model:
- Bacon666/Phenom-12B-0.1
- benhaotang/nemo-math-science-philosophy-12B
- FallenMerick/MN-Chunky-Lotus-12B
- GalrionSoftworks/Canidori-12B-v1
- GalrionSoftworks/Pleiades-12B-v1
- Luni/StarDust-12b-v2
- Nohobby/InsanityB
- Nohobby/MN-12B-Siskin-v0.2
- ProdeusUnity/Stellar-Odyssey-12b-v0.0
- Pyroserenus/Orthrus-12b-v0.8
- rityak/MN-Maghin-12B
- rityak/MN-RocinanteCelestar-12B
- royallab/MN-LooseCannon-12B-v2
- spow12/ChatWaifu_12B_v2.0
- Svak/MN-12B-Inferor-v0.0
- ThijsL202/MadMix-Unleashed-12B
- Trappu/Abomination-merge-attempt-12B
- VongolaChouko/Starcannon-Unleashed-12B-v1.0
library_name: transformers
tags:
- mergekit
- merge
- bfloat16
- safetensors
- 12b
- chat
- creative
- roleplay
- conversational
- creative-writing
- not-for-all-audiences
language:
- en
- ru

---
# DarkAtom-12B-v3

>Something that shouldn't exist.

![DarkAtomLogo256.png](https://cdn-uploads.huggingface.co/production/uploads/673125091920e70ac26c8a2e/mPixgwI3P4oONLCKATqmd.png)

This is an interesting merge of **18 cool models**, created using [mergekit](https://github.com/arcee-ai/mergekit).
It took quite a bit of my time, mostly due to the limitations of my old hardware, but I think it was definitely worth it.
Enjoy exploring :)

## Merge Details
### Method

This model was merged using a multistep (SLERP | Model Stock | TIES) process, remerging several model variations along the way to get the best result.
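
For reference, a step like this can be driven from Python with mergekit's API (the `mergekit-yaml` CLI works just as well). A minimal sketch, assuming a config file `slerp_step.yml` shaped like the first block in the Configuration section; the file names, paths, and options here are placeholders, not my exact setup:

```python
# A minimal sketch of running one merge step via mergekit's Python API.
# "slerp_step.yml" and the output path are hypothetical placeholders.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("slerp_step.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path="./generation_1/model_MK",  # each intermediate merge is saved and fed into the next step
    options=MergeOptions(
        cuda=False,           # CPU merging works on old hardware, just slowly
        copy_tokenizer=True,  # carry the tokenizer over from the base model
        lazy_unpickle=True,   # lower peak RAM while loading checkpoints
        low_cpu_memory=True,
    ),
)
```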

### Models

The following models were included in the merge:
* [Bacon666/Phenom-12B-0.1](https://huggingface.co/Bacon666/Phenom-12B-0.1)
* [benhaotang/nemo-math-science-philosophy-12B](https://huggingface.co/benhaotang/nemo-math-science-philosophy-12B)
* [FallenMerick/MN-Chunky-Lotus-12B](https://huggingface.co/FallenMerick/MN-Chunky-Lotus-12B)
* [GalrionSoftworks/Canidori-12B-v1](https://huggingface.co/GalrionSoftworks/Canidori-12B-v1)
* [GalrionSoftworks/Pleiades-12B-v1](https://huggingface.co/GalrionSoftworks/Pleiades-12B-v1)
* [Luni/StarDust-12b-v2](https://huggingface.co/Luni/StarDust-12b-v2)
* [Nohobby/InsanityB](https://huggingface.co/Nohobby/InsanityB)
* [Nohobby/MN-12B-Siskin-v0.2](https://huggingface.co/Nohobby/MN-12B-Siskin-v0.2)
* [ProdeusUnity/Stellar-Odyssey-12b-v0.0](https://huggingface.co/ProdeusUnity/Stellar-Odyssey-12b-v0.0)
* [Pyroserenus/Orthrus-12b-v0.8](https://huggingface.co/Pyroserenus/Orthrus-12b-v0.8)
* [rityak/MN-Maghin-12B](https://huggingface.co/rityak/MN-Maghin-12B)
* [rityak/MN-RocinanteCelestar-12B](https://huggingface.co/rityak/MN-RocinanteCelestar-12B)
* [royallab/MN-LooseCannon-12B-v2](https://huggingface.co/royallab/MN-LooseCannon-12B-v2)
* [spow12/ChatWaifu_12B_v2.0](https://huggingface.co/spow12/ChatWaifu_12B_v2.0)
* [Svak/MN-12B-Inferor-v0.0](https://huggingface.co/Svak/MN-12B-Inferor-v0.0)
* [ThijsL202/MadMix-Unleashed-12B](https://huggingface.co/ThijsL202/MadMix-Unleashed-12B)
* [Trappu/Abomination-merge-attempt-12B](https://huggingface.co/Trappu/Abomination-merge-attempt-12B)
* [VongolaChouko/Starcannon-Unleashed-12B-v1.0](https://huggingface.co/VongolaChouko/Starcannon-Unleashed-12B-v1.0)

### Configuration

The following YAML configurations were used to produce this model. Some parameters may follow a different pattern in the actual runs, but that isn't important for understanding my workflow.

```yaml
# Generation_1 from 18 original models:
models:
  - model: Original_Model_M
  - model: Original_Model_K
merge_method: slerp
base_model: Original_Model_M
dtype: bfloat16
parameters:
  t: [0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9]
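  # t is the slerp interpolation factor (0 = base_model, 1 = the other model);
  # mergekit spreads a list like this across the layers, so the two models'
  # influence alternates through the network.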

# Variant_N from Generation_1 and AlphaMerge:
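# (ties note: density is the fraction of each model's delta from the base
#  that is kept, weight scales its contribution; lists are again spread
#  across the layers)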
models:
  - model: SecretModel_A
    parameters:
      density: [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
      weight:  [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
  - model: SecretModel_B
    parameters:
      density: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2]
      weight:  [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8]
  - model: SecretModel_C
    parameters:
      density: [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3]
      weight:  [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7]
  - model: SecretModel_D
    parameters:
      density: [0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4]
      weight:  [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6]
  - model: SecretModel_E
    parameters:
      density: [0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5]
      weight:  [0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5]
  - model: SecretModel_F
    parameters:
      density: [0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
      weight:  [0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
  - model: SecretModel_G
    parameters:
      density: [0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
      weight:  [0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]
  - model: SecretModel_H
    parameters:
      density: [0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
      weight:  [0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
  - model: SecretModel_I
    parameters:
      density: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
      weight:  [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
merge_method: ties
base_model: AlphaMerge
dtype: bfloat16

# Model stock merges used to create:
# + Generation_2 from SecretModels
# + Variant_M from Generation_2
# + AlphaMerge from intuitively selected and forgotten models
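# (model_stock derives per-layer interpolation weights from the geometry of
#  the fine-tuned checkpoints, so no hand-tuned weights are needed here)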
models:
  - model: SecretModel_A
  - model: SecretModel_B
  - model: SecretModel_C
merge_method: model_stock
base_model: SecretModel_A
dtype: bfloat16

# Final Variant from Variant_N, Variant_M, and one good model from Generation_1:
models:
  - model: Variant_N
    parameters:
      density: [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
      weight:  [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
  - model: Good_G1_Model
    parameters:
      density: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2]
      weight:  [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.8]
merge_method: ties
base_model: Variant_M
dtype: bfloat16
```
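
Once merged, the result loads like any other Mistral Nemo finetune through transformers. A minimal sketch; the repo id and the generation settings are placeholders, not a recommendation:

```python
# A minimal sketch of loading the final merge with transformers.
# The repo id below is a placeholder for wherever the model is published.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/DarkAtom-12B-v3"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # the merge itself was produced in bfloat16
    device_map="auto",
)

messages = [{"role": "user", "content": "Tell me a short story about a dark atom."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=256, do_sample=True, temperature=0.8)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```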

>My thanks to the authors of the original models; your work is incredible. Have a good time 🖤