# Toaster's notes
# self-attention: 1.0 (Wizard) + 0.8 (Toppy) + 0.6 (Westlake) + 0.2 (Frostwind)
# Meant to combine the smarter models in this part; in theory this is what shapes how the model attends to the prompt.
# multi-layer perceptron: 1.0 (Wizard) + 0.8 (Frostwind) + 0.6 (MythoMist) + 0.5 (Toppy) + 0.4 (Westlake) + 0.2 (Evil)
# Meant to define the 'style' of the model: emphasis on the more creative models, though the smart ones appear as well to keep things coherent. A dash of Evil should (theoretically) be enough; in my experience, toxic models don't need high concentrations to work.
# Remainder values: I'm mostly guessing here. These fallback weights apply to everything not caught by a filter (lm_head and the like); least thought went into these.
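# How the math shakes out (my reading of mergekit's task_arithmetic, so treat this as a sketch):
#   merged_tensor = base + sum_i( weight_i * (model_i - base) )
# Each `filter` entry supplies the weight for tensors whose names match it (self_attn, mlp);
# the bare `value` is the fallback weight for everything else.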

models:
  - model: D:/MLnonsense/models/fearlessdots_WizardLM-2-7B-abliterated
    parameters:
      weight: 1.0
  - model: D:/MLnonsense/models/Undi95_Toppy-M-7B
    parameters:
      weight:
        - filter: self_attn
          value: 0.8
        - value: 0.5
  - model: D:/MLnonsense/models/senseable_Westlake-7b-v2
    parameters:
      weight:
        - filter: self_attn
          value: 0.6
        - value: 0.4
  - model: D:/MLnonsense/models/maywell_PiVoT-0.1-Evil-a
    parameters:
      weight:
        - filter: mlp
          value: 0.2
        - value: 0.0
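  # Note: the 0.0 fallback should mean Evil's task vector only touches the mlp tensors; its contribution everywhere else is dropped entirely.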
  - model: D:/MLnonsense/models/Sao10K_Frostwind-v2.1-m7
    parameters:
      weight:
        - filter: self_attn
          value: 0.2
        - filter: mlp
          value: 0.8
        - value: 0.5
  - model: D:/MLnonsense/models/Gryphe_MythoMist-7b
    parameters:
      weight:
        - filter: mlp
          value: 0.6
        - value: 0.0
base_model: D:/MLnonsense/models/TeeZee_Mistral-7B-v0.1-fp32
merge_method: task_arithmetic
dtype: float32
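
# To reproduce (assuming a stock mergekit install; the exact invocation is a sketch):
#   mergekit-yaml <path-to-this-config> ./merged-model --copy-tokenizer
# Add --cuda if you have the VRAM to spare.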