---
# Mergekit config: TIES merge producing Qwenvergence-14B-v6-Prose.
# Folds the Prose slerp model onto the Qwen2.5-14B base with full
# density/weight (effectively a pass-through with TIES sign masking).
name: Qwenvergence-14B-v6-Prose
merge_method: ties
base_model: Qwen/Qwen2.5-14B
tokenizer_source: base  # keep the base model's tokenizer
parameters:
  density: 1.0
  weight: 1.0
  int8_mask: true
  normalize: true
  rescale: false
dtype: float32       # working dtype during the merge
out_dtype: bfloat16  # dtype of the written checkpoint
models:
  - model: sometimesanotion/Qwenvergence-14B-v6-Prose-slerp
    parameters:
      density: 1.0
      weight: 1.0