# Asset names for the audio codec weights, main model weights, and the
# SentencePiece text tokenizer (48k multilingual vocab, per the filename).
mimi_name = "mimi-dbaa9758@125.safetensors"
moshi_name = "hibiki-rs-220b12c0@200.safetensors"
tokenizer_name = "tokenizer_spm_48k_multi6_2.model"

[model]
# NOTE(review): text_in_vocab_size is one larger than text_out_vocab_size —
# presumably an extra input-only token (e.g. padding/BOS); confirm with consumer.
text_in_vocab_size = 48001
text_out_vocab_size = 48000
audio_vocab_size = 2049
audio_codebooks = 32

# Main (temporal) transformer.
[model.transformer]
d_model = 2560
num_heads = 20
num_layers = 24
dim_feedforward = 10240
causal = true
norm_first = true
bias_ff = false
bias_attn = false
# Attention context window, in steps.
context = 500
# Rotary positional-embedding base period (positional_embedding = "Rope").
max_period = 100000
use_conv_block = false
use_conv_bias = true
gating = "silu"
norm = "RmsNorm"
positional_embedding = "Rope"
conv_layout = false
conv_kernel_size = 3
kv_repeat = 1
max_seq_len = 4096

# Depth transformer operating across codebooks at each time step.
[model.depformer]
num_slices = 16
low_rank_embeddings = 128

[model.depformer.transformer]
d_model = 1024
num_heads = 16
num_layers = 4
dim_feedforward = 3072
causal = true
norm_first = true
bias_ff = false
bias_attn = false
context = 16
# Unused here since positional_embedding = "None"; kept for schema completeness.
max_period = 10000
use_conv_block = false
use_conv_bias = true
gating = "silu"
norm = "RmsNorm"
positional_embedding = "None"
conv_layout = false
conv_kernel_size = 3
kv_repeat = 1
max_seq_len = 4096

# Lookup-table conditioner on a discrete quality description.
[model.conditioners.description]
type = "Lut"
n_bins = 31
dim = 16
possible_values = ["very_bad", "bad", "neutral", "good", "very_good"]