KingNish committed
Commit 3df0909 · verified · 1 Parent(s): 695fb8c

Delete infer.py

Files changed (1)
  1. infer.py +0 -456
infer.py DELETED
@@ -1,456 +0,0 @@
- import os
- import sys
- sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer'))
- sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer', 'descriptaudiocodec'))
- import argparse
- import torch
- import numpy as np
- import json
- from omegaconf import OmegaConf
- import torchaudio
- from torchaudio.transforms import Resample
- import soundfile as sf
-
- import uuid
- from tqdm import tqdm
- from einops import rearrange
- from codecmanipulator import CodecManipulator
- from mmtokenizer import _MMSentencePieceTokenizer
- from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor, LogitsProcessorList
- import glob
- import time
- import copy
- from collections import Counter
- from models.soundstream_hubert_new import SoundStream
- from vocoder import build_codec_model, process_audio
- from post_process_audio import replace_low_freq_with_energy_matched
- import re
-
-
- parser = argparse.ArgumentParser()
- # Model Configuration:
- parser.add_argument("--stage1_model", type=str, default="m-a-p/YuE-s1-7B-anneal-en-cot", help="The model checkpoint path or identifier for the Stage 1 model.")
- parser.add_argument("--stage2_model", type=str, default="m-a-p/YuE-s2-1B-general", help="The model checkpoint path or identifier for the Stage 2 model.")
- parser.add_argument("--max_new_tokens", type=int, default=3000, help="The maximum number of new tokens to generate in one pass during text generation.")
- parser.add_argument("--run_n_segments", type=int, default=2, help="The number of segments to process during the generation.")
- parser.add_argument("--stage2_batch_size", type=int, default=4, help="The batch size used in Stage 2 inference.")
- # Prompt
- parser.add_argument("--genre_txt", type=str, required=True, help="The file path to a text file containing genre tags that describe the musical style or characteristics (e.g., instrumental, genre, mood, vocal timbre, vocal gender). This is used as part of the generation prompt.")
- parser.add_argument("--lyrics_txt", type=str, required=True, help="The file path to a text file containing the lyrics for the music generation. These lyrics will be processed and split into structured segments to guide the generation process.")
- parser.add_argument("--use_audio_prompt", action="store_true", help="If set, the model will use an audio file as a prompt during generation. The audio file should be specified using --audio_prompt_path.")
- parser.add_argument("--audio_prompt_path", type=str, default="", help="The file path to an audio file to use as a reference prompt when --use_audio_prompt is enabled.")
- parser.add_argument("--prompt_start_time", type=float, default=0.0, help="The start time in seconds to extract the audio prompt from the given audio file.")
- parser.add_argument("--prompt_end_time", type=float, default=30.0, help="The end time in seconds to extract the audio prompt from the given audio file.")
- # Output
- parser.add_argument("--output_dir", type=str, default="./output", help="The directory where generated outputs will be saved.")
- parser.add_argument("--keep_intermediate", action="store_true", help="If set, intermediate outputs will be saved during processing.")
- parser.add_argument("--disable_offload_model", action="store_true", help="If set, the model will not be offloaded from the GPU to CPU after Stage 1 inference.")
- parser.add_argument("--cuda_idx", type=int, default=0)
- # Config for xcodec and upsampler
- parser.add_argument('--basic_model_config', default='./xcodec_mini_infer/final_ckpt/config.yaml', help='YAML files for xcodec configurations.')
- parser.add_argument('--resume_path', default='./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', help='Path to the xcodec checkpoint.')
- parser.add_argument('--config_path', type=str, default='./xcodec_mini_infer/decoders/config.yaml', help='Path to Vocos config file.')
- parser.add_argument('--vocal_decoder_path', type=str, default='./xcodec_mini_infer/decoders/decoder_131000.pth', help='Path to Vocos decoder weights.')
- parser.add_argument('--inst_decoder_path', type=str, default='./xcodec_mini_infer/decoders/decoder_151000.pth', help='Path to Vocos decoder weights.')
- parser.add_argument('-r', '--rescale', action='store_true', help='Rescale output to avoid clipping.')
-
-
- args = parser.parse_args()
- if args.use_audio_prompt and not args.audio_prompt_path:
-     raise FileNotFoundError("Please provide an audio prompt file path via '--audio_prompt_path' when 'use_audio_prompt' is enabled!")
- stage1_model = args.stage1_model
- stage2_model = args.stage2_model
- cuda_idx = args.cuda_idx
- max_new_tokens = args.max_new_tokens
- stage1_output_dir = os.path.join(args.output_dir, f"stage1")
- stage2_output_dir = stage1_output_dir.replace('stage1', 'stage2')
- os.makedirs(stage1_output_dir, exist_ok=True)
- os.makedirs(stage2_output_dir, exist_ok=True)
-
- # load tokenizer and model
- device = torch.device(f"cuda:{cuda_idx}" if torch.cuda.is_available() else "cpu")
- mmtokenizer = _MMSentencePieceTokenizer("./mm_tokenizer_v0.2_hf/tokenizer.model")
- model = AutoModelForCausalLM.from_pretrained(
-     stage1_model,
-     torch_dtype=torch.bfloat16,
-     attn_implementation="flash_attention_2", # To enable flashattn, you have to install flash-attn
- )
- # to device, if gpu is available
- model.to(device)
- model.eval()
-
- codectool = CodecManipulator("xcodec", 0, 1)
- codectool_stage2 = CodecManipulator("xcodec", 0, 8)
- model_config = OmegaConf.load(args.basic_model_config)
- codec_model = eval(model_config.generator.name)(**model_config.generator.config).to(device)
- parameter_dict = torch.load(args.resume_path, map_location='cpu')
- codec_model.load_state_dict(parameter_dict['codec_model'])
- codec_model.to(device)
- codec_model.eval()
-
- class BlockTokenRangeProcessor(LogitsProcessor):
-     def __init__(self, start_id, end_id):
-         self.blocked_token_ids = list(range(start_id, end_id))
-
-     def __call__(self, input_ids, scores):
-         scores[:, self.blocked_token_ids] = -float("inf")
-         return scores
-
- def load_audio_mono(filepath, sampling_rate=16000):
-     audio, sr = torchaudio.load(filepath)
-     # Convert to mono
-     audio = torch.mean(audio, dim=0, keepdim=True)
-     # Resample if needed
-     if sr != sampling_rate:
-         resampler = Resample(orig_freq=sr, new_freq=sampling_rate)
-         audio = resampler(audio)
-     return audio
-
- def split_lyrics(lyrics):
-     pattern = r"\[(\w+)\](.*?)\n(?=\[|\Z)"
-     segments = re.findall(pattern, lyrics, re.DOTALL)
-     structured_lyrics = [f"[{seg[0]}]\n{seg[1].strip()}\n\n" for seg in segments]
-     return structured_lyrics
-
- # Read the prompt inputs and build the text prompts
- stage1_output_set = []
- # Tips:
- # genre tags support instrumental, genre, mood, vocal timbre and vocal gender
- # all kinds of tags are needed
- with open(args.genre_txt) as f:
-     genres = f.read().strip()
- with open(args.lyrics_txt) as f:
-     lyrics = split_lyrics(f.read())
- # instruction
- full_lyrics = "\n".join(lyrics)
- prompt_texts = [f"Generate music from the given lyrics segment by segment.\n[Genre] {genres}\n{full_lyrics}"]
- prompt_texts += lyrics
-
-
- random_id = uuid.uuid4()
- output_seq = None
- # Here is suggested decoding config
- top_p = 0.93
- temperature = 1.0
- repetition_penalty = 1.2
- # special tokens
- start_of_segment = mmtokenizer.tokenize('[start_of_segment]')
- end_of_segment = mmtokenizer.tokenize('[end_of_segment]')
- # Format text prompt
- run_n_segments = min(args.run_n_segments+1, len(lyrics))
- for i, p in enumerate(tqdm(prompt_texts[:run_n_segments])):
-     section_text = p.replace('[start_of_segment]', '').replace('[end_of_segment]', '')
-     guidance_scale = 1.5 if i <=1 else 1.2
-     if i==0:
-         continue
-     if i==1:
-         if args.use_audio_prompt:
-             audio_prompt = load_audio_mono(args.audio_prompt_path)
-             audio_prompt.unsqueeze_(0)
-             with torch.no_grad():
-                 raw_codes = codec_model.encode(audio_prompt.to(device), target_bw=0.5)
-             raw_codes = raw_codes.transpose(0, 1)
-             raw_codes = raw_codes.cpu().numpy().astype(np.int16)
-             # Format audio prompt
-             code_ids = codectool.npy2ids(raw_codes[0])
-             audio_prompt_codec = code_ids[int(args.prompt_start_time *50): int(args.prompt_end_time *50)] # 50 is tps of xcodec
-             audio_prompt_codec_ids = [mmtokenizer.soa] + codectool.sep_ids + audio_prompt_codec + [mmtokenizer.eoa]
-             sentence_ids = mmtokenizer.tokenize("[start_of_reference]") + audio_prompt_codec_ids + mmtokenizer.tokenize("[end_of_reference]")
-             head_id = mmtokenizer.tokenize(prompt_texts[0]) + sentence_ids
-         else:
-             head_id = mmtokenizer.tokenize(prompt_texts[0])
-         prompt_ids = head_id + start_of_segment + mmtokenizer.tokenize(section_text) + [mmtokenizer.soa] + codectool.sep_ids
-     else:
-         prompt_ids = end_of_segment + start_of_segment + mmtokenizer.tokenize(section_text) + [mmtokenizer.soa] + codectool.sep_ids
-
-     prompt_ids = torch.as_tensor(prompt_ids).unsqueeze(0).to(device)
-     input_ids = torch.cat([raw_output, prompt_ids], dim=1) if i > 1 else prompt_ids
-     # Use window slicing in case output sequence exceeds the context of model
-     max_context = 16384-max_new_tokens-1
-     if input_ids.shape[-1] > max_context:
-         print(f'Section {i}: output length {input_ids.shape[-1]} exceeding context length {max_context}, now using the last {max_context} tokens.')
-         input_ids = input_ids[:, -(max_context):]
-     with torch.no_grad():
-         output_seq = model.generate(
-             input_ids=input_ids,
-             max_new_tokens=max_new_tokens,
-             min_new_tokens=100,
-             do_sample=True,
-             top_p=top_p,
-             temperature=temperature,
-             repetition_penalty=repetition_penalty,
-             eos_token_id=mmtokenizer.eoa,
-             pad_token_id=mmtokenizer.eoa,
-             logits_processor=LogitsProcessorList([BlockTokenRangeProcessor(0, 32002), BlockTokenRangeProcessor(32016, 32016)]),
-             guidance_scale=guidance_scale,
-         )
-         if output_seq[0][-1].item() != mmtokenizer.eoa:
-             tensor_eoa = torch.as_tensor([[mmtokenizer.eoa]]).to(model.device)
-             output_seq = torch.cat((output_seq, tensor_eoa), dim=1)
-     if i > 1:
-         raw_output = torch.cat([raw_output, prompt_ids, output_seq[:, input_ids.shape[-1]:]], dim=1)
-     else:
-         raw_output = output_seq
-
- # save raw output and check sanity
- ids = raw_output[0].cpu().numpy()
- soa_idx = np.where(ids == mmtokenizer.soa)[0].tolist()
- eoa_idx = np.where(ids == mmtokenizer.eoa)[0].tolist()
- if len(soa_idx)!=len(eoa_idx):
-     raise ValueError(f'invalid pairs of soa and eoa, Num of soa: {len(soa_idx)}, Num of eoa: {len(eoa_idx)}')
-
- vocals = []
- instrumentals = []
- range_begin = 1 if args.use_audio_prompt else 0
- for i in range(range_begin, len(soa_idx)):
-     codec_ids = ids[soa_idx[i]+1:eoa_idx[i]]
-     if codec_ids[0] == 32016:
-         codec_ids = codec_ids[1:]
-     codec_ids = codec_ids[:2 * (codec_ids.shape[0] // 2)]
-     vocals_ids = codectool.ids2npy(rearrange(codec_ids,"(n b) -> b n", b=2)[0])
-     vocals.append(vocals_ids)
-     instrumentals_ids = codectool.ids2npy(rearrange(codec_ids,"(n b) -> b n", b=2)[1])
-     instrumentals.append(instrumentals_ids)
- vocals = np.concatenate(vocals, axis=1)
- instrumentals = np.concatenate(instrumentals, axis=1)
- vocal_save_path = os.path.join(stage1_output_dir, f"cot_{genres.replace(' ', '-')}_tp{top_p}_T{temperature}_rp{repetition_penalty}_maxtk{max_new_tokens}_vocal_{random_id}".replace('.', '@')+'.npy')
- inst_save_path = os.path.join(stage1_output_dir, f"cot_{genres.replace(' ', '-')}_tp{top_p}_T{temperature}_rp{repetition_penalty}_maxtk{max_new_tokens}_instrumental_{random_id}".replace('.', '@')+'.npy')
- np.save(vocal_save_path, vocals)
- np.save(inst_save_path, instrumentals)
- stage1_output_set.append(vocal_save_path)
- stage1_output_set.append(inst_save_path)
-
-
- # offload model
- if not args.disable_offload_model:
-     model.cpu()
-     del model
-     torch.cuda.empty_cache()
-
- print("Stage 2 inference...")
- model_stage2 = AutoModelForCausalLM.from_pretrained(
-     stage2_model,
-     torch_dtype=torch.float16,
-     attn_implementation="flash_attention_2"
- )
- model_stage2.to(device)
- model_stage2.eval()
-
- def stage2_generate(model, prompt, batch_size=16):
-     codec_ids = codectool.unflatten(prompt, n_quantizer=1)
-     codec_ids = codectool.offset_tok_ids(
-         codec_ids,
-         global_offset=codectool.global_offset,
-         codebook_size=codectool.codebook_size,
-         num_codebooks=codectool.num_codebooks,
-     ).astype(np.int32)
-
-     # Prepare prompt_ids based on batch size or single input
-     if batch_size > 1:
-         codec_list = []
-         for i in range(batch_size):
-             idx_begin = i * 300
-             idx_end = (i + 1) * 300
-             codec_list.append(codec_ids[:, idx_begin:idx_end])
-
-         codec_ids = np.concatenate(codec_list, axis=0)
-         prompt_ids = np.concatenate(
-             [
-                 np.tile([mmtokenizer.soa, mmtokenizer.stage_1], (batch_size, 1)),
-                 codec_ids,
-                 np.tile([mmtokenizer.stage_2], (batch_size, 1)),
-             ],
-             axis=1
-         )
-     else:
-         prompt_ids = np.concatenate([
-             np.array([mmtokenizer.soa, mmtokenizer.stage_1]),
-             codec_ids.flatten(), # Flatten the 2D array to 1D
-             np.array([mmtokenizer.stage_2])
-         ]).astype(np.int32)
-         prompt_ids = prompt_ids[np.newaxis, ...]
-
-     codec_ids = torch.as_tensor(codec_ids).to(device)
-     prompt_ids = torch.as_tensor(prompt_ids).to(device)
-     len_prompt = prompt_ids.shape[-1]
-
-     block_list = LogitsProcessorList([BlockTokenRangeProcessor(0, 46358), BlockTokenRangeProcessor(53526, mmtokenizer.vocab_size)])
-
-     # Teacher forcing generate loop
-     for frames_idx in range(codec_ids.shape[1]):
-         cb0 = codec_ids[:, frames_idx:frames_idx+1]
-         prompt_ids = torch.cat([prompt_ids, cb0], dim=1)
-         input_ids = prompt_ids
-
-         with torch.no_grad():
-             stage2_output = model.generate(input_ids=input_ids,
-                 min_new_tokens=7,
-                 max_new_tokens=7,
-                 eos_token_id=mmtokenizer.eoa,
-                 pad_token_id=mmtokenizer.eoa,
-                 logits_processor=block_list,
-             )
-
-         assert stage2_output.shape[1] - prompt_ids.shape[1] == 7, f"output new tokens={stage2_output.shape[1]-prompt_ids.shape[1]}"
-         prompt_ids = stage2_output
-
-     # Return output based on batch size
-     if batch_size > 1:
-         output = prompt_ids.cpu().numpy()[:, len_prompt:]
-         output_list = [output[i] for i in range(batch_size)]
-         output = np.concatenate(output_list, axis=0)
-     else:
-         output = prompt_ids[0].cpu().numpy()[len_prompt:]
-
-     return output
-
- def stage2_inference(model, stage1_output_set, stage2_output_dir, batch_size=4):
-     stage2_result = []
-     for i in tqdm(range(len(stage1_output_set))):
-         output_filename = os.path.join(stage2_output_dir, os.path.basename(stage1_output_set[i]))
-
-         if os.path.exists(output_filename):
-             print(f'{output_filename} stage2 already done, skipping.')
-             continue
-
-         # Load the prompt
-         prompt = np.load(stage1_output_set[i]).astype(np.int32)
-
-         # Only accept 6s segments
-         output_duration = prompt.shape[-1] // 50 // 6 * 6
-         num_batch = output_duration // 6
-
-         if num_batch <= batch_size:
-             # If num_batch is less than or equal to batch_size, we can infer the entire prompt at once
-             output = stage2_generate(model, prompt[:, :output_duration*50], batch_size=num_batch)
-         else:
-             # If num_batch is greater than batch_size, process in chunks of batch_size
-             segments = []
-             num_segments = (num_batch // batch_size) + (1 if num_batch % batch_size != 0 else 0)
-
-             for seg in range(num_segments):
-                 start_idx = seg * batch_size * 300
-                 # Ensure the end_idx does not exceed the available length
-                 end_idx = min((seg + 1) * batch_size * 300, output_duration*50) # Adjust the last segment
-                 current_batch_size = batch_size if seg != num_segments-1 or num_batch % batch_size == 0 else num_batch % batch_size
-                 segment = stage2_generate(
-                     model,
-                     prompt[:, start_idx:end_idx],
-                     batch_size=current_batch_size
-                 )
-                 segments.append(segment)
-
-             # Concatenate all the segments
-             output = np.concatenate(segments, axis=0)
-
-         # Process the ending part of the prompt
-         if output_duration*50 != prompt.shape[-1]:
-             ending = stage2_generate(model, prompt[:, output_duration*50:], batch_size=1)
-             output = np.concatenate([output, ending], axis=0)
-         output = codectool_stage2.ids2npy(output)
-
-         # Fix invalid codes (a dirty solution, which may harm the quality of audio)
-         # We are trying to find a better one
-         fixed_output = copy.deepcopy(output)
-         for i, line in enumerate(output):
-             for j, element in enumerate(line):
-                 if element < 0 or element > 1023:
-                     counter = Counter(line)
-                     most_frequent = sorted(counter.items(), key=lambda x: x[1], reverse=True)[0][0]
-                     fixed_output[i, j] = most_frequent
-         # save output
-         np.save(output_filename, fixed_output)
-         stage2_result.append(output_filename)
-     return stage2_result
-
- stage2_result = stage2_inference(model_stage2, stage1_output_set, stage2_output_dir, batch_size=args.stage2_batch_size)
- print(stage2_result)
- print('Stage 2 DONE.\n')
- # convert audio tokens to audio
- def save_audio(wav: torch.Tensor, path, sample_rate: int, rescale: bool = False):
-     folder_path = os.path.dirname(path)
-     if not os.path.exists(folder_path):
-         os.makedirs(folder_path)
-     limit = 0.99
-     max_val = wav.abs().max()
-     wav = wav * min(limit / max_val, 1) if rescale else wav.clamp(-limit, limit)
-     torchaudio.save(str(path), wav, sample_rate=sample_rate, encoding='PCM_S', bits_per_sample=16)
- # reconstruct tracks
- recons_output_dir = os.path.join(args.output_dir, "recons")
- recons_mix_dir = os.path.join(recons_output_dir, 'mix')
- os.makedirs(recons_mix_dir, exist_ok=True)
- tracks = []
- for npy in stage2_result:
-     codec_result = np.load(npy)
-     decodec_rlt = []
-     with torch.no_grad():
-         decoded_waveform = codec_model.decode(torch.as_tensor(codec_result.astype(np.int16), dtype=torch.long).unsqueeze(0).permute(1, 0, 2).to(device))
-     decoded_waveform = decoded_waveform.cpu().squeeze(0)
-     decodec_rlt.append(torch.as_tensor(decoded_waveform))
-     decodec_rlt = torch.cat(decodec_rlt, dim=-1)
-     save_path = os.path.join(recons_output_dir, os.path.splitext(os.path.basename(npy))[0] + ".mp3")
-     tracks.append(save_path)
-     save_audio(decodec_rlt, save_path, 16000)
- # mix tracks
- for inst_path in tracks:
-     try:
-         if (inst_path.endswith('.wav') or inst_path.endswith('.mp3')) \
-             and 'instrumental' in inst_path:
-             # find pair
-             vocal_path = inst_path.replace('instrumental', 'vocal')
-             if not os.path.exists(vocal_path):
-                 continue
-             # mix
-             recons_mix = os.path.join(recons_mix_dir, os.path.basename(inst_path).replace('instrumental', 'mixed'))
-             instrumental_stem, sr = sf.read(inst_path)
-             vocal_stem, _ = sf.read(vocal_path)
-             mix_stem = (vocal_stem + instrumental_stem) / 1
-             sf.write(recons_mix, mix_stem, sr)
-     except Exception as e:
-         print(e)
-
- # vocoder to upsample audios
- vocal_decoder, inst_decoder = build_codec_model(args.config_path, args.vocal_decoder_path, args.inst_decoder_path)
- vocoder_output_dir = os.path.join(args.output_dir, 'vocoder')
- vocoder_stems_dir = os.path.join(vocoder_output_dir, 'stems')
- vocoder_mix_dir = os.path.join(vocoder_output_dir, 'mix')
- os.makedirs(vocoder_mix_dir, exist_ok=True)
- os.makedirs(vocoder_stems_dir, exist_ok=True)
- for npy in stage2_result:
-     if 'instrumental' in npy:
-         # Process instrumental
-         instrumental_output = process_audio(
-             npy,
-             os.path.join(vocoder_stems_dir, 'instrumental.mp3'),
-             args.rescale,
-             args,
-             inst_decoder,
-             codec_model
-         )
-     else:
-         # Process vocal
-         vocal_output = process_audio(
-             npy,
-             os.path.join(vocoder_stems_dir, 'vocal.mp3'),
-             args.rescale,
-             args,
-             vocal_decoder,
-             codec_model
-         )
- # mix tracks
- try:
-     mix_output = instrumental_output + vocal_output
-     vocoder_mix = os.path.join(vocoder_mix_dir, os.path.basename(recons_mix))
-     save_audio(mix_output, vocoder_mix, 44100, args.rescale)
-     print(f"Created mix: {vocoder_mix}")
- except RuntimeError as e:
-     print(e)
-     print(f"mix {vocoder_mix} failed! inst: {instrumental_output.shape}, vocal: {vocal_output.shape}")
-
- # Post process
- replace_low_freq_with_energy_matched(
-     a_file=recons_mix, # 16kHz
-     b_file=vocoder_mix, # 48kHz
-     c_file=os.path.join(args.output_dir, os.path.basename(recons_mix)),
-     cutoff_freq=5500.0
- )
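
For reference, the lyric segmentation that the deleted script performed in split_lyrics can be exercised on its own. Below is a minimal sketch that reuses the same regex from the file; the sample lyrics are invented for illustration only.

import re

def split_lyrics(lyrics: str):
    # Same pattern as the removed infer.py: a [section] tag, then its lines,
    # terminated by a newline that precedes the next [tag] or the end of the text.
    pattern = r"\[(\w+)\](.*?)\n(?=\[|\Z)"
    segments = re.findall(pattern, lyrics, re.DOTALL)
    return [f"[{tag}]\n{body.strip()}\n\n" for tag, body in segments]

# Invented sample input; the trailing newline matters for the final segment.
sample = "[verse]\nLine one\nLine two\n\n[chorus]\nHook line\n"
for seg in split_lyrics(sample):
    print(repr(seg))
# -> '[verse]\nLine one\nLine two\n\n'
#    '[chorus]\nHook line\n\n'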