KingNish committed on
Commit 725074b · 1 Parent(s): bfa6f78

modified: app.py

Files changed (1)
  1. app.py +397 -156
app.py CHANGED
@@ -7,7 +7,7 @@ import spaces
  from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor, LogitsProcessorList
  import torch

- is_shared_ui = True if "innova-ai/YuE-music-generator-demo" in os.environ['SPACE_ID'] else False

  # Install required package
  def install_flash_attn():
@@ -49,65 +49,83 @@ import sys
  sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer'))
  sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer', 'descriptaudiocodec'))

-
- import os
- import sys
- import torch
  import numpy as np
  import json
- import re
- import uuid
- import gradio as gr
- from tqdm import tqdm
  from omegaconf import OmegaConf
  import torchaudio
  from torchaudio.transforms import Resample
  import soundfile as sf
  from einops import rearrange
  from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor, LogitsProcessorList
  from models.soundstream_hubert_new import SoundStream
  from vocoder import build_codec_model, process_audio
  from post_process_audio import replace_low_freq_with_energy_matched

- sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer'))
- sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer', 'descriptaudiocodec'))
- from codecmanipulator import CodecManipulator
- from mmtokenizer import _MMSentencePieceTokenizer
-
- # Load models once at startup
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # Load language model
- print("Loading language model...")
- model = AutoModelForCausalLM.from_pretrained(
-     "m-a-p/YuE-s1-7B-anneal-en-cot",
-     torch_dtype=torch.float16,
-     attn_implementation="flash_attention_2",
- ).to(device)
- model.eval()

- # Load tokenizers and codec tools
- print("Loading tokenizers...")
  mmtokenizer = _MMSentencePieceTokenizer("./mm_tokenizer_v0.2_hf/tokenizer.model")
- codectool = CodecManipulator("xcodec", 0, 1)

- # Load codec models
- print("Loading codec models...")
- model_config = OmegaConf.load('./xcodec_mini_infer/final_ckpt/config.yaml')
  codec_model = eval(model_config.generator.name)(**model_config.generator.config).to(device)
- parameter_dict = torch.load('./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', map_location='cpu')
  codec_model.load_state_dict(parameter_dict['codec_model'])
  codec_model.to(device)
  codec_model.eval()

- # Load vocoders
- print("Loading vocoders...")
- vocal_decoder, inst_decoder = build_codec_model(
-     './xcodec_mini_infer/decoders/config.yaml',
-     './xcodec_mini_infer/decoders/decoder_131000.pth',
-     './xcodec_mini_infer/decoders/decoder_151000.pth'
- )
-
  class BlockTokenRangeProcessor(LogitsProcessor):
      def __init__(self, start_id, end_id):
          self.blocked_token_ids = list(range(start_id, end_id))
@@ -116,118 +134,325 @@ class BlockTokenRangeProcessor(LogitsProcessor):
          scores[:, self.blocked_token_ids] = -float("inf")
          return scores

  def split_lyrics(lyrics):
      pattern = r"\[(\w+)\](.*?)\n(?=\[|\Z)"
      segments = re.findall(pattern, lyrics, re.DOTALL)
-     return [f"[{seg[0]}]\n{seg[1].strip()}\n\n" for seg in segments]
-
- def save_audio(wav: torch.Tensor, path, sample_rate: int, rescale: bool = False):
-     os.makedirs(os.path.dirname(path), exist_ok=True)
-     limit = 0.99
-     max_val = wav.abs().max()
-     wav = wav * min(limit / max_val, 1) if rescale else wav.clamp(-limit, limit)
-     torchaudio.save(path, wav, sample_rate=sample_rate, encoding='PCM_S', bits_per_sample=16)
-
- @spaces.GPU(duration=150)
- def run_inference(genre_txt_content, lyrics_txt_content, num_segments=2, max_new_tokens=2000):
-     try:
-         # Create temporary output directory
-         output_dir = tempfile.mkdtemp()
-         stage1_output_dir = os.path.join(output_dir, "stage1")
-         os.makedirs(stage1_output_dir, exist_ok=True)
-
-         # Process inputs
-         structured_lyrics = split_lyrics(lyrics_txt_content)
-         full_lyrics = "\n".join(structured_lyrics)
-         prompt_texts = [f"Generate music from the given lyrics segment by segment.\n[Genre] {genre_txt_content}\n{full_lyrics}"] + structured_lyrics
-
-         # Generation parameters
-         top_p = 0.93
-         temperature = 1.0
-         repetition_penalty = 1.2
-         start_of_segment = mmtokenizer.tokenize('[start_of_segment]')
-         end_of_segment = mmtokenizer.tokenize('[end_of_segment]')
-         run_n_segments = min(num_segments + 1, len(structured_lyrics))
-
-         # Generate tokens
-         raw_output = None
-         for i in tqdm(range(1, run_n_segments)):
-             section_text = prompt_texts[i].replace('[start_of_segment]', '').replace('[end_of_segment]', '')
-             guidance_scale = 1.5 if i <= 1 else 1.2
-             prompt_ids = start_of_segment + mmtokenizer.tokenize(section_text) + [mmtokenizer.soa] + codectool.sep_ids
-             prompt_ids = torch.as_tensor(prompt_ids).unsqueeze(0).to(device)
-
-             input_ids = prompt_ids if i == 1 else torch.cat([raw_output, prompt_ids], dim=1)
-             if input_ids.shape[-1] > 16384 - max_new_tokens - 1:
-                 input_ids = input_ids[:, -(16384 - max_new_tokens - 1):]
-
-             with torch.no_grad():
-                 output_seq = model.generate(
-                     input_ids=input_ids,
-                     max_new_tokens=max_new_tokens,
-                     do_sample=True,
-                     top_p=top_p,
-                     temperature=temperature,
-                     repetition_penalty=repetition_penalty,
-                     eos_token_id=mmtokenizer.eoa,
-                     pad_token_id=mmtokenizer.eoa,
-                     logits_processor=LogitsProcessorList([
-                         BlockTokenRangeProcessor(0, 32002),
-                         BlockTokenRangeProcessor(32016, 32016)
-                     ]),
-                     guidance_scale=guidance_scale,
                  )
-
-             raw_output = output_seq if i == 1 else torch.cat([raw_output, output_seq[:, input_ids.shape[-1]:]], dim=1)
-
-         # Process generated tokens
-         ids = raw_output[0].cpu().numpy()
-         soa_idx = np.where(ids == mmtokenizer.soa)[0]
-         eoa_idx = np.where(ids == mmtokenizer.eoa)[0]
-         vocals, instrumentals = [], []
-
-         for i in range(len(soa_idx)):
-             codec_ids = ids[soa_idx[i]+1:eoa_idx[i]]
-             codec_ids = codec_ids[:2 * (len(codec_ids) // 2)]
-             vocals.append(codectool.ids2npy(rearrange(codec_ids, "(n b) -> b n", b=2)[0]))
-             instrumentals.append(codectool.ids2npy(rearrange(codec_ids, "(n b) -> b n", b=2)[1]))
-
-         # Generate audio
-         vocals = np.concatenate(vocals, axis=1)
-         instrumentals = np.concatenate(instrumentals, axis=1)
-
          with torch.no_grad():
-             vocal_audio = codec_model.decode(torch.tensor(vocals.astype(np.int16)).long().unsqueeze(0).permute(1, 0, 2).to(device))
-             inst_audio = codec_model.decode(torch.tensor(instrumentals.astype(np.int16)).long().unsqueeze(0).permute(1, 0, 2).to(device))

-         # Mix and save audio
-         final_audio = (vocal_audio.cpu().squeeze() + inst_audio.cpu().squeeze()) / 2
-         output_path = os.path.join(output_dir, "final_output.wav")
-         save_audio(final_audio.unsqueeze(0), output_path, 16000)

-         return output_path

-     except Exception as e:
-         print(f"Error during inference: {str(e)}")
-         raise gr.Error(f"Generation failed: {str(e)}")

- # Gradio UI
  with gr.Blocks() as demo:
-     gr.Markdown("# YuE Music Generator")
-     with gr.Row():
-         with gr.Column():
-             genre_txt = gr.Textbox(label="Genre Tags", placeholder="e.g., female vocal, jazz, piano")
-             lyrics_txt = gr.Textbox(label="Lyrics", lines=10, placeholder="Enter lyrics with sections like [verse], [chorus]")
-             num_segments = gr.Slider(1, 10, value=2, label="Number of Segments")
-             max_tokens = gr.Slider(500, 3000, value=2000, label="Max Tokens")
-             btn = gr.Button("Generate Music")
-         with gr.Column():
-             audio_out = gr.Audio(label="Generated Music")
-
-     examples = gr.Examples(
-         examples=[
-             ["female blues airy vocal bright vocal piano sad romantic guitar jazz",
-              """[verse]
  In the quiet of the evening, shadows start to fall
  Whispers of the night wind echo through the hall
  Lost within the silence, I hear your gentle voice
@@ -237,23 +462,39 @@ Guiding me back homeward, making my heart rejoice
  Don't let this moment fade, hold me close tonight
  With you here beside me, everything's alright
  Can't imagine life alone, don't want to let you go
- Stay with me forever, let our love just flow"""],
-             ["rap piano street tough piercing vocal hip-hop synthesizer clear vocal male",
-              """[verse]
  Woke up in the morning, sun is shining bright
  Chasing all my dreams, gotta get my mind right
  City lights are fading, but my vision's clear
- Got my team beside me, no room for fear"""]
-         ],
-         inputs=[genre_txt, lyrics_txt],
-         outputs=audio_out
-     )

-     btn.click(
-         fn=run_inference,
-         inputs=[genre_txt, lyrics_txt, num_segments, max_tokens],
-         outputs=audio_out
-     )

- if __name__ == "__main__":
-     demo.launch()
@@ -7,7 +7,7 @@ import spaces
  from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor, LogitsProcessorList
  import torch

+ is_shared_ui = True if "innova-ai/YuE-music-generator-demo" in os.environ['SPACE_ID'] else False

  # Install required package
  def install_flash_attn():
@@ -49,65 +49,83 @@ import sys
  sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer'))
  sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'xcodec_mini_infer', 'descriptaudiocodec'))

+ import argparse
  import numpy as np
  import json
  from omegaconf import OmegaConf
  import torchaudio
  from torchaudio.transforms import Resample
  import soundfile as sf
+
+ import uuid
+ from tqdm import tqdm
  from einops import rearrange
+ from codecmanipulator import CodecManipulator
+ from mmtokenizer import _MMSentencePieceTokenizer
  from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor, LogitsProcessorList
+ import glob
+ import time
+ import copy
+ from collections import Counter
  from models.soundstream_hubert_new import SoundStream
  from vocoder import build_codec_model, process_audio
  from post_process_audio import replace_low_freq_with_energy_matched
+ import re

+ # --- Arguments and Model Loading from infer.py ---
+ parser = argparse.ArgumentParser()
+ # Model Configuration:
+ parser.add_argument("--stage1_model", type=str, default="m-a-p/YuE-s1-7B-anneal-en-cot", help="The model checkpoint path or identifier for the Stage 1 model.")
+ parser.add_argument("--max_new_tokens", type=int, default=3000, help="The maximum number of new tokens to generate in one pass during text generation.")
+ parser.add_argument("--run_n_segments", type=int, default=2, help="The number of segments to process during the generation.")
+ # Prompt
+ parser.add_argument("--genre_txt", type=str, default="", help="The file path to a text file containing genre tags that describe the musical style or characteristics (e.g., instrumental, genre, mood, vocal timbre, vocal gender). This is used as part of the generation prompt.") # Modified: removed required=True and using default=""
+ parser.add_argument("--lyrics_txt", type=str, default="", help="The file path to a text file containing the lyrics for the music generation. These lyrics will be processed and split into structured segments to guide the generation process.") # Modified: removed required=True and using default=""
+ parser.add_argument("--use_audio_prompt", action="store_true", help="If set, the model will use an audio file as a prompt during generation. The audio file should be specified using --audio_prompt_path.")
+ parser.add_argument("--audio_prompt_path", type=str, default="", help="The file path to an audio file to use as a reference prompt when --use_audio_prompt is enabled.")
+ parser.add_argument("--prompt_start_time", type=float, default=0.0, help="The start time in seconds to extract the audio prompt from the given audio file.")
+ parser.add_argument("--prompt_end_time", type=float, default=30.0, help="The end time in seconds to extract the audio prompt from the given audio file.")
+ # Output
+ parser.add_argument("--output_dir", type=str, default="./output", help="The directory where generated outputs will be saved.")
+ parser.add_argument("--keep_intermediate", action="store_true", help="If set, intermediate outputs will be saved during processing.")
+ parser.add_argument("--disable_offload_model", action="store_true", help="If set, the model will not be offloaded from the GPU to CPU after Stage 1 inference.")
+ parser.add_argument("--cuda_idx", type=int, default=0)
+ # Config for xcodec and upsampler
+ parser.add_argument('--basic_model_config', default='./xcodec_mini_infer/final_ckpt/config.yaml', help='YAML files for xcodec configurations.')
+ parser.add_argument('--resume_path', default='./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', help='Path to the xcodec checkpoint.')
+ parser.add_argument('--config_path', type=str, default='./xcodec_mini_infer/decoders/config.yaml', help='Path to Vocos config file.')
+ parser.add_argument('--vocal_decoder_path', type=str, default='./xcodec_mini_infer/decoders/decoder_131000.pth', help='Path to Vocos decoder weights.')
+ parser.add_argument('--inst_decoder_path', type=str, default='./xcodec_mini_infer/decoders/decoder_151000.pth', help='Path to Vocos decoder weights.')
+ parser.add_argument('-r', '--rescale', action='store_true', help='Rescale output to avoid clipping.')
+
+ args = parser.parse_args([]) # Modified: Pass empty list to parse_args to avoid command line parsing in Gradio
+
+ if args.use_audio_prompt and not args.audio_prompt_path:
+     raise FileNotFoundError("Please offer audio prompt filepath using '--audio_prompt_path', when you enable 'use_audio_prompt'!")
+ model_name = args.stage1_model # Modified: Renamed 'model' to 'model_name' to avoid shadowing the loaded model later
+ cuda_idx = args.cuda_idx
+ max_new_tokens_config = args.max_new_tokens # Modified: Renamed 'max_new_tokens' to 'max_new_tokens_config' to avoid shadowing the Gradio input
+ stage1_output_dir = os.path.join(args.output_dir, f"stage1")
+ os.makedirs(stage1_output_dir, exist_ok=True)
+
+ # load tokenizer and model
+ device = torch.device(f"cuda:{cuda_idx}" if torch.cuda.is_available() else "cpu")
+
+ # Now you can use `device` to move your tensors or models to the GPU (if available)
+ print(f"Using device: {device}")
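The parse_args([]) call above is what lets the Space reuse infer.py's CLI defaults without reading the real command line; a minimal sketch of that pattern (the parser and variable names here are illustrative, only --stage1_model and --max_new_tokens mirror the commit):

import argparse

sketch_parser = argparse.ArgumentParser()
sketch_parser.add_argument("--stage1_model", default="m-a-p/YuE-s1-7B-anneal-en-cot")
sketch_parser.add_argument("--max_new_tokens", type=int, default=3000)

defaults = sketch_parser.parse_args([])            # empty list: ignore sys.argv, keep declared defaults
assert defaults.max_new_tokens == 3000
overridden = sketch_parser.parse_args(["--max_new_tokens", "500"])  # an explicit list still overrides
assert overridden.max_new_tokens == 500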
  mmtokenizer = _MMSentencePieceTokenizer("./mm_tokenizer_v0.2_hf/tokenizer.model")

+ codectool = CodecManipulator("xcodec", 0, 1)
+ model_config = OmegaConf.load(args.basic_model_config)
  codec_model = eval(model_config.generator.name)(**model_config.generator.config).to(device)
+ parameter_dict = torch.load(args.resume_path, map_location='cpu')
  codec_model.load_state_dict(parameter_dict['codec_model'])
  codec_model.to(device)
  codec_model.eval()

  class BlockTokenRangeProcessor(LogitsProcessor):
      def __init__(self, start_id, end_id):
          self.blocked_token_ids = list(range(start_id, end_id))
@@ -116,118 +134,325 @@ class BlockTokenRangeProcessor(LogitsProcessor):
          scores[:, self.blocked_token_ids] = -float("inf")
          return scores

+ def load_audio_mono(filepath, sampling_rate=16000):
+     audio, sr = torchaudio.load(filepath)
+     # Convert to mono
+     audio = torch.mean(audio, dim=0, keepdim=True)
+     # Resample if needed
+     if sr != sampling_rate:
+         resampler = Resample(orig_freq=sr, new_freq=sampling_rate)
+         audio = resampler(audio)
+     return audio
+
  def split_lyrics(lyrics):
      pattern = r"\[(\w+)\](.*?)\n(?=\[|\Z)"
      segments = re.findall(pattern, lyrics, re.DOTALL)
+     structured_lyrics = [f"[{seg[0]}]\n{seg[1].strip()}\n\n" for seg in segments]
+     return structured_lyrics
+
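A small usage sketch of split_lyrics as defined above (the sample string is made up): the regex keeps each [section] header and strips the whitespace around its block.

sample = "[verse]\nIn the quiet of the evening\nShadows start to fall\n[chorus]\nHold me close tonight\n"
print(split_lyrics(sample))
# ['[verse]\nIn the quiet of the evening\nShadows start to fall\n\n',
#  '[chorus]\nHold me close tonight\n\n']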
+ def generate_music(genres, lyrics_content, num_segments_run, max_new_tokens_run): # Modified: Function to encapsulate generation logic
+     stage1_output_set_local = [] # Modified: Local variable to store output paths
+
+     lyrics = split_lyrics(lyrics_content)
+     print(len(lyrics))
+     # instruction
+     full_lyrics = "\n".join(lyrics)
+     prompt_texts = [f"Generate music from the given lyrics segment by segment.\n[Genre] {genres}\n{full_lyrics}"]
+     prompt_texts += lyrics
+
+     random_id = uuid.uuid4()
+     output_seq = None
+
+     # Here is suggested decoding config
+     top_p = 0.93
+     temperature = 1.0
+     repetition_penalty = 1.2
+     # special tokens
+     start_of_segment = mmtokenizer.tokenize('[start_of_segment]')
+     end_of_segment = mmtokenizer.tokenize('[end_of_segment]')
+
+     raw_output = None
+
+     # Format text prompt
+     run_n_segments = min(num_segments_run+1, len(lyrics)) # Modified: Use passed num_segments_run
+
+     print(list(enumerate(tqdm(prompt_texts[:run_n_segments]))))
+
+     global model # Modified: Declare model as global to use the loaded model in Gradio scope
+
+     for i, p in enumerate(tqdm(prompt_texts[:run_n_segments])):
+         section_text = p.replace('[start_of_segment]', '').replace('[end_of_segment]', '')
+         guidance_scale = 1.5 if i <=1 else 1.2
+         if i==0:
+             continue
+         if i==1:
+             if args.use_audio_prompt:
+                 audio_prompt = load_audio_mono(args.audio_prompt_path)
+                 audio_prompt.unsqueeze_(0)
+                 with torch.no_grad():
+                     raw_codes = codec_model.encode(audio_prompt.to(device), target_bw=0.5)
+                 raw_codes = raw_codes.transpose(0, 1)
+                 raw_codes = raw_codes.cpu().numpy().astype(np.int16)
+                 # Format audio prompt
+                 code_ids = codectool.npy2ids(raw_codes[0])
+                 audio_prompt_codec = code_ids[int(args.prompt_start_time *50): int(args.prompt_end_time *50)] # 50 is tps of xcodec
+                 audio_prompt_codec_ids = [mmtokenizer.soa] + codectool.sep_ids + audio_prompt_codec + [mmtokenizer.eoa]
+                 sentence_ids = mmtokenizer.tokenize("[start_of_reference]") + audio_prompt_codec_ids + mmtokenizer.tokenize("[end_of_reference]")
+                 head_id = mmtokenizer.tokenize(prompt_texts[0]) + sentence_ids
+             else:
+                 head_id = mmtokenizer.tokenize(prompt_texts[0])
+             prompt_ids = head_id + start_of_segment + mmtokenizer.tokenize(section_text) + [mmtokenizer.soa] + codectool.sep_ids
+         else:
+             prompt_ids = end_of_segment + start_of_segment + mmtokenizer.tokenize(section_text) + [mmtokenizer.soa] + codectool.sep_ids
+
+         prompt_ids = torch.as_tensor(prompt_ids).unsqueeze(0).to(device)
+         input_ids = torch.cat([raw_output, prompt_ids], dim=1) if i > 1 else prompt_ids
+         # Use window slicing in case output sequence exceeds the context of model
+         max_context = 16384-max_new_tokens_config-1 # Modified: Use max_new_tokens_config
+         if input_ids.shape[-1] > max_context:
+             print(f'Section {i}: output length {input_ids.shape[-1]} exceeding context length {max_context}, now using the last {max_context} tokens.')
+             input_ids = input_ids[:, -(max_context):]
+         with torch.no_grad():
+             output_seq = model.generate(
+                 input_ids=input_ids,
+                 max_new_tokens=max_new_tokens_run, # Modified: Use max_new_tokens_run
+                 min_new_tokens=100,
+                 do_sample=True,
+                 top_p=top_p,
+                 temperature=temperature,
+                 repetition_penalty=repetition_penalty,
+                 eos_token_id=mmtokenizer.eoa,
+                 pad_token_id=mmtokenizer.eoa,
+                 logits_processor=LogitsProcessorList([BlockTokenRangeProcessor(0, 32002), BlockTokenRangeProcessor(32016, 32016)]),
+                 guidance_scale=guidance_scale,
              )
+         if output_seq[0][-1].item() != mmtokenizer.eoa:
+             tensor_eoa = torch.as_tensor([[mmtokenizer.eoa]]).to(model.device)
+             output_seq = torch.cat((output_seq, tensor_eoa), dim=1)
+         if i > 1:
+             raw_output = torch.cat([raw_output, prompt_ids, output_seq[:, input_ids.shape[-1]:]], dim=1)
+         else:
+             raw_output = output_seq
+         print(len(raw_output))
+
+     # save raw output and check sanity
+     ids = raw_output[0].cpu().numpy()
+     soa_idx = np.where(ids == mmtokenizer.soa)[0].tolist()
+     eoa_idx = np.where(ids == mmtokenizer.eoa)[0].tolist()
+     if len(soa_idx)!=len(eoa_idx):
+         raise ValueError(f'invalid pairs of soa and eoa, Num of soa: {len(soa_idx)}, Num of eoa: {len(eoa_idx)}')
+
+     vocals = []
+     instrumentals = []
+     range_begin = 1 if args.use_audio_prompt else 0
+     for i in range(range_begin, len(soa_idx)):
+         codec_ids = ids[soa_idx[i]+1:eoa_idx[i]]
+         if codec_ids[0] == 32016:
+             codec_ids = codec_ids[1:]
+         codec_ids = codec_ids[:2 * (codec_ids.shape[0] // 2)]
+         vocals_ids = codectool.ids2npy(rearrange(codec_ids,"(n b) -> b n", b=2)[0])
+         vocals.append(vocals_ids)
+         instrumentals_ids = codectool.ids2npy(rearrange(codec_ids,"(n b) -> b n", b=2)[1])
+         instrumentals.append(instrumentals_ids)
+     vocals = np.concatenate(vocals, axis=1)
+     instrumentals = np.concatenate(instrumentals, axis=1)
+     vocal_save_path = os.path.join(stage1_output_dir, f"cot_{genres.replace(' ', '-')}_tp{top_p}_T{temperature}_rp{repetition_penalty}_maxtk{max_new_tokens_run}_vocal_{random_id}".replace('.', '@')+'.npy') # Modified: Use max_new_tokens_run in filename
+     inst_save_path = os.path.join(stage1_output_dir, f"cot_{genres.replace(' ', '-')}_tp{top_p}_T{temperature}_rp{repetition_penalty}_maxtk{max_new_tokens_run}_instrumental_{random_id}".replace('.', '@')+'.npy') # Modified: Use max_new_tokens_run in filename
+     np.save(vocal_save_path, vocals)
+     np.save(inst_save_path, instrumentals)
+     stage1_output_set_local.append(vocal_save_path)
+     stage1_output_set_local.append(inst_save_path)
+
+     # offload model - Removed offloading for gradio integration to keep model loaded
+     # if not args.disable_offload_model:
+     #     model.cpu()
+     #     del model
+     #     torch.cuda.empty_cache()
+
+     print("Converting to Audio...")
+
+     # convert audio tokens to audio
+     def save_audio(wav: torch.Tensor, path, sample_rate: int, rescale: bool = False):
+         folder_path = os.path.dirname(path)
+         if not os.path.exists(folder_path):
+             os.makedirs(folder_path)
+         limit = 0.99
+         max_val = wav.abs().max()
+         wav = wav * min(limit / max_val, 1) if rescale else wav.clamp(-limit, limit)
+         torchaudio.save(str(path), wav, sample_rate=sample_rate, encoding='PCM_S', bits_per_sample=16)
+     # reconstruct tracks
+     recons_output_dir = os.path.join(args.output_dir, "recons")
+     recons_mix_dir = os.path.join(recons_output_dir, 'mix')
+     os.makedirs(recons_mix_dir, exist_ok=True)
+     tracks = []
+     for npy in stage1_output_set_local: # Modified: Use stage1_output_set_local
+         codec_result = np.load(npy)
+         decodec_rlt=[]
          with torch.no_grad():
+             decoded_waveform = codec_model.decode(torch.as_tensor(codec_result.astype(np.int16), dtype=torch.long).unsqueeze(0).permute(1, 0, 2).to(device))
+         decoded_waveform = decoded_waveform.cpu().squeeze(0)
+         decodec_rlt.append(torch.as_tensor(decoded_waveform))
+         decodec_rlt = torch.cat(decodec_rlt, dim=-1)
+         save_path = os.path.join(recons_output_dir, os.path.splitext(os.path.basename(npy))[0] + ".mp3")
+         tracks.append(save_path)
+         save_audio(decodec_rlt, save_path, 16000)
+     # mix tracks
+     for inst_path in tracks:
+         try:
+             if (inst_path.endswith('.wav') or inst_path.endswith('.mp3')) \
+                     and 'instrumental' in inst_path:
+                 # find pair
+                 vocal_path = inst_path.replace('instrumental', 'vocal')
+                 if not os.path.exists(vocal_path):
+                     continue
+                 # mix
+                 recons_mix = os.path.join(recons_mix_dir, os.path.basename(inst_path).replace('instrumental', 'mixed'))
+                 vocal_stem, sr = sf.read(inst_path)
+                 instrumental_stem, _ = sf.read(vocal_path)
+                 mix_stem = (vocal_stem + instrumental_stem) / 1
+                 sf.write(recons_mix, mix_stem, sr)
+         except Exception as e:
+             print(e)
+
+     # vocoder to upsample audios
+     vocal_decoder, inst_decoder = build_codec_model(args.config_path, args.vocal_decoder_path, args.inst_decoder_path)
+     vocoder_output_dir = os.path.join(args.output_dir, 'vocoder')
+     vocoder_stems_dir = os.path.join(vocoder_output_dir, 'stems')
+     vocoder_mix_dir = os.path.join(vocoder_output_dir, 'mix')
+     os.makedirs(vocoder_mix_dir, exist_ok=True)
+     os.makedirs(vocoder_stems_dir, exist_ok=True)
+
+     instrumental_output = None # Initialize outside try block
+     vocal_output = None # Initialize outside try block
+     recons_mix_path = "" # Initialize outside try block
+
+     for npy in stage1_output_set_local: # Modified: Use stage1_output_set_local
+         if 'instrumental' in npy:
+             # Process instrumental
+             instrumental_output = process_audio(
+                 npy,
+                 os.path.join(vocoder_stems_dir, 'instrumental.mp3'),
+                 args.rescale,
+                 args,
+                 inst_decoder,
+                 codec_model
+             )
+         else:
+             # Process vocal
+             vocal_output = process_audio(
+                 npy,
+                 os.path.join(vocoder_stems_dir, 'vocal.mp3'),
+                 args.rescale,
+                 args,
+                 vocal_decoder,
+                 codec_model
+             )
+     # mix tracks
+     try:
+         mix_output = instrumental_output + vocal_output
+         recons_mix_path_temp = os.path.join(recons_mix_dir, os.path.basename(recons_mix)) # Use recons_mix from previous step
+         save_audio(mix_output, recons_mix_path_temp, 44100, args.rescale)
+         print(f"Created mix: {recons_mix_path_temp}")
+         recons_mix_path = recons_mix_path_temp # Assign to outer scope variable
+     except RuntimeError as e:
+         print(e)
+         print(f"mix {recons_mix_path} failed! inst: {instrumental_output.shape}, vocal: {vocal_output.shape}")
+
+     # Post process
+     final_output_path = os.path.join(args.output_dir, os.path.basename(recons_mix_path)) # Use recons_mix_path from previous step
+     replace_low_freq_with_energy_matched(
+         a_file=recons_mix_path, # 16kHz # Use recons_mix_path
+         b_file=recons_mix_path_temp, # 48kHz # Use recons_mix_path_temp
+         c_file=final_output_path,
+         cutoff_freq=5500.0
+     )
+     print("All process Done")
+     return final_output_path # Modified: Return the final output audio path
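The vocal/instrumental split inside generate_music relies on Stage 1 emitting the two stems' codec ids interleaved frame by frame; a sketch with dummy ids (not real codec output) of what the rearrange(codec_ids, "(n b) -> b n", b=2) step does:

import numpy as np
from einops import rearrange

codec_ids = np.array([10, 20, 11, 21, 12, 22])     # v0, i0, v1, i1, v2, i2 (dummy values)
stems = rearrange(codec_ids, "(n b) -> b n", b=2)  # shape (2, 3): row 0 = vocal ids, row 1 = instrumental ids
print(stems[0])  # [10 11 12]
print(stems[1])  # [20 21 22]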
+ # Gradio UI
+ model = AutoModelForCausalLM.from_pretrained( # Load model here for Gradio scope
+     "m-a-p/YuE-s1-7B-anneal-en-cot",
+     torch_dtype=torch.float16,
+     attn_implementation="flash_attention_2", # To enable flashattn, you have to install flash-attn
+ ).to(device).eval() # Modified: Load model globally for Gradio to access
+
+ def empty_output_folder(output_dir):
+     # List all files in the output directory
+     files = os.listdir(output_dir)
+
+     # Iterate over the files and remove them
+     for file in files:
+         file_path = os.path.join(output_dir, file)
+         try:
+             if os.path.isdir(file_path):
+                 # If it's a directory, remove it recursively
+                 shutil.rmtree(file_path)
+             else:
+                 # If it's a file, delete it
+                 os.remove(file_path)
+         except Exception as e:
+             print(f"Error deleting file {file_path}: {e}")
+
+ @spaces.GPU(duration=120)
+ def infer_gradio(genre_txt_content, lyrics_txt_content, num_segments=2, max_new_tokens=200): # Modified: Renamed infer to infer_gradio to avoid conflict
+
+     # Ensure the output folder exists
+     output_dir = "./output"
+     os.makedirs(output_dir, exist_ok=True)
+     print(f"Output folder ensured at: {output_dir}")
+
+     empty_output_folder(output_dir)
+
+     # Call the generation function directly
+     output_audio_path = generate_music(genre_txt_content, lyrics_txt_content, int(num_segments), int(max_new_tokens)) # Modified: Call generate_music and pass num_segments and max_new_tokens as int
+
+     if output_audio_path and os.path.exists(output_audio_path):
+         print("Generated audio file:", output_audio_path)
+         return output_audio_path
+     else:
+         print("No audio file generated or path is invalid.")
+         return None
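A hedged usage sketch of the entry point above when called outside the UI (it assumes the model and codec loaded earlier in this file plus a GPU; the genre and lyrics strings are taken from the demo examples):

if __name__ == "__main__":
    path = infer_gradio(
        "female blues airy vocal bright vocal piano sad romantic guitar jazz",
        "[verse]\nIn the quiet of the evening, shadows start to fall\n[chorus]\nDon't let this moment fade, hold me close tonight\n",
        num_segments=2,
        max_new_tokens=500,
    )
    print(path)  # path to the mixed output inside ./output, or None on failure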
  with gr.Blocks() as demo:
+     with gr.Column():
+         gr.Markdown("# YuE: Open Music Foundation Models for Full-Song Generation")
+         gr.HTML("""
+             <div style="display:flex;column-gap:4px;">
+                 <a href="https://github.com/multimodal-art-projection/YuE">
+                     <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
+                 </a>
+                 <a href="https://map-yue.github.io">
+                     <img src='https://img.shields.io/badge/Project-Page-green'>
+                 </a>
+                 <a href="https://huggingface.co/spaces/innova-ai/YuE-music-generator-demo?duplicate=true">
+                     <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
+                 </a>
+             </div>
+         """)
+         with gr.Row():
+             with gr.Column():
+                 genre_txt = gr.Textbox(label="Genre")
+                 lyrics_txt = gr.Textbox(label="Lyrics")
+
+             with gr.Column():
+                 if is_shared_ui:
+                     num_segments = gr.Number(label="Number of Segments", value=2, interactive=True)
+                     max_new_tokens = gr.Slider(label="Max New Tokens", minimum=500, maximum="3000", step=500, value=500, interactive=True) # increase it after testing
+                 else:
+                     num_segments = gr.Number(label="Number of Song Segments", value=2, interactive=True)
+                     max_new_tokens = gr.Slider(label="Max New Tokens", minimum=500, maximum="24000", step=500, value=3000, interactive=True)
+                 submit_btn = gr.Button("Submit")
+                 music_out = gr.Audio(label="Audio Result")
+
+         gr.Examples(
+             examples = [
+                 [
+                     "female blues airy vocal bright vocal piano sad romantic guitar jazz",
+                     """[verse]
  In the quiet of the evening, shadows start to fall
  Whispers of the night wind echo through the hall
  Lost within the silence, I hear your gentle voice
@@ -237,23 +462,39 @@ Guiding me back homeward, making my heart rejoice
  Don't let this moment fade, hold me close tonight
  With you here beside me, everything's alright
  Can't imagine life alone, don't want to let you go
+ Stay with me forever, let our love just flow
+ """
+                 ],
+                 [
+                     "rap piano street tough piercing vocal hip-hop synthesizer clear vocal male",
+                     """[verse]
  Woke up in the morning, sun is shining bright
  Chasing all my dreams, gotta get my mind right
  City lights are fading, but my vision's clear
+ Got my team beside me, no room for fear
+ Walking through the streets, beats inside my head
+ Every step I take, closer to the bread
+ People passing by, they don't understand
+ Building up my future with my own two hands

+ [chorus]
+ This is my life, and I'm aiming for the top
+ Never gonna quit, no, I'm never gonna stop
+ Through the highs and lows, I'mma keep it real
+ Living out my dreams with this mic and a deal
+ """
+                 ]
+             ],
+             inputs = [genre_txt, lyrics_txt],
+             outputs = [music_out],
+             cache_examples = False,
+             # cache_mode="lazy",
+             fn=infer_gradio # Modified: Use infer_gradio
+         )

+     submit_btn.click(
+         fn = infer_gradio, # Modified: Use infer_gradio
+         inputs = [genre_txt, lyrics_txt, num_segments, max_new_tokens],
+         outputs = [music_out]
+     )
+ demo.queue().launch(show_api=False, show_error=True)
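For reference, a toy sketch of what the BlockTokenRangeProcessor defined above does inside model.generate (toy tensor sizes, not the real 32k-token vocabulary): setting a range of scores to -inf gives those token ids zero sampling probability, which is how non-codec ids are kept out of the generated audio token stream.

import torch

scores = torch.zeros(1, 8)             # pretend logits for a vocabulary of 8 tokens
blocked_token_ids = list(range(2, 5))  # ids 2..4 stand in for the blocked range
scores[:, blocked_token_ids] = -float("inf")
probs = torch.softmax(scores, dim=-1)
print(probs[0, 2:5])                   # tensor([0., 0., 0.])
print(probs.sum().item())              # ~1.0 — the mass shifts to the allowed ids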