# YuE music generation demo (Hugging Face Space, runs on L40S GPU)
import gradio as gr
import subprocess
import os
import shutil
import tempfile
import torch
import logging
import re

# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('yue_generation.log'),
        logging.StreamHandler()
    ]
)

# Language detection and model selection.
# Note: order matters; kanji-only Japanese text matches the Chinese range first.
def detect_and_select_model(text):
    if re.search(r'[\u3131-\u318E\uAC00-\uD7A3]', text):  # Korean
        return "m-a-p/YuE-s1-7B-anneal-jp-kr-cot"
    elif re.search(r'[\u4e00-\u9fff]', text):  # Chinese
        return "m-a-p/YuE-s1-7B-anneal-zh-cot"
    elif re.search(r'[\u3040-\u309F\u30A0-\u30FF]', text):  # Japanese
        return "m-a-p/YuE-s1-7B-anneal-jp-kr-cot"
    else:  # English / other
        return "m-a-p/YuE-s1-7B-anneal-en-cot"

def optimize_model_selection(lyrics, genre):
    # genre is currently unused; selection is driven by the lyrics' script
    model_path = detect_and_select_model(lyrics)
    model_config = {
        "m-a-p/YuE-s1-7B-anneal-en-cot": {
            "max_tokens": 24000,
            "temperature": 0.8,
            "batch_size": 8
        },
        "m-a-p/YuE-s1-7B-anneal-jp-kr-cot": {
            "max_tokens": 24000,
            "temperature": 0.7,
            "batch_size": 8
        },
        "m-a-p/YuE-s1-7B-anneal-zh-cot": {
            "max_tokens": 24000,
            "temperature": 0.7,
            "batch_size": 8
        }
    }
    return model_path, model_config[model_path]

# GPU settings optimization
def optimize_gpu_settings():
    if torch.cuda.is_available():
        # Trade a little precision for speed: TF32 matmuls plus cuDNN autotuning
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.enabled = True
        torch.cuda.empty_cache()
        torch.cuda.set_device(0)
        logging.info(f"Using GPU: {torch.cuda.get_device_name(0)}")
        logging.info(f"Available GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
    else:
        logging.warning("GPU not available!")

def install_flash_attn():
    try:
        logging.info("Installing flash-attn...")
        subprocess.run(
            ["pip", "install", "flash-attn", "--no-build-isolation"],
            check=True,
            capture_output=True
        )
        logging.info("flash-attn installed successfully!")
    except subprocess.CalledProcessError as e:
        logging.error(f"Failed to install flash-attn: {e}")
        raise

def initialize_system():
    optimize_gpu_settings()
    install_flash_attn()
    from huggingface_hub import snapshot_download
    folder_path = './inference/xcodec_mini_infer'
    os.makedirs(folder_path, exist_ok=True)
    logging.info(f"Created folder at: {folder_path}")
    snapshot_download(
        repo_id="m-a-p/xcodec_mini_infer",
        local_dir="./inference/xcodec_mini_infer",
        resume_download=True
    )
    try:
        # All later relative paths (infer.py, ./output, temp files) resolve
        # under ./inference after this chdir.
        os.chdir("./inference")
        logging.info(f"Working directory changed to: {os.getcwd()}")
    except FileNotFoundError as e:
        logging.error(f"Directory error: {e}")
        raise

def get_cached_file_path(content_hash, prefix):
    # Placeholder: no caching happens yet; each call writes a fresh temp file.
    return create_temp_file(content_hash, prefix)
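
# A minimal caching sketch (an assumption, not the app's current behavior):
# decorating the helper with functools.lru_cache would let identical
# (content_hash, prefix) pairs reuse one temp file instead of writing a new one.
#
#   from functools import lru_cache
#
#   @lru_cache(maxsize=64)
#   def get_cached_file_path(content_hash, prefix):
#       return create_temp_file(content_hash, prefix)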

def empty_output_folder(output_dir):
    try:
        shutil.rmtree(output_dir)
        os.makedirs(output_dir)
        logging.info(f"Output folder cleaned: {output_dir}")
    except Exception as e:
        logging.error(f"Error cleaning output folder: {e}")
        raise

def create_temp_file(content, prefix, suffix=".txt"):
    temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", prefix=prefix, suffix=suffix)
    # Normalize line endings and make sure the file ends with a blank line
    content = content.strip() + "\n\n"
    content = content.replace("\r\n", "\n").replace("\r", "\n")
    temp_file.write(content)
    temp_file.close()
    logging.debug(f"Temporary file created: {temp_file.name}")
    return temp_file.name
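
# Illustrative call (the exact path is hypothetical; tempfile picks the name):
#   create_temp_file("female blues piano", prefix="genre_")
#   -> "/tmp/genre_ab12cd.txt" containing the normalized text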

def get_last_mp3_file(output_dir):
    mp3_files = [f for f in os.listdir(output_dir) if f.endswith('.mp3')]
    if not mp3_files:
        logging.warning("No MP3 files found")
        return None
    # Pick the most recently modified MP3
    mp3_files_with_path = [os.path.join(output_dir, f) for f in mp3_files]
    mp3_files_with_path.sort(key=os.path.getmtime, reverse=True)
    return mp3_files_with_path[0]

def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
    genre_txt_path = None
    lyrics_txt_path = None
    try:
        # Model selection and configuration
        model_path, config = optimize_model_selection(lyrics_txt_content, genre_txt_content)
        logging.info(f"Selected model: {model_path}")
        # Create temporary input files
        genre_txt_path = create_temp_file(genre_txt_content, prefix="genre_")
        lyrics_txt_path = create_temp_file(lyrics_txt_content, prefix="lyrics_")
        output_dir = "./output"
        os.makedirs(output_dir, exist_ok=True)
        empty_output_folder(output_dir)
        # Build the inference command. Gradio delivers numbers as floats, so
        # cast before stringifying; cap the token budget at the model's limit.
        command = [
            "python", "infer.py",
            "--stage1_model", model_path,
            "--stage2_model", "m-a-p/YuE-s2-1B-general",
            "--genre_txt", genre_txt_path,
            "--lyrics_txt", lyrics_txt_path,
            "--run_n_segments", str(int(num_segments)),
            "--stage2_batch_size", str(config['batch_size']),
            "--output_dir", output_dir,
            "--cuda_idx", "0",
            "--max_new_tokens", str(min(int(max_new_tokens), config['max_tokens'])),
            "--temperature", str(config['temperature']),
            "--disable_offload_model",
            "--use_flash_attention_2",
            "--bf16"
        ]
        # CUDA environment variables
        env = os.environ.copy()
        env.update({
            "CUDA_VISIBLE_DEVICES": "0",
            "CUDA_HOME": "/usr/local/cuda",
            "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
            "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}",
            "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512"
        })
        # Run inference
        subprocess.run(command, env=env, check=True, capture_output=True)
        logging.info("Inference completed successfully")
        # Collect the result
        last_mp3 = get_last_mp3_file(output_dir)
        if last_mp3:
            logging.info(f"Generated audio file: {last_mp3}")
            return last_mp3
        else:
            logging.warning("No output audio file generated")
            return None
    except Exception as e:
        logging.error(f"Inference error: {e}")
        raise
    finally:
        # Clean up temporary files (paths are None if creation never happened)
        for file in [genre_txt_path, lyrics_txt_path]:
            if not file:
                continue
            try:
                os.remove(file)
                logging.debug(f"Removed temporary file: {file}")
            except OSError as e:
                logging.warning(f"Failed to remove temporary file {file}: {e}")

# Gradio interface
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("# YuE: Open Music Foundation Models for Full-Song Generation (Multi-Language Support)")
        gr.HTML("""
            <div style="display:flex;column-gap:4px;">
                <a href="https://github.com/multimodal-art-projection/YuE">
                    <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
                </a>
                <a href="https://map-yue.github.io">
                    <img src='https://img.shields.io/badge/Project-Page-green'>
                </a>
            </div>
        """)
        with gr.Row():
            with gr.Column():
                genre_txt = gr.Textbox(
                    label="Genre",
                    placeholder="Enter music genre and style descriptions..."
                )
                lyrics_txt = gr.Textbox(
                    label="Lyrics (Supports English, Korean, Japanese, Chinese)",
                    placeholder="Enter song lyrics...",
                    lines=10
                )
            with gr.Column():
                num_segments = gr.Number(
                    label="Number of Song Segments",
                    value=2,
                    minimum=1,
                    maximum=4,
                    step=1,
                    interactive=True
                )
                max_new_tokens = gr.Slider(
                    label="Max New Tokens",
                    minimum=500,
                    maximum=32000,
                    step=500,
                    value=4000,
                    interactive=True
                )
                submit_btn = gr.Button("Generate Music", variant="primary")
                music_out = gr.Audio(label="Generated Audio")
        # Multilingual examples
        gr.Examples(
            examples=[
                # English example
                [
                    "female blues airy vocal bright vocal piano sad romantic guitar jazz",
                    """[verse]
In the quiet of the evening, shadows start to fall
Whispers of the night wind echo through the hall
Lost within the silence, I hear your gentle voice
Guiding me back homeward, making my heart rejoice
[chorus]
Don't let this moment fade, hold me close tonight
With you here beside me, everything's alright
Can't imagine life alone, don't want to let you go
Stay with me forever, let our love just flow
"""
                ],
                # Korean example
                [
                    "K-pop bright energetic synth dance electronic",
                    """[verse]
빛나는 별들처럼 우리의 꿈이
저 하늘을 수놓아 반짝이네
함께라면 어디든 갈 수 있어
우리의 이야기가 시작되네
[chorus]
달려가자 더 높이 더 멀리
두려움은 없어 너와 함께라면
영원히 계속될 우리의 노래
이 순간을 기억해 forever
"""
                ],
                # Japanese example
                [
                    "J-pop melodic soft piano emotional",
                    """[verse]
春の風に乗って
思い出が流れる
あの日の約束を
今でも覚えてる
[chorus]
君と見た空は
今も変わらないよ
どこまでも続く
この道の先で
"""
                ],
                # Chinese example
                [
                    "Chinese pop traditional fusion modern",
                    """[verse]
晨光照亮天际
新的一天开始
追逐着梦想前进
不停歇的脚步
[chorus]
让希望照亮前方
让勇气伴随身旁
这一路有你相伴
永远不会孤单
"""
                ]
            ],
            inputs=[genre_txt, lyrics_txt]
        )

    # System initialization
    initialize_system()

    # Event handler
    submit_btn.click(
        fn=infer,
        inputs=[genre_txt, lyrics_txt, num_segments, max_new_tokens],
        outputs=[music_out]
    )

# Launch with server settings (queue() already enables request queuing)
demo.queue(concurrency_count=2).launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=True,
    show_api=True,
    show_error=True
)