Update app.py
app.py
CHANGED
@@ -71,14 +71,6 @@ model = AutoModelForCausalLM.from_pretrained(
 
 mmtokenizer = _MMSentencePieceTokenizer("./mm_tokenizer_v0.2_hf/tokenizer.model")
 
-codectool = CodecManipulator("xcodec", 0, 1)
-model_config = OmegaConf.load('./xcodec_mini_infer/final_ckpt/config.yaml')
-codec_model = eval(model_config.generator.name)(**model_config.generator.config).to(device)
-parameter_dict = torch.load('./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', map_location='cpu')
-codec_model.load_state_dict(parameter_dict['codec_model'])
-codec_model.to(device)
-codec_model.eval()
-
 print("Models Loaded!")
 
 
@@ -159,6 +151,13 @@ def generate_music(
     cuda_idx=0,
     rescale=False,
 ):
+    codectool = CodecManipulator("xcodec", 0, 1)
+    model_config = OmegaConf.load('./xcodec_mini_infer/final_ckpt/config.yaml')
+    codec_model = eval(model_config.generator.name)(**model_config.generator.config).to(device)
+    parameter_dict = torch.load('./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', map_location='cpu')
+    codec_model.load_state_dict(parameter_dict['codec_model'])
+    codec_model.to(device)
+    codec_model.eval()
 
     if use_audio_prompt and not audio_prompt_path:
         raise FileNotFoundError("Please offer audio prompt filepath using '--audio_prompt_path', when you enable 'use_audio_prompt'!")
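
Note on the change itself: moving the xcodec codec-model setup out of module scope and into generate_music keeps app startup light, but as written the config, checkpoint, and state dict are re-read on every call. Below is a minimal sketch of a cached loader that keeps the lazy behavior without the per-call reload; the helper name _load_codec_model is an assumption, everything else mirrors the lines in the diff above:

from functools import lru_cache

import torch
from omegaconf import OmegaConf

# Hypothetical helper, not part of this commit: runs the same setup steps
# as the added lines once, then returns the memoized model on later calls.
@lru_cache(maxsize=1)
def _load_codec_model(device):
    model_config = OmegaConf.load('./xcodec_mini_infer/final_ckpt/config.yaml')
    codec_model = eval(model_config.generator.name)(**model_config.generator.config)
    parameter_dict = torch.load('./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', map_location='cpu')
    codec_model.load_state_dict(parameter_dict['codec_model'])
    return codec_model.to(device).eval()

Inside generate_music, codec_model = _load_codec_model(device) would then replace the checkpoint-loading lines; CodecManipulator("xcodec", 0, 1) is cheap and can stay inline.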