from typing import Any, Dict, List

import os

import librosa
import torch
from transformers import AutoProcessor, MusicgenForConditionalGeneration


class EndpointHandler:
    """Inference-endpoint handler that generates music with MusicGen.

    The first chunk is conditioned on a short excerpt of a reference
    accompaniment track shipped alongside the model; each subsequent chunk
    is conditioned on the audio of the previous chunk, and chunks are
    concatenated until the target duration is reached.
    """

    # Target length of the generated clip, in seconds.
    TARGET_SECONDS = 60
    # Sample rate used for both the reference track and duration accounting.
    SAMPLE_RATE = 32000
    # Number of samples of audio context fed to the model per chunk
    # (~1.5 s at 32 kHz; value kept from the original implementation).
    CONTEXT_SAMPLES = 48021

    def __init__(self, path: str = "") -> None:
        """Load the MusicGen processor/model and the reference accompaniment.

        Args:
            path: Local directory containing the model checkpoint and an
                ``accompaniment.mp3`` reference track.
        """
        self.processor = AutoProcessor.from_pretrained(path)
        # NOTE(review): assumes a CUDA device is available on the endpoint.
        self.model = MusicgenForConditionalGeneration.from_pretrained(path).to("cuda")
        # Reference audio used to condition the very first generated chunk.
        self.ref_sample_y, self.ref_sr = librosa.load(
            os.path.join(path, "accompaniment.mp3"), sr=self.SAMPLE_RATE
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Generate roughly ``TARGET_SECONDS`` of audio for a text prompt.

        Args:
            data: Request payload. ``data["inputs"]`` is the text prompt
                (falls back to a default prompt when absent);
                ``data["parameters"]`` (optional dict) overrides the default
                ``generate()`` keyword arguments.

        Returns:
            A one-element list ``[{"generated_text": waveform}]`` where
            ``waveform`` is the generated audio as a numpy array.
        """
        # Bug fix: the popped prompt was previously discarded in favour of a
        # hard-coded string. Keep that string as the fallback so empty
        # payloads behave as before.
        prompt = data.pop("inputs", None) or "80s blues track with groovy saxophone"
        parameters = data.pop("parameters", None)

        # Bug fix: `parameters` was popped but never used; let callers
        # override the generation defaults.
        gen_kwargs: Dict[str, Any] = {
            "do_sample": True,
            "guidance_scale": 3,
            "max_new_tokens": 256,
        }
        if isinstance(parameters, dict):
            gen_kwargs.update(parameters)

        # Seed context for the first chunk (local slice; the original
        # mutated self.ref_sample_y on every call for no benefit).
        seed_audio = self.ref_sample_y[: self.CONTEXT_SAMPLES]

        final_audio_values = None
        audio_values = None
        # Keep generating chunks until the accumulated audio reaches the
        # target duration. final_audio_values is (batch, channels, samples).
        while (
            final_audio_values is None
            or len(final_audio_values[0][0]) / self.SAMPLE_RATE < self.TARGET_SECONDS
        ):
            if audio_values is None:
                context = seed_audio
            else:
                # Condition the next chunk on the previous chunk's audio.
                # generate() returns (batch, channels, samples); squeeze to
                # 1-D and move to host memory for the feature extractor.
                # NOTE(review): this takes the *first* CONTEXT_SAMPLES of the
                # previous chunk, mirroring the original code — taking the
                # tail may give smoother continuations; confirm intent.
                context = (
                    audio_values.squeeze(0).squeeze(0)[: self.CONTEXT_SAMPLES].cpu()
                )

            # Bug fix: inputs must live on the same device as the model;
            # the original left them on CPU and generate() would fail.
            inputs = self.processor(
                audio=context,
                sampling_rate=self.ref_sr,
                text=[prompt],
                padding=True,
                return_tensors="pt",
            ).to("cuda")

            audio_values = self.model.generate(**inputs, **gen_kwargs)

            # Append the new chunk along the sample (last) axis.
            if final_audio_values is None:
                final_audio_values = audio_values
            else:
                final_audio_values = torch.cat((final_audio_values, audio_values), -1)

        prediction = final_audio_values[0].cpu().numpy()
        return [{"generated_text": prediction}]