quazim committed
Commit fa9bb88 · 1 Parent(s): 5bb310c
Files changed (1)
  1. app.py +10 -3
app.py CHANGED
@@ -68,10 +68,13 @@ def load_model():
     print("Initial model loading...")
     _processor = AutoProcessor.from_pretrained("facebook/musicgen-large")
     _model = MusicgenForConditionalGeneration.from_pretrained(
-        "facebook/musicgen-large",
-        torch_dtype=torch.float16,
-        device="cuda" if torch.cuda.is_available() else "cpu"
+        "facebook/musicgen-large",
+        torch_dtype=torch.float16,
+        device="cuda",
+        mode="S",
+        __paged=True,
     )
+    _model.eval()
     return _processor, _model
 
 def generate_music(text_prompt, duration=10, temperature=1.0, top_k=250, top_p=0.0):
@@ -80,11 +83,15 @@ def generate_music(text_prompt, duration=10, temperature=1.0, top_k=250, top_p=0.0):
     processor, model = load_model()
 
     # Process the text prompt
+    print("Processor start")
     inputs = processor(
         text=[text_prompt],
         padding=True,
         return_tensors="pt",
     ).to("cuda")
+    print("Processor end")
+    print(inputs.device)
+
     # Generate audio
     with torch.no_grad():
         audio_values = model.generate(
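
For reference, a minimal sketch (not this Space's code) of the standard Transformers pattern the hunk above is aiming at: `from_pretrained()` takes `torch_dtype`, while the device is normally set afterwards with `.to()`. The prompt text and `max_new_tokens` value below are placeholders.

# Sketch: load MusicGen once, then generate from a text prompt.
import torch
from transformers import AutoProcessor, MusicgenForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained("facebook/musicgen-large")
model = MusicgenForConditionalGeneration.from_pretrained(
    "facebook/musicgen-large",
    torch_dtype=torch.float16,  # fp16 weights, as in the commit; assumes a CUDA device
).to(device)
model.eval()

# Tokenize the prompt and generate audio without tracking gradients.
inputs = processor(
    text=["lo-fi beat with a chill melody"],  # placeholder prompt
    padding=True,
    return_tensors="pt",
).to(device)

with torch.no_grad():
    audio_values = model.generate(**inputs, do_sample=True, max_new_tokens=256)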