Manjushri committed
Commit e5c5aed · 1 Parent(s): 6d69013

Delete app.py

Files changed (1)
  1. app.py +0 -23
app.py DELETED
@@ -1,23 +0,0 @@
- import torchaudio
- from audiocraft.models import MusicGen
- from audiocraft.data.audio import audio_write
- import gradio as gr
- import modin.pandas as pd
- from tempfile import NamedTemporaryFile
-
- model = MusicGen.get_pretrained('large')
-
- def genie(Prompt, Duration):
-     model.set_generation_params(duration=Duration)
-     wav = model.generate(Prompt)
-     for idx, one_wav in enumerate(wav):
-         with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
-             audio_write(
-                 file.name, one_wav.cpu(), model.sample_rate, strategy="loudness",
-                 loudness_headroom_db=16, add_suffix=False)
-     return file.name
-
- title = 'MusicGen'
- description = ("Audiocraft provides the code and models for MusicGen, a simple and controllable model for music generation. MusicGen is a single-stage auto-regressive Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like MusicLM, MusicGen doesn't require a self-supervised semantic representation, and it generates all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict them in parallel, thus having only 50 auto-regressive steps per second of audio.")
- article = ('MusicGen consists of an EnCodec model for audio tokenization and an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters; and two variants: a model trained for the text-to-music generation task and a model trained for melody-guided music generation. <br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>')
- gr.Interface(fn=genie, inputs=[gr.Textbox(label='Text Prompt. Warning: Longer prompts may cause a reset.'), gr.Slider(minimum=1, maximum=8, value=6, label='Duration')], outputs=gr.Audio(), title=title, description=description, article=article).queue(max_size=2).launch(debug=True)
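
For reference, below is a minimal sketch of the app being deleted, reconstructed with one likely bug addressed: MusicGen.generate expects a list of text descriptions, so the single prompt is wrapped in a list here (the deleted code passed the raw string). The unused torchaudio and modin.pandas imports are dropped. This is a reconstruction under those assumptions, not the Space's actual replacement code.

import gradio as gr
from tempfile import NamedTemporaryFile
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

# Load the large MusicGen checkpoint once at startup, as the deleted app did.
model = MusicGen.get_pretrained('large')

def genie(prompt, duration):
    # Cap generation length at the requested number of seconds.
    model.set_generation_params(duration=duration)
    # generate() takes a list of descriptions; one prompt yields one waveform.
    wav = model.generate([prompt])
    one_wav = wav[0]
    # Write the waveform to a temporary .wav with loudness normalization.
    with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
        audio_write(file.name, one_wav.cpu(), model.sample_rate,
                    strategy="loudness", loudness_headroom_db=16, add_suffix=False)
    return file.name

gr.Interface(
    fn=genie,
    inputs=[gr.Textbox(label='Text Prompt'),
            gr.Slider(minimum=1, maximum=8, value=6, label='Duration')],
    outputs=gr.Audio(),
    title='MusicGen',
).queue(max_size=2).launch(debug=True)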