Spaces:
Running
on
Zero
Running
on
Zero
chong.zhang
committed on
Commit
·
84d3675
1
Parent(s):
2237355
update
Browse files
app.py
CHANGED
@@ -12,12 +12,7 @@ os.system('nvidia-smi')
|
|
12 |
print(torch.backends.cudnn.version())
|
13 |
|
14 |
def generate_filename():
|
15 |
-
|
16 |
-
seconds_since_epoch = int(now.timestamp())
|
17 |
-
# Convert seconds to string
|
18 |
-
seconds_str = str(seconds_since_epoch)
|
19 |
-
# Hash the string using SHA-256
|
20 |
-
hash_object = hashlib.sha256(seconds_str.encode())
|
21 |
hash_string = hash_object.hexdigest()
|
22 |
return hash_string
|
23 |
|
@@ -60,6 +55,15 @@ def get_args(
|
|
60 |
print(args)
|
61 |
return args
|
62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
@spaces.GPU
|
64 |
def music_generation(args):
|
65 |
set_env_variables()
|
@@ -90,24 +94,6 @@ def music_generation(args):
|
|
90 |
trim=args["trim"])
|
91 |
return output_path
|
92 |
|
93 |
-
def update_text():
|
94 |
-
global text_input # Declare as global to modify the outer scope variable
|
95 |
-
text_input = "New value set by button click"
|
96 |
-
return text_input
|
97 |
-
|
98 |
-
default_prompts = [
|
99 |
-
"Experience soothing and sensual instrumental jazz with a touch of Bossa Nova, perfect for a relaxing restaurant or spa ambiance.",
|
100 |
-
"Compose an uplifting R&B song.",
|
101 |
-
"Create an emotional, introspective folk song with acoustic guitar and soft vocals."
|
102 |
-
]
|
103 |
-
|
104 |
-
def trim_audio(audio_file, cut_seconds=5):
|
105 |
-
audio, sr = torchaudio.load(audio_file)
|
106 |
-
num_samples = cut_seconds * sr
|
107 |
-
cutted_audio = audio[:, :num_samples]
|
108 |
-
output_path = os.path.join(os.getcwd(), "audio_prompt_" + generate_filename() + ".wav")
|
109 |
-
torchaudio.save(output_path, cutted_audio, sr)
|
110 |
-
return output_path
|
111 |
|
112 |
@spaces.GPU
|
113 |
def demo_inspiremusic_t2m(text, model_name, chorus,
|
@@ -151,7 +137,6 @@ def main():
|
|
151 |
value=30)
|
152 |
|
153 |
with gr.Row(equal_height=True):
|
154 |
-
# Textbox for custom input
|
155 |
text_input = gr.Textbox(label="Input Text (For Text-to-Music Task)", value="Experience soothing and sensual instrumental jazz with a touch of Bossa Nova, perfect for a relaxing restaurant or spa ambiance.")
|
156 |
|
157 |
audio_input = gr.Audio(label="Input Audio Prompt (For Music Continuation Task)",
|
@@ -177,8 +162,8 @@ def main():
|
|
177 |
demo.launch()
|
178 |
|
179 |
if __name__ == '__main__':
|
180 |
-
model_list = ["InspireMusic-
|
181 |
-
|
182 |
for model_name in model_list:
|
183 |
model_dir = f"pretrained_models/{model_name}"
|
184 |
if not os.path.isdir(model_dir):
|
|
|
12 |
print(torch.backends.cudnn.version())
|
13 |
|
14 |
def generate_filename():
|
15 |
+
hash_object = hashlib.sha256(str(int(datetime.datetime.now().timestamp())).encode())
|
|
|
|
|
|
|
|
|
|
|
16 |
hash_string = hash_object.hexdigest()
|
17 |
return hash_string
|
18 |
|
|
|
55 |
print(args)
|
56 |
return args
|
57 |
|
58 |
+
|
59 |
+
def trim_audio(audio_file, cut_seconds=5):
|
60 |
+
audio, sr = torchaudio.load(audio_file)
|
61 |
+
num_samples = cut_seconds * sr
|
62 |
+
cutted_audio = audio[:, :num_samples]
|
63 |
+
output_path = os.path.join(os.getcwd(), "audio_prompt_" + generate_filename() + ".wav")
|
64 |
+
torchaudio.save(output_path, cutted_audio, sr)
|
65 |
+
return output_path
|
66 |
+
|
67 |
@spaces.GPU
|
68 |
def music_generation(args):
|
69 |
set_env_variables()
|
|
|
94 |
trim=args["trim"])
|
95 |
return output_path
|
96 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
|
98 |
@spaces.GPU
|
99 |
def demo_inspiremusic_t2m(text, model_name, chorus,
|
|
|
137 |
value=30)
|
138 |
|
139 |
with gr.Row(equal_height=True):
|
|
|
140 |
text_input = gr.Textbox(label="Input Text (For Text-to-Music Task)", value="Experience soothing and sensual instrumental jazz with a touch of Bossa Nova, perfect for a relaxing restaurant or spa ambiance.")
|
141 |
|
142 |
audio_input = gr.Audio(label="Input Audio Prompt (For Music Continuation Task)",
|
|
|
162 |
demo.launch()
|
163 |
|
164 |
if __name__ == '__main__':
|
165 |
+
model_list = ["InspireMusic-1.5B-Long", "InspireMusic-1.5B", "InspireMusic-Base"]
|
166 |
+
|
167 |
for model_name in model_list:
|
168 |
model_dir = f"pretrained_models/{model_name}"
|
169 |
if not os.path.isdir(model_dir):
|