Upload app.py
app.py CHANGED
@@ -6,7 +6,7 @@ print('=' * 70)
 print('Guided Accompaniment Transformer Gradio App')
 
 print('=' * 70)
-print('Loading core
+print('Loading core Guided Accompaniment Transformer modules...')
 
 import os
 
@@ -15,7 +15,7 @@ import datetime
 from pytz import timezone
 
 print('=' * 70)
-print('Loading main
+print('Loading main Guided Accompaniment Transformer modules...')
 
 os.environ['USE_FLASH_ATTENTION'] = '1'
 
@@ -40,7 +40,7 @@ from x_transformer_1_23_2 import *
 import random
 
 print('=' * 70)
-print('Loading aux
+print('Loading aux Guided Accompaniment Transformer modules...')
 
 import matplotlib.pyplot as plt
 
@@ -105,7 +105,7 @@ def load_model(model_selector):
     print('=' * 70)
     print('Loading model checkpoint...')
 
-    model_checkpoint = hf_hub_download(repo_id='asigalov61/
+    model_checkpoint = hf_hub_download(repo_id='asigalov61/Guided-Accompaniment-Transformer', filename=MODEL_CHECKPOINTS[model_selector])
 
     model.load_state_dict(torch.load(model_checkpoint, map_location='cpu', weights_only=True))
 
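The checkpoint line above follows the standard huggingface_hub download-then-load pattern. A minimal sketch of that pattern outside the app, assuming a hypothetical MODEL_CHECKPOINTS mapping and filename (the app's real mapping lives elsewhere in app.py and is not shown in this diff):

from huggingface_hub import hf_hub_download
import torch

# Hypothetical selector-to-filename mapping; the app's actual one is defined outside this hunk
MODEL_CHECKPOINTS = {'default': 'model_checkpoint.pth'}

def load_checkpoint(model, model_selector='default'):
    # Download the checkpoint from the Hub repo (or reuse the local HF cache copy)
    ckpt_path = hf_hub_download(repo_id='asigalov61/Guided-Accompaniment-Transformer',
                                filename=MODEL_CHECKPOINTS[model_selector])
    # weights_only=True restricts torch.load to tensors/state dicts, avoiding arbitrary unpickling
    model.load_state_dict(torch.load(ckpt_path, map_location='cpu', weights_only=True))
    return model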
@@ -188,13 +188,13 @@ def save_midi(tokens, batch_number=None, model_selector=''):
         song_f.append(['note', time, dur, 0, pitch, vel, 0])
 
     if batch_number == None:
-        fname = '
+        fname = 'Guided-Accompaniment-Transformer-Music-Composition'
 
     else:
-        fname = '
+        fname = 'Guided-Accompaniment-Transformer-Music-Composition_'+str(batch_number)
 
     data = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
-                                                     output_signature = '
+                                                     output_signature = 'Guided Accompaniment Transformer',
                                                      output_file_name = fname,
                                                      track_name='Project Los Angeles',
                                                      list_of_MIDI_patches=patches,
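The renamed output keeps the same naming scheme as before: a fixed base name, plus a numeric suffix for per-batch files so they do not overwrite each other. A standalone sketch of that naming logic (an illustrative helper, not a function in app.py):

def make_output_name(batch_number=None):
    # Base name shared by the rendered MIDI and audio artifacts
    base = 'Guided-Accompaniment-Transformer-Music-Composition'
    # Batched generations get a suffix like "_0", "_1", ...
    return base if batch_number is None else base + '_' + str(batch_number)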
@@ -307,7 +307,7 @@ def generate_callback(input_midi,
                       )
 
     # File name
-    fname = '
+    fname = 'Guided-Accompaniment-Transformer-Music-Composition_'+str(i)
 
     # Save audio to a temporary file
     midi_audio = midi_to_colab_audio(fname + '.mid',
@@ -403,12 +403,12 @@ def add_batch(batch_number, final_composition, generated_batches, block_lines, m
 
     # MIDI plot
     midi_plot = TMIDIX.plot_ms_SONG(midi_score,
-                                     plot_title='
+                                     plot_title='Guided Accompaniment Transformer Composition',
                                      block_lines_times_list=block_lines[:-1],
                                      return_plt=True)
 
     # File name
-    fname = '
+    fname = 'Guided-Accompaniment-Transformer-Music-Composition'
 
     # Save audio to a temporary file
     midi_audio = midi_to_colab_audio(fname + '.mid',
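Both batch helpers render the same plot; only the plot_title string was filled in. A hedged sketch of that call, reusing only the arguments visible in this diff and assuming TMIDIX (tegridy-tools) is importable the way app.py imports it:

import TMIDIX  # tegridy-tools module bundled with the Space (assumption)

def plot_composition(midi_score, block_lines):
    # Plot the score with vertical markers at the start of each generated block
    return TMIDIX.plot_ms_SONG(midi_score,
                               plot_title='Guided Accompaniment Transformer Composition',
                               block_lines_times_list=block_lines[:-1],
                               return_plt=True)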
@@ -440,12 +440,12 @@ def remove_batch(batch_number, num_tokens, final_composition, generated_batches,
 
     # MIDI plot
     midi_plot = TMIDIX.plot_ms_SONG(midi_score,
-                                     plot_title='
+                                     plot_title='Guided Accompaniment Transformer Composition',
                                      block_lines_times_list=block_lines[:-1],
                                      return_plt=True)
 
     # File name
-    fname = '
+    fname = 'Guided-Accompaniment-Transformer-Music-Composition'
 
     # Save audio to a temporary file
     midi_audio = midi_to_colab_audio(fname + '.mid',
@@ -500,16 +500,16 @@ with gr.Blocks() as demo:
 
     #==================================================================================
 
-    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>
-    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Guided Accompaniment Transformer</h1>")
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Guided melody accompaniment generation with transformers</h1>")
     gr.HTML("""
-    Check out <a href="https://github.com/asigalov61/monsterpianotransformer">
+    Check out <a href="https://github.com/asigalov61/monsterpianotransformer">Guided Accompaniment Transformer</a> on GitHub or on
 
     <p>
         <a href="https://pypi.org/project/monsterpianotransformer/">
             <img src="https://upload.wikimedia.org/wikipedia/commons/6/64/PyPI_logo.svg" alt="PyPI Project" style="width: 100px; height: auto;">
         </a> or
-        <a href="https://huggingface.co/spaces/asigalov61/
+        <a href="https://huggingface.co/spaces/asigalov61/Guided-Accompaniment-Transformer?duplicate=true">
             <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-md.svg" alt="Duplicate in Hugging Face">
         </a>
     </p>
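For readers unfamiliar with Gradio, the header hunk above is just a Blocks layout with Markdown and HTML children. A minimal, self-contained sketch of that pattern (the titles and GitHub link are taken from the diff; everything else, including launching the demo, is illustrative):

import gradio as gr

with gr.Blocks() as demo:
    # Centered page title and subtitle rendered from raw HTML headings
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Guided Accompaniment Transformer</h1>")
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Guided melody accompaniment generation with transformers</h1>")
    # Arbitrary HTML block for the GitHub / PyPI / duplicate-this-Space badges
    gr.HTML('Check out <a href="https://github.com/asigalov61/monsterpianotransformer">Guided Accompaniment Transformer</a> on GitHub')

if __name__ == '__main__':
    demo.launch()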