#=========================================================================
# https://huggingface.co/spaces/asigalov61/Parsons-Code-Melody-Transformer
#=========================================================================
import time as reqtime
import datetime
from pytz import timezone
import re
import gradio as gr
import torch
from x_transformer_1_23_2 import *
import random
from midi_to_colab_audio import midi_to_colab_audio
import TMIDIX
import matplotlib.pyplot as plt
#=====================================================================================
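# Converts a Parsons code string (e.g. '*UDDR') into model tokens:
# 388 marks the leading '*', then 385 = D (down), 386 = R (repeat), 387 = U (up).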
def parsons_code_to_tokens(parsons_code_str):
    tokens = [388]
    for ch in parsons_code_str[1:]:
        if ch == 'D':
            tokens.append(385)
        elif ch == 'R':
            tokens.append(386)
        elif ch == 'U':
            tokens.append(387)
    return tokens
#====================================================================================
def Generate_Melody(input_parsons_code,
input_first_note_duration,
                    input_first_note_MIDI_pitch,
):
print('=' * 70)
print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
start_time = reqtime.time()
print('=' * 70)
print('Requested settings:')
print('-' * 70)
print('Parsons code:', input_parsons_code)
print('First note duration:', input_first_note_duration)
    print('First note MIDI pitch:', input_first_note_MIDI_pitch)
print('=' * 70)
#===============================================================================
print('Instantiating Parsons Code Melody Transformer model...')
SEQ_LEN = 322
PAD_IDX = 392
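    # Vocabulary spans PAD_IDX + 1 tokens and includes the Parsons code tokens
    # 385-388 produced by parsons_code_to_tokens() above.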
model = TransformerWrapper(
num_tokens = PAD_IDX+1,
max_seq_len = SEQ_LEN,
attn_layers = Decoder(dim = 1024,
depth = 4,
heads = 8,
rotary_pos_emb = True,
attn_flash = True
)
)
model = AutoregressiveWrapper(model, ignore_index = PAD_IDX, pad_value=PAD_IDX)
print('=' * 70)
print('Loading model checkpoint...')
model_path = 'Parsons_Code_Melody_Transformer_Trained_Model_13786_steps_0.3058_loss_0.8819_acc.pth'
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.cpu()
model.eval()
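    # Run CPU inference under bfloat16 autocast.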
dtype = torch.bfloat16
ctx = torch.amp.autocast(device_type='cpu', dtype=dtype)
print('Done!')
print('=' * 70)
#===============================================================================
    print('Prepping Parsons code string...')
    # Use the user-supplied Parsons code, keeping only the valid characters (*, D, R, U)
    td_str = re.sub('[^*DRU]', '', input_parsons_code)
    print('Parsons code length:', len(td_str))
    print('=' * 70)
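    # Tile the entered pattern out to a fixed 64-character Parsons code;
    # fall back to a default alternating U/D pattern if the input is empty or invalid.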
if '*' in td_str and len(td_str) > 1:
code_mult = (64 // len(td_str[1:]))+1
mult_code = ('*' + (td_str[1:] * code_mult))[:64]
else:
mult_code = '*UUUUUUUDDDDDDDUUUUUUUDDDDDDDUUUUUUUDDDDDDDUUUUUUUDDDDDDDUUUUUUU'
print('Done!')
print('=' * 70)
#===============================================================================
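    # NOTE: The melody generation / token decoding step that produces mixed_song,
    # fn1 and MIDI_Summary (all used below) is missing from this snippet. The block
    # below is a minimal, hedged sketch of that step: the output file name, the
    # seeding order and the token layout (durations as 0-127, MIDI pitches as
    # 128-255) are assumptions, not the verified specification of the trained model.
    print('Generating melody...')

    fn1 = 'Parsons-Code-Melody-Transformer-Composition'  # assumed output file name

    # Assumed seed: Parsons code tokens followed by the requested first note
    # duration and pitch tokens (the +128 pitch offset is an assumption).
    seed = parsons_code_to_tokens(mult_code)
    seed += [int(input_first_note_duration), 128 + int(input_first_note_MIDI_pitch)]

    x = torch.LongTensor([seed])

    with ctx:
        out = model.generate(x,
                             SEQ_LEN - len(seed),
                             temperature=0.9
                             )

    out_tokens = seed + out[0].tolist()

    # Assumed decoding into TMIDIX enhanced score notes
    # (['note', start_time, duration, channel, pitch, velocity, patch]).
    mixed_song = []
    cur_time = 0
    cur_dur = int(input_first_note_duration)

    for t in out_tokens:
        if t < 128:                     # assumed duration token
            cur_dur = max(1, t)
        elif 128 <= t < 256:            # assumed pitch token: emit a note, advance time
            mixed_song.append(['note', cur_time, cur_dur, 0, t - 128, 90, 0])
            cur_time += cur_dur

    MIDI_Summary = 'Parsons code: ' + mult_code  # placeholder summary string

    print('Done!')
    print('=' * 70)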
#===============================================================================
print('Rendering results...')
print('=' * 70)
    print('Sample output events:', mixed_song[:5])
print('=' * 70)
output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(mixed_song)
detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
                                                              output_signature = 'Parsons Code Melody Transformer',
output_file_name = fn1,
track_name='Project Los Angeles',
list_of_MIDI_patches=patches,
timings_multiplier=16
)
new_fn = fn1+'.mid'
audio = midi_to_colab_audio(new_fn,
soundfont_path=soundfont,
sample_rate=16000,
volume_scale=10,
output_for_gradio=True
)
print('Done!')
print('=' * 70)
#========================================================
output_midi_title = str(fn1)
output_midi_summary = str(MIDI_Summary)
output_midi = str(new_fn)
output_audio = (16000, audio)
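    # Scale score timings by the same factor (16) the MIDI converter applied via
    # timings_multiplier, so the plot timeline matches the rendered MIDI/audio.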
for o in output_score:
o[1] *= 16
o[2] *= 16
output_plot = TMIDIX.plot_ms_SONG(output_score, plot_title=output_midi_title, return_plt=True)
print('Output MIDI file name:', output_midi)
print('Output MIDI title:', output_midi_title)
print('Output MIDI summary:', MIDI_Summary)
print('=' * 70)
#========================================================
print('-' * 70)
print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
print('-' * 70)
print('Req execution time:', (reqtime.time() - start_time), 'sec')
return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot
# =================================================================================================
if __name__ == "__main__":
PDT = timezone('US/Pacific')
print('=' * 70)
print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
print('=' * 70)
soundfont = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"
app = gr.Blocks()
with app:
gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Parsons Code Melody Transformer</h1>")
gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Generate unique melodies from Parsons codes</h1>")
gr.Markdown(
"![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Parsons-Code-Melody-Transformer&style=flat)\n\n"
"This is a demo for Clean Melodies subset of Tegridy MIDI Dataset\n\n"
"Check out [Tegridy MIDI Dataset](https://github.com/asigalov61/Tegridy-MIDI-Dataset) on GitHub!\n\n"
)
gr.Markdown("## Enter Parsons code below")
        input_parsons_code = gr.Textbox(label="Enter your Parsons code here",
                                        info="Make sure your code starts with *",
                                        lines=1,
                                        value="*"
                                        )
gr.Markdown("## Select generation options")
input_first_note_duration = gr.Slider(1, 127, value=15, step=1, label="First note duration value")
        input_first_note_MIDI_pitch = gr.Slider(1, 127, value=60, step=1, label="First note MIDI pitch")
run_btn = gr.Button("Generate melody", variant="primary")
clear_btn = gr.ClearButton(components=[input_parsons_code])
gr.Markdown("## Output results")
output_midi_title = gr.Textbox(label="Output MIDI title")
output_midi_summary = gr.Textbox(label="Output MIDI summary")
output_audio = gr.Audio(label="Output MIDI audio", format="mp3", elem_id="midi_audio")
output_plot = gr.Plot(label="Output MIDI score plot")
output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])
run_event = run_btn.click(Generate_Melody, [input_parsons_code,
input_first_note_duration,
                                                    input_first_note_MIDI_pitch,
],
[output_midi_title, output_midi_summary, output_midi, output_audio, output_plot])
gr.Examples(
[["*UUUUUUUDDDDDDDUUUUUUUDDDDDDDUUUUUUUDDDDDDDUUUUUUUDDDDDDDUUUUUUU", 15, 60],
],
[input_parsons_code,
input_first_note_duration,
             input_first_note_MIDI_pitch,
],
[output_midi_title, output_midi_summary, output_midi, output_audio, output_plot],
Generate_Melody,
cache_examples=True,
)
app.queue().launch()