asigalov61 committed on
Commit fbc5bf3 · verified · 1 Parent(s): c6ee9c1

Update app.py

Files changed (1)
  1. app.py +29 -43
app.py CHANGED
@@ -5,7 +5,6 @@ import datetime
 from pytz import timezone
 
 import torch
-import torch.nn.functional as F
 
 import gradio as gr
 import spaces
@@ -18,12 +17,11 @@ from midi_to_colab_audio import midi_to_colab_audio
 
 import matplotlib.pyplot as plt
 
-in_space = os.getenv("SYSTEM") == "spaces"
-
 # =================================================================================================
 
 @spaces.GPU
-def GenerateMIDI(num_tok, idrums, iinstr):
+def GenerateMIDI(num_tok, idrums, iinstr, input_align):
+
     print('=' * 70)
     print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
     start_time = time.time()
@@ -32,6 +30,7 @@ def GenerateMIDI(num_tok, idrums, iinstr):
     print('Req num tok:', num_tok)
     print('Req instr:', iinstr)
     print('Drums:', idrums)
+    print('Align:', input_align)
     print('-' * 70)
 
     if idrums:
@@ -49,35 +48,7 @@ def GenerateMIDI(num_tok, idrums, iinstr):
     print(start_tokens)
     print('-' * 70)
 
-    output_signature = 'Allegro Music Transformer'
-    output_file_name = 'Allegro-Music-Transformer-Music-Composition'
-    track_name = 'Project Los Angeles'
-    list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
-    number_of_ticks_per_quarter = 500
-    text_encoding = 'ISO-8859-1'
-
-    output_header = [number_of_ticks_per_quarter,
-                     [['track_name', 0, bytes(output_signature, text_encoding)]]]
-
-    patch_list = [['patch_change', 0, 0, list_of_MIDI_patches[0]],
-                  ['patch_change', 0, 1, list_of_MIDI_patches[1]],
-                  ['patch_change', 0, 2, list_of_MIDI_patches[2]],
-                  ['patch_change', 0, 3, list_of_MIDI_patches[3]],
-                  ['patch_change', 0, 4, list_of_MIDI_patches[4]],
-                  ['patch_change', 0, 5, list_of_MIDI_patches[5]],
-                  ['patch_change', 0, 6, list_of_MIDI_patches[6]],
-                  ['patch_change', 0, 7, list_of_MIDI_patches[7]],
-                  ['patch_change', 0, 8, list_of_MIDI_patches[8]],
-                  ['patch_change', 0, 9, list_of_MIDI_patches[9]],
-                  ['patch_change', 0, 10, list_of_MIDI_patches[10]],
-                  ['patch_change', 0, 11, list_of_MIDI_patches[11]],
-                  ['patch_change', 0, 12, list_of_MIDI_patches[12]],
-                  ['patch_change', 0, 13, list_of_MIDI_patches[13]],
-                  ['patch_change', 0, 14, list_of_MIDI_patches[14]],
-                  ['patch_change', 0, 15, list_of_MIDI_patches[15]],
-                  ['track_name', 0, bytes(track_name, text_encoding)]]
-
-    output = output_header + [patch_list]
+    output = []
 
     print('Loading model...')
 
@@ -109,7 +80,7 @@ def GenerateMIDI(num_tok, idrums, iinstr):
 
     print('Done!')
     print('=' * 70)
-
+    print('Generating...')
 
     inp = torch.LongTensor([start_tokens]).cuda()
 
@@ -122,41 +93,51 @@ def GenerateMIDI(num_tok, idrums, iinstr):
                          verbose=False)
 
     out0 = out[0].tolist()
+
+    patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
 
     ctime = 0
-    dur = 0
+    dur = 1
     vel = 90
-    pitch = 0
+    pitch = 60
     channel = 0
 
     for ss1 in out0:
 
         if 0 < ss1 < 256:
+
             ctime += ss1 * 8
 
         if 256 <= ss1 < 1280:
+
             dur = ((ss1 - 256) // 8) * 32
             vel = (((ss1 - 256) % 8) + 1) * 15
 
         if 1280 <= ss1 < 2816:
+
             channel = (ss1 - 1280) // 128
             pitch = (ss1 - 1280) % 128
 
             if channel != 9:
-                pat = list_of_MIDI_patches[channel]
+                pat = patches[channel]
             else:
                 pat = 128
 
             event = ['note', ctime, dur, channel, pitch, vel, pat]
 
-            output[-1].append(event)
+            output.append(event)
 
-    midi_data = TMIDIX.score2midi(output, text_encoding)
+    if input_align:
+        output = TMIDIX.align_escore_notes_to_bars(output)
 
-    with open(f"Allegro-Music-Transformer-Composition.mid", 'wb') as f:
-        f.write(midi_data)
+    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output,
+                                                              output_signature = 'Allegro Music Transformer',
+                                                              output_file_name = 'Allegro-Music-Transformer-Composition',
+                                                              track_name='Project Los Angeles',
+                                                              list_of_MIDI_patches=patches
+                                                              )
 
-    output_plot = TMIDIX.plot_ms_SONG(output[2], plot_title='Allegro-Music-Transformer-Composition', return_plt=True)
+    output_plot = TMIDIX.plot_ms_SONG(output, plot_title='Allegro-Music-Transformer-Composition', return_plt=True)
 
     audio = midi_to_colab_audio('Allegro-Music-Transformer-Composition.mid',
                                 soundfont_path="SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2",
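For reference, the decoding loop in this hunk maps the model's token stream onto note events using three value ranges: 0-255 are delta-time tokens (8-unit steps), 256-1279 pack duration (32-unit steps) and velocity (8 levels), and 1280-2815 pack channel and pitch. A standalone sketch of the same decoding; `decode_tokens` and the sample token list are hypothetical and only mirror the arithmetic shown above:

```python
def decode_tokens(out0, patches):
    """Convert token values into ['note', time, dur, channel, pitch, vel, patch] events."""
    ctime, dur, vel, pitch, channel = 0, 1, 90, 60, 0
    output = []
    for ss1 in out0:
        if 0 < ss1 < 256:                    # delta time, 8 units per step
            ctime += ss1 * 8
        if 256 <= ss1 < 1280:                # duration (32-unit steps) + velocity (8 levels)
            dur = ((ss1 - 256) // 8) * 32
            vel = (((ss1 - 256) % 8) + 1) * 15
        if 1280 <= ss1 < 2816:               # channel + pitch; channel 9 is drums (patch 128)
            channel = (ss1 - 1280) // 128
            pitch = (ss1 - 1280) % 128
            pat = patches[channel] if channel != 9 else 128
            output.append(['note', ctime, dur, channel, pitch, vel, pat])
    return output

patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
# Illustrative tokens only: 256 time units, then duration/velocity, then pitch 60 on channel 0.
print(decode_tokens([32, 381, 1340], patches))
```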
@@ -184,8 +165,11 @@ if __name__ == "__main__":
     print('=' * 70)
 
     app = gr.Blocks()
+
     with app:
+
         gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Allegro Music Transformer</h1>")
+
         gr.Markdown(
             "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Allegro-Music-Transformer&style=flat)\n\n"
             "Full-attention multi-instrumental music transformer featuring asymmetrical encoding with octo-velocity, and chords counters tokens, optimized for speed and performance\n\n"
@@ -200,6 +184,7 @@ if __name__ == "__main__":
             ["Piano", "Guitar", "Bass", "Violin", "Cello", "Harp", "Trumpet", "Sax", "Flute", "Choir", "Organ"],
             value="Piano", label="Lead Instrument Controls", info="Desired lead instrument")
         input_drums = gr.Checkbox(label="Add Drums", value=False, info="Add drums to the composition")
+        input_align = gr.Checkbox(label="Align output to bars", value=False, info="Align output to bars")
         input_num_tokens = gr.Slider(16, 1024, value=512, label="Number of Tokens", info="Number of tokens to generate")
 
         run_btn = gr.Button("generate", variant="primary")
@@ -207,6 +192,7 @@ if __name__ == "__main__":
         output_audio = gr.Audio(label="output audio", format="mp3", elem_id="midi_audio")
         output_plot = gr.Plot(label='output plot')
         output_midi = gr.File(label="output midi", file_types=[".mid"])
-        run_event = run_btn.click(GenerateMIDI, [input_num_tokens, input_drums, input_instrument],
+
+        run_event = run_btn.click(GenerateMIDI, [input_num_tokens, input_drums, input_instrument, input_align],
                                   [output_plot, output_midi, output_audio])
         app.queue().launch()
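The last two hunks show the pattern for threading a new UI control into the handler: add the `gr.Checkbox`, append it to the `inputs` list of `run_btn.click`, and accept it as the extra `input_align` parameter in the same position. A minimal self-contained sketch of that wiring; the `generate` handler and the reduced set of controls are placeholders, not the app's real generator:

```python
import gradio as gr

def generate(num_tokens, add_drums, instrument, align_to_bars):
    # Placeholder for GenerateMIDI: just echo the received control values.
    return f"tokens={num_tokens}, drums={add_drums}, instr={instrument}, align={align_to_bars}"

with gr.Blocks() as demo:
    input_instrument = gr.Radio(["Piano", "Guitar", "Bass"], value="Piano", label="Lead Instrument")
    input_drums = gr.Checkbox(label="Add Drums", value=False)
    input_align = gr.Checkbox(label="Align output to bars", value=False)
    input_num_tokens = gr.Slider(16, 1024, value=512, label="Number of Tokens")
    run_btn = gr.Button("generate", variant="primary")
    output_text = gr.Textbox(label="output")

    # The order of the inputs list must match the handler's parameter order.
    run_btn.click(generate,
                  [input_num_tokens, input_drums, input_instrument, input_align],
                  [output_text])

demo.launch()
```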
 