asigalov61 committed
Commit 7069050 · verified · 1 Parent(s): ffd38a4

Update app.py

Files changed (1): app.py (+132, -62)
app.py CHANGED
@@ -120,32 +120,32 @@ def load_midi(midi_file):
     print('=' * 70)

     src_melody_chords_f = []
-    melody_chords_f = []

-    for i in range(0, len(melody_chords), 300):
+    for i in range(0, len(melody_chords), 150):

         chunk = melody_chords[i:i+300]

         src = []
-        src1 = []
-        trg = []

-        if len(chunk) == 300:
-
-            for mm in chunk:
-                src.extend([mm[0], mm[2]+256])
-                src1.append([mm[0], mm[2]+256, mm[1]+384, mm[3]+640])
-                trg.extend([mm[0], mm[2]+256, mm[1]+384, mm[3]+640])
-
-            src_melody_chords_f.append(src1)
-            melody_chords_f.append([768] + src + [769] + trg + [770])
+        for mm in chunk:
+            src.append([mm[0], mm[2]+256, mm[1]+384, mm[3]+640])
+
+        clen = len(src)
+
+        if clen < 300:
+
+            chunk_mult = (300 // clen) + 1
+
+            src += src * chunk_mult
+
+        src_melody_chords_f.append([clen, src[:300]])

     print('Done!')
     print('=' * 70)
-    print('Number of composition chunks:', len(melody_chords_f))
+    print('Number of composition chunks:', len(src_melody_chords_f))
     print('=' * 70)

-    return melody_chords_f, src_melody_chords_f
+    return src_melody_chords_f

 # =================================================================================================
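Note on the hunk above: chunks are now taken with a stride of 150 notes (50% overlap) instead of 300, and a short tail chunk is padded by repeating itself up to exactly 300 entries, with its true length stored alongside the padded data. A minimal sketch of that logic, using a toy list of integers in place of real note events (all names below are illustrative, not from app.py):

    # Overlapped chunking: a 300-note window every 150 notes;
    # short tails are padded by self-repetition to exactly 300 slots.
    melody_chords = list(range(1000))  # stand-in for 1000 note events

    src_melody_chords_f = []

    for i in range(0, len(melody_chords), 150):
        chunk = melody_chords[i:i+300]
        clen = len(chunk)

        if clen < 300:
            chunk_mult = (300 // clen) + 1
            chunk += chunk * chunk_mult  # repeat until at least 300 entries

        src_melody_chords_f.append([clen, chunk[:300]])

    print(len(src_melody_chords_f))    # 7 chunks for 1000 notes
    print(src_melody_chords_f[-1][0])  # 100: true length of the padded tail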
@@ -198,9 +198,7 @@ def Convert_Score_to_Performance(input_midi,
     model.eval()

     #==================================================================
-
-    composition_chunk_idx = 0 # Composition chunk idx to generate durations and velocities for. Each chunk is 300 notes
-
+
     num_prime_notes = input_number_prime_notes # Priming improves the results but it is not necessary and you can set it to zero
     dur_top_k = input_model_dur_top_k # Use k == 1 if src composition is score and k > 1 if src composition is performance
@@ -209,73 +207,145 @@ def Convert_Score_to_Performance(input_midi,

     #==================================================================

-    song_chunk = src_melody_chords_f[composition_chunk_idx]
-
-    song = [768]
-
-    for m in song_chunk:
-        song.extend(m[:2])
-
-    song.append(769)
-
-    for i in tqdm.tqdm(range(len(song_chunk))):
-
-        song.extend(song_chunk[i][:2])
-
-        # Durations
-
-        if i < num_prime_notes:
-            song.append(song_chunk[i][2])
-
-        else:
-
-            x = torch.LongTensor(song).cuda()
-
-            y = 0
-
-            while not 384 < y < 640:
-
-                with ctx:
-                    out = model.generate(x,
-                                         1,
-                                         temperature=dur_temperature,
-                                         filter_logits_fn=top_k,
-                                         filter_kwargs={'k': dur_top_k},
-                                         return_prime=False,
-                                         verbose=False)
-
-                y = out.tolist()[0][0]
-
-            song.append(y)
-
-        # Velocities
-
-        if i < num_prime_notes:
-            song.append(song_chunk[i][3])
-
-        else:
-
-            x = torch.LongTensor(song).cuda()
-
-            y = 0
-
-            while not 640 < y < 768:
-
-                with ctx:
-                    out = model.generate(x,
-                                         1,
-                                         temperature=vel_temperature,
-                                         #filter_logits_fn=top_k,
-                                         #filter_kwargs={'k': 10},
-                                         return_prime=False,
-                                         verbose=False)
-
-                y = out.tolist()[0][0]
-
-            song.append(y)
-
+    if input_midi_type == 'Score':
+
+        dur_top_k = 1
+        dur_temperature = 1.1
+        vel_temperature = 1.5
+
+    elif input_midi_type == 'Performance':
+
+        dur_top_k = 10
+        dur_temperature = 1.5
+        vel_temperature = 1.5
+
+    else:
+
+        dur_top_k = input_model_dur_top_k # Use k == 1 if src composition is score and k > 1 if src composition is performance
+
+        dur_temperature = input_model_dur_temperature # For best results, durations temperature should be more than 1.0 but less than velocities temperature
+        vel_temperature = input_model_vel_temperature
+
+    final_song = []
+
+    for cc, (song_chunk_len, song_chunk) in enumerate(src_melody_chords_f):
+
+        print('=' * 70)
+        print('Rendering song chunk #', cc)
+        print('=' * 70)
+
+        #========================================================================
+
+        song = [768]
+
+        if cc == 0:
+
+            for m in song_chunk:
+                song.extend(m[:2])
+
+            song.append(769)
+
+            sidx = 0
+            eidx = 300
+
+        else:
+            for m in song_chunk[:150]:
+                psrc.extend(m[:2])
+
+            psrc.append(769)
+
+            song = copy.deepcopy(psrc + ptrg)
+
+            sidx = 150
+            eidx = 300
+
+        #========================================================================
+
+        for i in tqdm.tqdm(range(sidx, eidx)):
+
+            song.extend(song_chunk[i][:2])
+
+            if 'Durations' in input_conv_type:
+
+                if i < num_prime_notes and cc == 0:
+                    song.append(song_chunk[i][2])
+
+                else:
+
+                    # Durations
+
+                    x = torch.LongTensor(song).cuda()
+
+                    y = 0
+
+                    while not 384 < y < 640:
+
+                        with ctx:
+                            out = model.generate(x,
+                                                 1,
+                                                 temperature=dur_temperature,
+                                                 filter_logits_fn=top_k,
+                                                 filter_kwargs={'k': dur_top_k},
+                                                 return_prime=False,
+                                                 verbose=False)
+
+                        y = out.tolist()[0][0]
+
+                    song.append(y)
+
+            else:
+                song.append(song_chunk[i][2])
+
+            #========================================================================
+
+            if 'Velocities' in input_conv_type:
+
+                if i < num_prime_notes and cc == 0:
+                    song.append(song_chunk[i][3])
+
+                else:
+
+                    # Velocities
+
+                    x = torch.LongTensor(song).cuda()
+
+                    y = 0
+
+                    while not 640 < y < 768:
+
+                        with ctx:
+                            out = model.generate(x,
+                                                 1,
+                                                 temperature=vel_temperature,
+                                                 return_prime=False,
+                                                 verbose=False)
+
+                        y = out.tolist()[0][0]
+
+                    song.append(y)
+
+            else:
+                song.append(song_chunk[i][3])
+
+        #========================================================================
+
+        if cc == 0:
+            final_song.extend(song[602:][:(song_chunk_len * 4)])
+
+        else:
+            final_song.extend(song[602:][600:(song_chunk_len * 4)])
+
+        psrc = copy.deepcopy(song[1:301])
+        ptrg = copy.deepcopy(song[602:][:600])
+
+        #========================================================================
+
+        if len(final_song) >= input_number_conv_notes * 4:
+            break
+
+        #========================================================================

     print('=' * 70)
     print('Done!')
     print('=' * 70)
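Both generation loops above share the same guard: resample until the model emits a token inside the expected range (durations live in 385..639, velocities in 641..767 under this encoding). A self-contained sketch of that pattern, with a random stand-in for the model.generate call (sample_token and sample_in_range are hypothetical names, not from app.py):

    import random

    # Token layout used by the encoding in load_midi:
    #   0..255 start-times, 256..383 pitches (+256),
    #   384..639 durations (+384), 640..767 velocities (+640),
    #   768..770 special section tokens.

    def sample_token():
        # Hypothetical stand-in for model.generate(...); the real app
        # samples the next token from the transformer instead.
        return random.randrange(0, 771)

    def sample_in_range(lo, hi):
        # Resample until the token falls strictly inside (lo, hi),
        # mirroring the `while not lo < y < hi:` guards in the diff.
        y = 0
        while not lo < y < hi:
            y = sample_token()
        return y

    dur_tok = sample_in_range(384, 640)  # guaranteed duration token
    vel_tok = sample_in_range(640, 768)  # guaranteed velocity token
    print(dur_tok - 384, vel_tok - 640)  # decoded duration and velocity values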
 
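The slicing constants in the new stitching code follow from the token layout: each note contributes 2 tokens to the src section and 4 to the trg section, so for the first chunk the trg section starts after 1 start token + 300*2 src tokens + 1 separator = 602 tokens, and the 150-note overlap with the previous chunk spans 150*4 = 600 trg tokens. A quick arithmetic check (constant names are illustrative, not from app.py):

    NOTES_PER_CHUNK = 300    # window size in notes
    STRIDE_NOTES = 150       # chunk stride, i.e. 150 overlapping notes
    SRC_TOKENS_PER_NOTE = 2  # [time, pitch+256]
    TRG_TOKENS_PER_NOTE = 4  # [time, pitch+256, dur+384, vel+640]

    trg_offset = 1 + NOTES_PER_CHUNK * SRC_TOKENS_PER_NOTE + 1
    print(trg_offset)  # 602: the song[602:] slice skips [768], the src tokens and [769]

    overlap_tokens = STRIDE_NOTES * TRG_TOKENS_PER_NOTE
    print(overlap_tokens)  # 600: trg tokens already emitted by the previous chunk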