asigalov61 committed (verified)
Commit 8dfb8cd · 1 Parent(s): bb288c5

Update app.py

Files changed (1)
  1. app.py +28 -49
app.py CHANGED
@@ -278,7 +278,7 @@ def GenerateAccompaniment(input_midi, input_num_tokens, input_conditioning_type,
 
     print('Done!')
     print('=' * 70)
-    print(len(melody_chords))
+    print('Melody chords length:', len(melody_chords))
     print('=' * 70)
 
     #==================================================================
@@ -289,56 +289,37 @@ def GenerateAccompaniment(input_midi, input_num_tokens, input_conditioning_type,
     print('=' * 70)
     print('Generating...')
 
-    output = []
-
-    max_chords_limit = 8
     temperature=0.9
-    num_memory_tokens=4096
 
     output = []
 
-    idx = 0
+    num_prime_chords = 1
+
+    for m in melody_chords2[:num_prime_chords]:
+        output.extend(m)
 
-    for c in chords[:input_num_tokens]:
-
-        output.append(c)
+    for ct in tqdm.tqdm(melody_chords2[num_prime_chords:]):
 
-        if input_conditioning_type == 'Chords-Times' or input_conditioning_type == 'Chords-Times-Durations':
-            output.append(times[idx])
+        output.extend(ct[:2])
 
-        if input_conditioning_type == 'Chords-Times-Durations':
-            output.append(durs[idx])
-
-        x = torch.tensor([output] * 1, dtype=torch.long, device='cuda')
-
-        o = 0
-
-        ncount = 0
-
-        while o < 384 and ncount < max_chords_limit:
-            with ctx:
-                out = model.generate(x[-num_memory_tokens:],
-                                     1,
-                                     temperature=temperature,
-                                     return_prime=False,
-                                     verbose=False)
-
-            o = out.tolist()[0][0]
-
-            if 256 <= o < 384:
-                ncount += 1
-
-            if o < 384:
-                x = torch.cat((x, out), 1)
-
-        outy = x.tolist()[0][len(output):]
-
-        output.extend(outy)
+        y = 646
 
-        idx += 1
-
-        if idx == len(chords[:input_num_tokens])-1:
-            break
+        while y > 645:
+
+            x = torch.tensor(song, dtype=torch.long, device=DEVICE)
+
+            with ctx:
+                out = model.generate(x,
+                                     1,
+                                     temperature=temperature,
+                                     eos_token=2237,
+                                     return_prime=False,
+                                     verbose=False)
+
+            y = out.tolist()[0][0]
+
+            if y > 645:
+                output.append(y)
 
     print('=' * 70)
     print('Done!')
@@ -351,17 +332,15 @@ def GenerateAccompaniment(input_midi, input_num_tokens, input_conditioning_type,
     print('Sample INTs', output[:12])
     print('=' * 70)
 
-    out1 = output
-
-    if len(out1) != 0:
+    if len(output) != 0:
 
-        song = out1
+        song = output
         song_f = []
 
         time = 0
-        dur = 0
+        dur = 4
         vel = 90
-        pitch = 0
+        pitch = 60
         channel = 0
 
         patches = [0] * 16
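
For reference, the committed change swaps the conditioning-type loop for a simpler per-chord sampling loop: the token sequence is primed with the first num_prime_chords chords, then for each remaining melody chord the loop copies the chord's first two tokens and samples accompaniment tokens until the model emits a value at or below 645 (with eos_token=2237 as an explicit stop). The sketch below shows that pattern in isolation; stub_generate is a hypothetical stand-in for the real model.generate call, and the toy token values only mimic the 645/646 sentinel convention.

import random

def stub_generate(seq):
    # Hypothetical stand-in for the transformer: returns one next token.
    # Values above 645 play the role of accompaniment tokens; anything
    # lower ends the current chord, mirroring the sentinel check in app.py.
    return random.choice([100, 700, 800, 900])

def generate_accompaniment(melody_chords2, num_prime_chords=1):
    output = []

    # Prime the sequence with the first chord(s) verbatim.
    for m in melody_chords2[:num_prime_chords]:
        output.extend(m)

    # For each remaining melody chord: copy its first two tokens, then
    # sample accompaniment tokens until one falls at or below 645.
    for ct in melody_chords2[num_prime_chords:]:
        output.extend(ct[:2])

        y = 646
        while y > 645:
            y = stub_generate(output)
            if y > 645:
                output.append(y)

    return output

print(generate_accompaniment([[10, 20, 700], [11, 21], [12, 22]]))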
 
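Compared with the old loop, the new version drops the Chords-Times / Chords-Times-Durations branches, the num_memory_tokens=4096 context window, and the max_chords_limit counter; a single sentinel check (y > 645) now decides when a chord's accompaniment is complete. The last hunk adjusts the decode defaults to match, initializing dur = 4 and pitch = 60 instead of zeros and keying the final length check directly off output rather than the out1 alias.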