admin committed
Commit db34083 · 1 Parent(s): 6764fe5
Files changed (4):
  1. .gitattributes +1 -0
  2. README.md +1 -3
  3. app.py +41 -51
  4. utils.py +3 -3
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
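
This new LFS rule presumably covers the `tutorial.mp4` that app.py now loads from the repository root instead of streaming from ModelScope (see the app.py diff below); without it, the video would land in the regular Git object store rather than LFS.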
README.md CHANGED
@@ -8,6 +8,4 @@ sdk_version: 5.22.0
 app_file: app.py
 pinned: false
 license: lgpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
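
This removes the auto-generated pointer to the Spaces config reference that sat below the YAML front matter, leaving the README as just the front matter block the Spaces runtime parses for `sdk_version`, `app_file`, and the other settings shown in the context lines.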
 
 
app.py CHANGED
@@ -398,18 +398,26 @@ if __name__ == "__main__":
     shutil.rmtree("./flagged")
 
 with gr.Blocks() as demo:
-    gr.Markdown(
-        "## The current CPU-based version on HuggingFace has slow inference, you can access the GPU-based mirror on [ModelScope](https://www.modelscope.cn/studios/monetjoe/EMusicGen)"
-    )
     with gr.Row():
         with gr.Column():
+            gr.Video(
+                "./tutorial.mp4",
+                label="Tutorial",
+                show_download_button=False,
+                show_share_button=False,
+            )
             dataset_option = gr.Dropdown(
                 ["VGMIDI", "EMOPIA", "Rough4Q"],
                 label="Dataset",
                 value="Rough4Q",
             )
-            gr.Markdown(
-                "# Generate by emotion condition<br><img width='100%' src='https://www.modelscope.cn/studio/monetjoe/EMusicGen/resolve/master/src/4q.jpg'>"
+            gr.Markdown("# Generate by emotion condition")
+            gr.Image(
+                "https://www.modelscope.cn/studio/monetjoe/EMelodyGen/resolve/master/src/4q.jpg",
+                show_label=False,
+                show_download_button=False,
+                show_fullscreen_button=False,
+                show_share_button=False,
             )
             valence_radio = gr.Radio(
                 ["Low", "High"],
@@ -427,16 +435,8 @@ if __name__ == "__main__":
             )
             gen_btn = gr.Button("Generate")
             gr.Markdown("# Generate by feature control")
-            std_option = gr.Radio(
-                ["Low", "High"],
-                label="Pitch SD",
-                value="High",
-            )
-            mode_option = gr.Radio(
-                ["Minor", "Major"],
-                label="Mode",
-                value="Major",
-            )
+            std_option = gr.Radio(["Low", "High"], label="Pitch SD", value="High")
+            mode_option = gr.Radio(["Minor", "Major"], label="Mode", value="Major")
             tempo_option = gr.Slider(
                 minimum=40,
                 maximum=228,
@@ -468,51 +468,41 @@ if __name__ == "__main__":
                 label="The emotion to which the current template belongs",
             )
             save_btn = gr.Button("Save template")
-            gr.Markdown(
-                """
-                ## Cite
-                ```bibtex
-                @article{Zhou2024EMusicGen,
-                    title = {EMusicGen: Emotion-Conditioned Melody Generation in ABC Notation},
-                    author = {Monan Zhou, Xiaobing Li, Feng Yu and Wei Li},
-                    month = {Sep},
-                    year = {2024},
-                    publisher = {GitHub},
-                    version = {0.1},
-                    url = {https://github.com/monetjoe/EMusicGen}
-                }
-                ```
-                """
-            )
+            # gr.Markdown(
+            #     """
+            #     ## Cite
+            #     ```bibtex
+            #     @article{Zhou2024EMelodyGen,
+            #         title = {EMelodyGen: Emotion-Conditioned Melody Generation in ABC Notation},
+            #         author = {Monan Zhou, Xiaobing Li, Feng Yu and Wei Li},
+            #         month = {Sep},
+            #         year = {2024},
+            #         publisher = {GitHub},
+            #         version = {0.1},
+            #         url = {https://github.com/monetjoe/EMelodyGen}
+            #     }
+            #     ```
+            #     """
+            # )
 
         with gr.Column():
-            gr.Video(
-                "https://www.modelscope.cn/studio/monetjoe/EMusicGen/resolve/master/src/tutorial.mp4",
-                label="Tutorial",
-                show_download_button=False,
-                show_share_button=False,
-            )
             wav_audio = gr.Audio(label="Audio", type="filepath")
             midi_file = gr.File(label="Download MIDI")
             pdf_file = gr.File(label="Download PDF score")
             xml_file = gr.File(label="Download MusicXML")
             mxl_file = gr.File(label="Download MXL")
-            abc_textbox = gr.Textbox(
-                label="ABC notation",
-                show_copy_button=True,
-            )
+            abc_textbox = gr.Textbox(label="ABC notation", show_copy_button=True)
             staff_img = gr.Image(label="Staff", type="filepath")
 
-    with gr.Row():
-        gr.Interface(
-            fn=feedback,
-            inputs=gr.Radio(
-                ["Q1", "Q2", "Q3", "Q4"],
-                label="Feedback: the emotion you believe the generated result should belong to",
-            ),
-            outputs=gr.Textbox(show_copy_button=False, show_label=False),
-            allow_flagging="never",
-        )
+    gr.Interface(
+        fn=feedback,
+        inputs=gr.Radio(
+            ["Q1", "Q2", "Q3", "Q4"],
+            label="Feedback: the emotion you believe the generated result should belong to",
+        ),
+        outputs=gr.Textbox(show_copy_button=False, show_label=False),
+        allow_flagging="never",
+    )
 
     gen_btn.click(
         fn=inference,
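
The layout change moves the tutorial video (now a repo-local `./tutorial.mp4`) into the input column, splits the heading-plus-`<img>` Markdown string into a plain heading and a real `gr.Image` component, and lifts the feedback `gr.Interface` out of its wrapping `gr.Row`. A minimal, self-contained sketch of the same Blocks wiring pattern follows; the stub `inference` function and the trimmed component set are illustrative assumptions, not the app's real code:

```python
import os
import gradio as gr

def inference(dataset: str, valence: str) -> str:
    # Stub standing in for the real emotion-conditioned generator.
    return f"X:1\n% generated from {dataset}, valence={valence}"

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():  # input column: tutorial media first, then controls
            if os.path.exists("./tutorial.mp4"):  # guard so the sketch runs anywhere
                gr.Video(
                    "./tutorial.mp4",
                    label="Tutorial",
                    show_download_button=False,
                    show_share_button=False,
                )
            dataset_option = gr.Dropdown(
                ["VGMIDI", "EMOPIA", "Rough4Q"], label="Dataset", value="Rough4Q"
            )
            valence_radio = gr.Radio(["Low", "High"], label="Valence", value="High")
            gen_btn = gr.Button("Generate")
        with gr.Column():  # output column
            abc_textbox = gr.Textbox(label="ABC notation", show_copy_button=True)

    # Event wiring: the Generate button drives the stub and fills the output column.
    gen_btn.click(fn=inference, inputs=[dataset_option, valence_radio], outputs=abc_textbox)

if __name__ == "__main__":
    demo.launch()
```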
utils.py CHANGED
@@ -5,10 +5,10 @@ import torch
 import requests
 import subprocess
 from tqdm import tqdm
-from modelscope import snapshot_download
+from huggingface_hub import snapshot_download
 
 TEMP_DIR = "./flagged"
-WEIGHTS_DIR = snapshot_download("monetjoe/EMusicGen", cache_dir="./__pycache__")
+WEIGHTS_DIR = snapshot_download("monetjoe/EMelodyGen", cache_dir="./__pycache__")
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 PATCH_LENGTH = 128 # Patch Length
 PATCH_SIZE = 32 # Patch Size
@@ -47,7 +47,7 @@ if sys.platform.startswith("linux"):
     extra_dir = "squashfs-root"
     download(
         filename=apkname,
-        url="https://www.modelscope.cn/studio/Genius-Society/piano_transcription/resolve/master/MuseScore.AppImage",
+        url="https://master.dl.sourceforge.net/project/musescore-linux-mirror/MuseScore.AppImage?viasf=1",
     )
     if not os.path.exists(extra_dir):
         subprocess.run(["chmod", "+x", f"./{apkname}"])
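
The weights source moves from ModelScope to the Hugging Face Hub. `huggingface_hub.snapshot_download` takes the repo id positionally and returns the local snapshot directory, so the assignment is a drop-in swap; a minimal sketch:

```python
from huggingface_hub import snapshot_download

# Downloads (or reuses) a full snapshot of the model repo and returns its local path.
WEIGHTS_DIR = snapshot_download("monetjoe/EMelodyGen", cache_dir="./__pycache__")
print(WEIGHTS_DIR)
```

The second hunk only swaps the MuseScore AppImage mirror from ModelScope to SourceForge; the `download()` helper it calls is not part of this diff. A plausible implementation consistent with the `requests` and `tqdm` imports above would be a streamed GET with a progress bar (hypothetical sketch; the real helper may differ):

```python
import requests
from tqdm import tqdm

def download(filename: str, url: str, chunk_size: int = 8192) -> None:
    # Stream the file to disk, reporting progress against the Content-Length header.
    resp = requests.get(url, stream=True, timeout=60)
    resp.raise_for_status()
    total = int(resp.headers.get("content-length", 0))
    with open(filename, "wb") as f, tqdm(total=total, unit="B", unit_scale=True) as bar:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            f.write(chunk)
            bar.update(len(chunk))
```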