soujanyaporia committed
Commit 3e45609 · verified · 1 Parent(s): adac4ab

Update app.py

Files changed (1):
  1. app.py +22 -11
app.py CHANGED
@@ -53,13 +53,13 @@ class Tango:
         for i in range(0, len(lst), n):
             yield lst[i:i + n]

-    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
+    def generate(self, prompt, steps=100, guidance=3, samples=3, disable_progress=True):
         """ Genrate audio for a single prompt string. """
         with torch.no_grad():
             latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
             mel = self.vae.decode_first_stage(latents)
             wave = self.vae.decode_to_waveform(mel)
-        return wave[0]
+        return wave

     def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
         """ Genrate audio for a list of prompt strings. """
@@ -83,18 +83,27 @@ tango.vae.to(device_type)
 tango.stft.to(device_type)
 tango.model.to(device_type)

-@spaces.GPU(duration=60)
+@spaces.GPU(duration=120)
 def gradio_generate(prompt, output_format, steps, guidance):
     output_wave = tango.generate(prompt, steps, guidance)
     # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
-    output_filename = "temp.wav"
-    wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
+
+    output_filename_1 = "tmp1.wav"
+    wavio.write(output_filename_1, output_wave[0], rate=16000, sampwidth=2)
+    output_filename_2 = "tmp2.wav"
+    wavio.write(output_filename_2, output_wave[1], rate=16000, sampwidth=2)
+    output_filename_3 = "tmp3.wav"
+    wavio.write(output_filename_3, output_wave[2], rate=16000, sampwidth=2)

     if (output_format == "mp3"):
-        AudioSegment.from_wav("temp.wav").export("temp.mp3", format = "mp3")
-        output_filename = "temp.mp3"
+        AudioSegment.from_wav("tmp1.wav").export("tmp1.mp3", format = "mp3")
+        output_filename_1 = "tmp1.mp3"
+        AudioSegment.from_wav("tmp2.wav").export("tmp2.mp3", format = "mp3")
+        output_filename_2 = "tmp2.mp3"
+        AudioSegment.from_wav("tmp3.wav").export("tmp3.mp3", format = "mp3")
+        output_filename_3 = "tmp3.mp3"

-    return output_filename
+    return [output_filename_1, output_filename_2, output_filename_3]

 # description_text = """
 # <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
@@ -123,8 +132,10 @@ Generate audio using Tango2 by providing a text prompt. Tango2 was built from Ta
 """
 # Gradio input and output components
 input_text = gr.Textbox(lines=2, label="Prompt")
-output_format = gr.Radio(label = "Output format", info = "The file you can dowload", choices = ["mp3", "wav"], value = "wav")
-output_audio = gr.Audio(label="Generated Audio", type="filepath")
+output_format = gr.Radio(label = "Output format", info = "The file you can download", choices = ["mp3", "wav"], value = "wav")
+output_audio_1 = gr.Audio(label="Generated Audio #1/3", type="filepath")
+output_audio_2 = gr.Audio(label="Generated Audio #2/3", type="filepath")
+output_audio_3 = gr.Audio(label="Generated Audio #3/3", type="filepath")
 denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
 guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)

@@ -132,7 +143,7 @@ guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guid
 gr_interface = gr.Interface(
     fn=gradio_generate,
     inputs=[input_text, output_format, denoising_steps, guidance_scale],
-    outputs=[output_audio],
+    outputs=[output_audio_1, output_audio_2, output_audio_3],
     title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
     description=description_text,
     allow_flagging=False,
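
For reference, the rewritten gradio_generate repeats the same wavio/pydub write-and-convert pattern once per waveform, since Tango.generate now returns the whole batch of three samples instead of wave[0]. A minimal sketch of the equivalent loop form, assuming the same 16 kHz rate and tmp file names used in the diff; the helper name write_outputs and the loop structure are illustrative, not part of the commit:

import wavio
from pydub import AudioSegment

def write_outputs(waves, output_format="wav", rate=16000):
    # waves: the three 16 kHz waveforms returned by Tango.generate with samples=3
    paths = []
    for i, wave in enumerate(waves[:3], start=1):
        wav_path = f"tmp{i}.wav"
        wavio.write(wav_path, wave, rate=rate, sampwidth=2)  # 16-bit PCM, as in the diff
        if output_format == "mp3":
            mp3_path = f"tmp{i}.mp3"
            AudioSegment.from_wav(wav_path).export(mp3_path, format="mp3")
            paths.append(mp3_path)
        else:
            paths.append(wav_path)
    return paths

Either form yields the three file paths that gr.Interface maps onto output_audio_1 through output_audio_3.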