tsqn committed
Commit 356509a · verified · 1 Parent(s): a79a4d5

Update app.py
Files changed (1): app.py +0 -19
app.py CHANGED
@@ -159,19 +159,6 @@ pipe.to("cuda")
 # pipe.load_lora_weights("TODO/TODO", adapter_name="ltx-lora")
 # pipe.set_adapters(["lrx-lora"], adapter_weights=[1.0])
 
-INTERRUPT_PIPELINE = False
-
-def interrupt_inference():
-    INTERRUPT_PIPELINE = True
-
-def interrupt_callback(pipeline, i, t, callback_kwargs):
-    stop_idx = 19
-    if i >= stop_idx:
-        pipeline._interrupt = False
-        return callback_kwargs
-
-    pipeline._interrupt = INTERRUPT_PIPELINE
-    return callback_kwargs
 
 @spaces.GPU(duration=120)
 @torch.inference_mode()
@@ -194,7 +181,6 @@ def generate_video(prompt, negative_prompt, height, width, num_frames, num_infer
         num_frames=num_frames,
         num_inference_steps=num_inference_steps,
         generator=torch.Generator(device='cuda').manual_seed(seed),
-        callback_on_step_end=interrupt_callback
     ).frames[0]
 
     # Create output filename based on prompt and timestamp
@@ -233,7 +219,6 @@ with gr.Blocks() as demo:
 
     output_video = gr.Video(label="Generated Video", show_label=True)
     generate_button = gr.Button("Generate Video")
-    cancel_button = gr.Button("Cancel")
     save_state_button = gr.Button("Save State")
 
     random_seed_button.click(randomize_seed, outputs=seed)
@@ -242,10 +227,6 @@ with gr.Blocks() as demo:
         inputs=[prompt, negative_prompt, height, width, num_frames, num_inference_steps, fps, seed],
         outputs=output_video
     )
-    cancel_button.click(
-        interrupt_inference,
-        outputs=gr.Text(label="Interrupted.")
-    )
     save_state_button.click(
         save_ui_state,
         inputs=[prompt, negative_prompt, height, width, num_frames, num_inference_steps, fps, seed],
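
Note: the removed interrupt path appears to have been non-functional as written. interrupt_inference() rebinds INTERRUPT_PIPELINE as a function-local name (there is no global statement), so the module-level flag never changes, and interrupt_callback sets pipeline._interrupt = False once i >= stop_idx rather than requesting a stop. Below is a minimal sketch of how step-level cancellation could be wired instead; it assumes the pipeline's denoising loop honors a self._interrupt flag (several diffusers pipelines check it, but this is not guaranteed for the pipeline used here), and the names cancel_requested, request_cancel, and cancel_callback are illustrative, not part of the original app.

import threading

# Shared flag between the Gradio UI thread and the inference callback
# (illustrative; not in the original app.py).
cancel_requested = threading.Event()

def request_cancel():
    # Wired to a Cancel button: just raise the flag; the callback acts on it.
    cancel_requested.set()
    return "Cancellation requested."

def cancel_callback(pipeline, step, timestep, callback_kwargs):
    # Invoked by diffusers after each denoising step when passed as
    # callback_on_step_end=...; the callback must return callback_kwargs.
    if cancel_requested.is_set():
        # Only has an effect if the pipeline checks self._interrupt in its loop.
        pipeline._interrupt = True
    return callback_kwargs

# Inside generate_video(), call cancel_requested.clear() before starting, and
# pass callback_on_step_end=cancel_callback in the pipe(...) call. In the
# Blocks UI, a Cancel button would then be wired as:
#     cancel_button = gr.Button("Cancel")
#     status = gr.Text(label="Status")
#     cancel_button.click(request_cancel, outputs=status)

An alternative that needs no pipeline support is Gradio's own event cancellation: pass cancels=[generate_event] to cancel_button.click(...), where generate_event is the value returned by generate_button.click(...). That aborts the worker handling the generation rather than stopping the denoising loop gracefully.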
 