Update app.py
app.py CHANGED
@@ -337,6 +337,8 @@
 
 
 
+
+
 import os
 os.environ["GRADIO_TEMP_DIR"] = os.path.join(os.getcwd(), ".tmp_outputs")
 
@@ -371,15 +373,13 @@ Other times the user will not want modifications , but instead want a new image
 Video descriptions must have the same num of words as examples below. Extra words will be ignored.
 """
 
-task_semaphore = threading.Semaphore(1)
 task_queue = Queue()
 
 def process_task():
     while True:
         task = task_queue.get()
-        with task_semaphore:
-            result = task['function'](*task['args'])
-            task['callback'](result)
+        result = task['function'](*task['args'])
+        task['callback'](result)
         task_queue.task_done()
 
 threading.Thread(target=process_task, daemon=True).start()
@@ -608,9 +608,7 @@ with gr.Blocks(css=css) as demo:
         t = time()
         video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
         elapsed_time = time() - t
-        video_update = gr.update(visible=True, value=video_path)
-        elapsed_time = gr.update(visible=True, value=f"{elapsed_time:.2f}s")
-        return video_path, video_update, elapsed_time
+        return video_path, gr.update(visible=True, value=video_path), gr.update(visible=True, value=f"{elapsed_time:.2f}s")
 
     def generate_vs(prompt, num_inference_steps, guidance_scale, threshold, gap, progress=gr.Progress(track_tqdm=True)):
         threshold = [int(i) for i in threshold.split(",")]
@@ -619,9 +617,7 @@ with gr.Blocks(css=css) as demo:
         t = time()
         video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
         elapsed_time = time() - t
-        video_update = gr.update(visible=True, value=video_path)
-        elapsed_time = gr.update(visible=True, value=f"{elapsed_time:.2f}s")
-        return video_path, video_update, elapsed_time
+        return video_path, gr.update(visible=True, value=video_path), gr.update(visible=True, value=f"{elapsed_time:.2f}s")
 
     def enhance_prompt_func(prompt):
         return convert_prompt(prompt, retry_times=1)
@@ -660,20 +656,14 @@ with gr.Blocks(css=css) as demo:
         task = {'function': func, 'args': args, 'callback': callback}
         task_queue.put(task)
 
-    def on_generate_button_click(prompt, num_inference_steps, guidance_scale):
-        queue_task(generate_vanilla, (prompt, num_inference_steps, guidance_scale), lambda result: gr.update(*result))
-
-    def on_generate_button_vs_click(prompt, num_inference_steps, guidance_scale, threshold, gap):
-        queue_task(generate_vs, (prompt, num_inference_steps, guidance_scale, threshold, gap), lambda result: gr.update(*result))
-
     generate_button.click(
-        on_generate_button_click,
+        generate_vanilla,
         inputs=[prompt, num_inference_steps, guidance_scale],
         outputs=[video_output, download_video_button, elapsed_time],
    )
 
     generate_button_vs.click(
-        on_generate_button_vs_click,
+        generate_vs,
        inputs=[prompt, num_inference_steps, guidance_scale, pab_threshold, pab_gap],
        outputs=[video_output_vs, download_video_button_vs, elapsed_time_vs],
     )
@@ -684,5 +674,5 @@ with gr.Blocks(css=css) as demo:
     demo.load(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status], every=1)
 
 if __name__ == "__main__":
-    demo.queue(
+    demo.queue(concurrency_count=1, max_size=10)
     demo.launch()
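The second hunk drops the threading.Semaphore(1) guard around task execution: with a single daemon worker draining the queue, tasks already run one at a time in FIFO order, so the semaphore was redundant. A minimal standalone sketch of the resulting pattern (the lambda task and the join() call are illustrative, not part of app.py):

import threading
from queue import Queue

task_queue = Queue()

def process_task():
    # A single daemon worker pulls tasks in FIFO order, so tasks already
    # run one at a time and no extra semaphore is needed.
    while True:
        task = task_queue.get()
        result = task['function'](*task['args'])
        task['callback'](result)
        task_queue.task_done()

threading.Thread(target=process_task, daemon=True).start()

# Illustrative usage with a hypothetical task:
task_queue.put({'function': lambda x: x * 2, 'args': (21,), 'callback': print})
task_queue.join()  # block until the worker has processed everything queued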
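The two generate_* hunks collapse three lines into a single return that builds the gr.update(...) objects inline; the old version also rebound elapsed_time from a float to an update object, which the one-liner avoids. A small sketch of the visible=True update pattern, with illustrative component names (not the ones in app.py):

import gradio as gr

def run(prompt):
    result = prompt.upper()  # stand-in for the real generation call
    elapsed = 1.23
    # gr.update(...) lets a handler's return value both set a component's
    # value and flip it visible, so outputs can start hidden until first run.
    return gr.update(visible=True, value=result), gr.update(visible=True, value=f"{elapsed:.2f}s")

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    output = gr.Textbox(label="output", visible=False)
    elapsed_time = gr.Textbox(label="elapsed", visible=False)
    gr.Button("Run").click(run, inputs=prompt, outputs=[output, elapsed_time])

demo.launch()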
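Finally, the __main__ block's demo.queue( call (truncated in the old rendering) becomes demo.queue(concurrency_count=1, max_size=10). Together with wiring the buttons straight to generate_vanilla/generate_vs, this suggests Gradio's built-in request queue now does the serializing that the on_generate_button_* wrappers did. A minimal sketch of those two arguments, assuming the Gradio 3.x signature the diff uses (slow_echo and the components are illustrative):

import time

import gradio as gr

def slow_echo(prompt):
    time.sleep(2)  # stand-in for a long-running video generation call
    return prompt

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    result = gr.Textbox(label="result")
    run = gr.Button("Run")
    run.click(slow_echo, inputs=prompt, outputs=result)

if __name__ == "__main__":
    # concurrency_count=1 serves one request at a time (serializing access
    # to the GPU); max_size=10 caps how many requests may wait in the queue
    # before new arrivals are rejected.
    demo.queue(concurrency_count=1, max_size=10)
    demo.launch()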