Update app.py
app.py CHANGED
@@ -221,11 +221,11 @@ def create_video_from_images(image_list, fps=4):
     video.release()
     return video_path

-def loop_outpainting(image, width, height, overlap_width, num_inference_steps, resize_option, custom_resize_size, prompt_input, alignment, num_iterations, fps):
+def loop_outpainting(image, width, height, overlap_width, num_inference_steps, resize_option, custom_resize_size, prompt_input, alignment, num_iterations, fps, progress=gr.Progress()):
     image_list = [image]
     current_image = image

-    for _ in range(num_iterations):
+    for _ in progress.tqdm(range(num_iterations), desc="Generating frames"):
         # Generate new image
         for step_result in infer(current_image, width, height, overlap_width, num_inference_steps, resize_option, custom_resize_size, prompt_input, alignment):
             pass # Process all steps
@@ -241,22 +241,15 @@ def loop_outpainting(image, width, height, overlap_width, num_inference_steps, r
     video_path = create_video_from_images(reverse_image_list, fps)
     return video_path

+loop_outpainting.zerogpu = True
+
 css = """
 .gradio-container {
     width: 1200px !important;
 }
 """

-title = """<h1 align="center">
-<div align="center">Drop an image you would like to extend, pick your expected ratio and hit Generate.</div>
-<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
-<p style="display: flex;gap: 6px;">
-<a href="https://huggingface.co/spaces/fffiloni/diffusers-image-outpout?duplicate=true">
-<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-md.svg" alt="Duplicate this Space">
-</a> to skip the queue and enjoy faster inference on the GPU of your choice
-</p>
-</div>
-"""
+title = """<h1 align="center">Outpaint Video Zoom-In</h1>"""

 with gr.Blocks(css=css) as demo:
     with gr.Column():
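For context on the new signature: declaring progress=gr.Progress() as a default argument asks Gradio to inject a progress tracker when the function is called from the UI, and progress.tqdm(...) wraps an iterable so each completed iteration advances the progress bar shown on the output component. Below is a minimal sketch of that pattern, assuming a recent Gradio release with gr.Progress support; the function and component names are illustrative and not taken from app.py.

import time

import gradio as gr


def slow_task(n_steps, progress=gr.Progress()):
    # Gradio supplies the Progress tracker automatically when this function
    # is wired to an event listener; progress.tqdm mirrors tqdm's interface
    # and advances the progress bar rendered on the output component.
    done = 0
    for _ in progress.tqdm(range(int(n_steps)), desc="Working"):
        time.sleep(0.2)  # stand-in for one expensive iteration
        done += 1
    return f"Finished {done} steps"


with gr.Blocks() as demo:
    steps = gr.Slider(1, 50, value=10, step=1, label="Steps")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(slow_task, inputs=steps, outputs=out)

# Progress bars are displayed through the queue, so enable it before launching.
demo.queue().launch()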