Update app.py
app.py CHANGED
@@ -313,6 +313,7 @@ def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_
         vis_frame_stride = 15
     elif vis_frame_type == "render":
         vis_frame_stride = 1
+
     plt.close("all")
     for out_frame_idx in range(0, len(frame_names), vis_frame_stride):
         plt.figure(figsize=(6, 4))
@@ -332,7 +333,7 @@ def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_
         jpeg_images.append(output_filename)
 
     torch.cuda.empty_cache()
-
+    print(f"JPEG_IMAGES: {jpeg_images}")
 
     if vis_frame_type == "check":
         return gr.update(value=jpeg_images), gr.update(value=None), gr.update(choices=jpeg_images)
@@ -403,7 +404,7 @@ with gr.Blocks() as demo:
             video_in = gr.Video(label="Video IN")
 
         with gr.Column():
-            working_frame = gr.Dropdown(label="working frame ID", choices=
+            working_frame = gr.Dropdown(label="working frame ID", choices=[], value=None)
            output_result = gr.Image()
            with gr.Row():
                vis_frame_type = gr.Radio(label="Propagation level", choices=["check", "render"], value="check", scale=2)
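For readers new to the pattern this commit leans on: `working_frame` is now constructed empty (`choices=[], value=None`) and only receives real frame IDs when `propagate_to_all` returns `gr.update(choices=jpeg_images)`, as shown in the second hunk. Below is a minimal, self-contained sketch of that wiring, not the Space's actual code: the stub handler, the gallery, the button, and the placeholder JPEGs are assumptions, while `video_in`, `working_frame`, and `output_result` mirror the components in the diff.

# Minimal sketch (assumed wiring, not the Space's actual code) showing how a
# Dropdown created with choices=[] is populated later through gr.update(),
# mirroring the propagate_to_all return value in the diff above.
import os
import tempfile

import gradio as gr
import numpy as np
from PIL import Image


def propagate_stub(video_path):
    # Stand-in for propagate_to_all: write a few placeholder JPEGs so the
    # gallery and the dropdown have real files to point at.
    out_dir = tempfile.mkdtemp()
    jpeg_images = []
    for i in range(3):
        path = os.path.join(out_dir, f"frame_{i:05d}.jpg")
        Image.fromarray(np.full((120, 160, 3), 60 * i, dtype=np.uint8)).save(path)
        jpeg_images.append(path)
    print(f"JPEG_IMAGES: {jpeg_images}")
    # Same output shape as the return statement shown in the diff:
    # gallery value, image reset, dropdown choices.
    return gr.update(value=jpeg_images), gr.update(value=None), gr.update(choices=jpeg_images)


with gr.Blocks() as demo:
    video_in = gr.Video(label="Video IN")
    with gr.Column():
        working_frame = gr.Dropdown(label="working frame ID", choices=[], value=None)
        output_result = gr.Image()
    output_propagated = gr.Gallery(label="Propagated frames")  # assumed component
    propagate_btn = gr.Button("Propagate")                     # assumed trigger
    propagate_btn.click(
        fn=propagate_stub,
        inputs=[video_in],
        outputs=[output_propagated, output_result, working_frame],
    )

if __name__ == "__main__":
    demo.launch()

Recent Gradio releases also accept returning a freshly constructed gr.Dropdown(choices=...) from the handler in place of gr.update(choices=...); the effect on the component is the same.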