freealise committed
Commit 4bfc190 · verified · 1 Parent(s): e2e12e9

Update app.py

Files changed (1)
  1. app.py +13 -13
app.py CHANGED
@@ -47,11 +47,11 @@ depths = []
 masks = []
 locations = []
 
-def zip_files(files_in, files_out):
+def zip_files(files_in, subs):
     with ZipFile("depth_result.zip", "w") as zipObj:
         for idx, file in enumerate(files_in):
             zipObj.write(file, file.split("/")[-1])
-        for idx, file in enumerate(files_out):
+        for idx, file in enumerate(subs):
             zipObj.write(file, file.split("/")[-1])
     return "depth_result.zip"
 
@@ -243,7 +243,7 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data
 
     final_vid = create_video(comb_frames, frame_rate, "orig")
 
-    final_zip = zip_files(orig_frames, depth_frames)
+    final_zip = zip_files(comb_frames, ["orig_result.vtt"])
     raw_video.release()
     # out.release()
     cv2.destroyAllWindows()
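With this change the archive packs the combined frames together with the WebVTT track instead of pairing original and depth frames. A minimal, self-contained sketch of the same packing pattern (the frame path in the usage comment is a placeholder, not taken from app.py):

    from zipfile import ZipFile

    def zip_files(files_in, subs):
        # One flat archive: every frame plus every subtitle file,
        # each stored under its basename only.
        with ZipFile("depth_result.zip", "w") as zipObj:
            for file in files_in:
                zipObj.write(file, file.split("/")[-1])
            for file in subs:
                zipObj.write(file, file.split("/")[-1])
        return "depth_result.zip"

    # usage mirroring the new call site:
    # zip_files(["./vis_video_depth/frame_000001.png"], ["orig_result.vtt"])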
@@ -645,6 +645,7 @@ with gr.Blocks(css=css, js=js, head=head) as demo:
         reset = gr.Button("Reset", size='sm')
         mouse.input(fn=draw_mask, show_progress="minimal", inputs=[boffset, bsize, mouse, output_mask], outputs=[output_mask])
         reset.click(fn=reset_mask, inputs=[output_mask], outputs=[output_mask])
+        output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])
 
     with gr.Column():
         model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl"), ("giant", "vitg")], type="value", value="vits", label='Model Type')
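The new listener fires when a frame is picked in output_frame, updating the mask editor and recording the selection. The body of select_frame is defined elsewhere in app.py and is not part of this diff; a hypothetical sketch of how such a Gradio handler is typically shaped (everything except gr.SelectData is illustrative):

    import gradio as gr

    def select_frame(mask, evt: gr.SelectData):
        # Gradio injects evt for .select events; evt.index is the index of the
        # clicked gallery item, returned here alongside the (unchanged) mask.
        return mask, evt.index

    # wiring as in the diff:
    # output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])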
@@ -711,8 +712,14 @@ with gr.Blocks(css=css, js=js, head=head) as demo:
         blur_in.input(fn=update_blur, inputs=[blur_in], outputs=None)
         with gr.Group():
             with gr.Accordion(label="Locations", open=False):
-                output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])
-                coords = gr.File(elem_id="coords", label="Alignment", interactive=False)
+                example_coords = """[
+                {"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997},
+                {"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064},
+                {"lat": 50.07377647505558, "lng": 14.437161000659017, "heading": 151.41025, "pitch": 3.4802200000000028},
+                {"lat": 50.07379496839027, "lng": 14.437148958238538, "heading": 151.93391, "pitch": 2.843050000000005},
+                {"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998}
+                ]"""
+                coords = gr.Textbox(elem_id="coords", value=example_coords, label="Alignment", interactive=False)
 
             model3d = gr.HTML(value="""
             <a style='color:white;font-weight:bold' href='https://freeali.se/freealise/transparent_video/' target='_blank'>Open renderer in new tab and upload your video there</a>
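Because coords is now a Textbox pre-filled with a JSON array (rather than a gr.File upload), the alignment data can be read straight from the component value. A minimal sketch of consuming that string (the parsing step is illustrative, not code from app.py):

    import json

    example_coords = """[
      {"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997}
    ]"""

    # One entry per frame: latitude/longitude plus camera heading and pitch.
    locations = json.loads(example_coords)
    print(locations[0]["heading"])  # 152.70303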
@@ -779,17 +786,10 @@ with gr.Blocks(css=css, js=js, head=head) as demo:
         # Process the video and get the path of the output video
         output_video_path = make_video(uploaded_video,encoder=model_type,blur_data=blurin,o=boffset,b=bsize)
 
-        return output_video_path + ("orig_result.vtt",) #(json.dumps(locations),)
+        return output_video_path + (vtt[0:-2],) #(json.dumps(locations),)
 
     submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
-    example_coords = """[
-    {"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997},
-    {"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064},
-    {"lat": 50.07377647505558, "lng": 14.437161000659017, "heading": 151.41025, "pitch": 3.4802200000000028},
-    {"lat": 50.07379496839027, "lng": 14.437148958238538, "heading": 151.93391, "pitch": 2.843050000000005},
-    {"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998}
-    ]"""
     example_files = [["./examples/streetview.mp4", "vits", blurin, 1, 32, example_coords]]
     examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
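submit.click wires on_submit to six output components, and Gradio maps the returned tuple onto them in order, so the element appended in the changed return line lands in the last output, the coords Textbox; vtt itself is built elsewhere in app.py and is not shown in this diff. A hypothetical sketch of that tuple-to-outputs mapping (component and variable names are illustrative):

    import gradio as gr

    with gr.Blocks() as demo:
        video_out = gr.Video()
        coords_out = gr.Textbox()
        btn = gr.Button("Run")

        def handler():
            # Returned values are matched to outputs in order:
            # first element -> video_out, second -> coords_out.
            return "result.mp4", '[{"lat": 0.0, "lng": 0.0}]'

        btn.click(handler, inputs=None, outputs=[video_out, coords_out])

    # demo.launch()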
 
 