freealise committed on
Commit 0890acc · verified · 1 Parent(s): 848c466

Update app.py

Files changed (1)
  app.py  +10 -7
app.py CHANGED
@@ -851,7 +851,9 @@ with gr.Blocks(css=css, js=js) as demo:
  input_url.input(fn=loadfile, inputs=[input_url], outputs=[input_video])
  submit = gr.Button("Submit")
  output_frame = gr.Gallery(label="Frames", preview=True, columns=8192, interactive=False)
- output_switch = gr.Checkbox(label="Show depths")
+ with gr.Row():
+   output_switch = gr.Checkbox(label="Show depths")
+   selected = gr.Number(label="Selected frame", elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
  with gr.Accordion(label="Depths", open=False):
  output_depth = gr.Files(label="Depth files", interactive=False)
  output_switch.input(fn=switch_rows, inputs=[output_switch], outputs=[output_frame])
@@ -861,7 +863,7 @@ with gr.Blocks(css=css, js=js) as demo:
  with gr.Accordion(label="Border", open=False):
  boffset = gr.Slider(label="Offset", value=1, maximum=256, minimum=0, step=1)
  bsize = gr.Slider(label="Size", value=32, maximum=256, minimum=0, step=1)
- mouse = gr.Textbox(elem_id="mouse", value="""[]""", interactive=False)
+ mouse = gr.Textbox(label="Mouse x,y", elem_id="mouse", value="""[]""", interactive=False)
  mouse.input(fn=draw_mask, show_progress="minimal", inputs=[boffset, bsize, mouse, output_mask], outputs=[output_mask])
  reset.click(fn=reset_mask, inputs=[output_mask], outputs=[output_mask])

@@ -924,7 +926,6 @@ with gr.Blocks(css=css, js=js) as demo:
  with gr.Accordion(label="Blur levels", open=False):
  blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=blurin)
  with gr.Accordion(label="Locations", open=False):
- selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
  output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])
  example_coords = """[
  {"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997},
@@ -935,7 +936,8 @@ with gr.Blocks(css=css, js=js) as demo:
  ]"""
  coords = gr.Textbox(elem_id="coords", value=example_coords, label="Coordinates", interactive=False)
  mesh_order = gr.Textbox(elem_id="order", value="", label="Order", interactive=False)
-
+ load_all = gr.Checkbox(label="Load all")
+
  html = gr.HTML(value="""<label for='zoom'>Zoom</label><input id='zoom' type='range' style='width:256px;height:1em;' value='0.8' min='0.157' max='1.57' step='0.001' oninput='
  if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) {
  var evt = document.createEvent(\"Event\");
@@ -982,7 +984,6 @@ with gr.Blocks(css=css, js=js) as demo:
  BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure = this.value;
  this.parentNode.childNodes[2].innerText = BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.exposure;
  '/><span>1.0</span>""")
- load_all = gr.Checkbox(label="Load all")
  render = gr.Button("Render")
  input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])

@@ -1015,9 +1016,11 @@ with gr.Blocks(css=css, js=js) as demo:
  for k, location in enumerate(locations):
  lat = vincenty((location["lat"], 0), (avg[0], 0)) * 1000
  lng = vincenty((0, location["lng"]), (0, avg[1])) * 1000
- locations[k]["lat"] = float(lat / 2.5 * 95 * np.sign(location["lat"]-avg[0]))
- locations[k]["lng"] = float(lng / 2.5 * 95 * np.sign(location["lng"]-avg[1]))
+ locations[k]["lat"] = float(lat / 2.5 * 111 * np.sign(location["lat"]-avg[0]))
+ locations[k]["lng"] = float(lng / 2.5 * 111 * np.sign(location["lng"]-avg[1]))
  print(locations)
+ # 2.5m is height of camera on google street view car,
+ # distance from center of sphere to pavement roughly 255 - 144 = 111 units

  # Process the video and get the path of the output video
  output_video_path = make_video(uploaded_video,encoder=model_type,blur_data=blurin,o=boffset,b=bsize)
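
For context on the new 111 factor, below is a minimal standalone sketch (not part of the commit) of the conversion the changed lat/lng lines perform. It assumes the vincenty package (which returns distance in kilometres) and the constants stated in the commit's own comment; the helper name to_scene_offset is hypothetical.

# Hypothetical illustration of the lat/lng -> scene-unit conversion above.
# Assumes: vincenty() returns kilometres, 2.5 m camera height on the
# Street View car, and 111 scene units from sphere centre to pavement (255 - 144).
import numpy as np
from vincenty import vincenty

CAMERA_HEIGHT_M = 2.5        # metres
SPHERE_TO_PAVEMENT = 111.0   # Babylon.js scene units

def to_scene_offset(value, avg_value, axis="lat"):
    # Geodesic distance from the average point along one axis, in metres.
    if axis == "lat":
        metres = vincenty((value, 0), (avg_value, 0)) * 1000
    else:
        metres = vincenty((0, value), (0, avg_value)) * 1000
    # 2.5 m in the real world corresponds to 111 scene units, and np.sign
    # restores the direction lost when taking the (always positive) distance.
    return float(metres / CAMERA_HEIGHT_M * SPHERE_TO_PAVEMENT * np.sign(value - avg_value))

# A point roughly 10 m north of the average latitude maps to roughly
# 10 / 2.5 * 111, i.e. about 444 scene units.
print(to_scene_offset(50.07388, 50.07379))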