freealise committed
Commit da463b3 · verified · 1 parent: 4aef0cb

Update app.py

Files changed (1)
  1. app.py  +9 -9
app.py CHANGED
@@ -74,7 +74,7 @@ def predict_depth(image, model):
 #def predict_depth(model, image):
 #    return model(image)["depth"]
 
-def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
+def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data):
     if encoder not in ["vitl","vitb","vits","vitg"]:
         encoder = "vits"
 
@@ -218,7 +218,7 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
             continue
         thumbnail_old = thumbnail
 
-        cv2.imwrite(f"f{count}.png", raw_frame)
+        cv2.imwrite(f"f{count}.png", blur_image(raw_frame, depth_color, blur_data))
         orig_frames.append(f"f{count}.png")
 
         cv2.imwrite(f"f{count}_dmap.png", depth_color)
@@ -228,8 +228,8 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
         masks.append(f"f{count}_mask.png")
         count += 1
 
-    #final_vid = create_video(orig_frames, frame_rate, "orig")
-    final_vid = create_video(depth_frames, frame_rate, "depth")
+    final_vid = create_video(orig_frames, frame_rate, "orig")
+    #final_vid = create_video(depth_frames, frame_rate, "depth")
 
     final_zip = zip_files(orig_frames, depth_frames)
     raw_video.release()
@@ -1140,7 +1140,7 @@ with gr.Blocks(css=css, js=js) as demo:
     render = gr.Button("Render")
     input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
-    def on_submit(uploaded_video,model_type,coordinates):
+    def on_submit(uploaded_video,model_type,blur_in,coordinates):
         global locations
         locations = []
         avg = [0, 0]
@@ -1174,16 +1174,16 @@ with gr.Blocks(css=css, js=js) as demo:
         print(locations)
 
         # Process the video and get the path of the output video
-        output_video_path = make_video(uploaded_video,encoder=model_type)
+        output_video_path = make_video(uploaded_video,encoder=model_type,blur_in=blur_in)
 
         return output_video_path + (json.dumps(locations),)
 
-    submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
+    submit.click(on_submit, inputs=[input_video, model_type, blur_in, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
     render.click(None, inputs=[coords, mesh_order, bgcolor, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model)
     render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, result_file, mesh_order])
 
-    example_files = [["./examples/streetview.mp4", "vits", example_coords]]
-    examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
+    example_files = [["./examples/streetview.mp4", "vits", blur_in, example_coords]]
+    examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
 
 if __name__ == '__main__':
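Note: as committed, the new make_video signature places the non-default parameter blur_data after parameters that already have defaults, which Python rejects at import time ("SyntaxError: non-default argument follows default argument"), and on_submit passes the value under the keyword blur_in= rather than blur_data=. A minimal sketch of one way to reconcile the two, not part of this commit and with the default value chosen as a placeholder assumption:

# Sketch only: give blur_data a default so it may legally follow the
# defaulted parameters, and pass it under that name at the call site.
def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data=1):
    ...

# Inside on_submit, matching the parameter name:
output_video_path = make_video(uploaded_video, encoder=model_type, blur_data=blur_in)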
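The rewritten frame export calls blur_image(raw_frame, depth_color, blur_data); that helper is defined elsewhere in app.py and is not shown in this diff. Purely as an illustration of the idea (a depth-guided blur of the RGB frame, with blur_data steering the kernel size), a hypothetical stand-in could look like the following; it should not be read as the app's actual implementation:

import cv2
import numpy as np

def blur_image_sketch(frame, depth_color, blur_data):
    # Hypothetical illustration only; the real blur_image lives in app.py.
    k = max(1, int(blur_data)) | 1                          # force an odd, positive kernel size
    blurred = cv2.GaussianBlur(frame, (k, k), 0)            # fully blurred copy of the frame
    depth_gray = cv2.cvtColor(depth_color, cv2.COLOR_BGR2GRAY)
    weight = 1.0 - depth_gray.astype(np.float32) / 255.0    # assumes darker depth = farther away
    weight = weight[..., None]                              # shape (H, W, 1) for broadcasting
    # Blend: far pixels take more of the blurred copy, near pixels stay sharp.
    return (frame * (1.0 - weight) + blurred * weight).astype(np.uint8)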