Update app.py
app.py
CHANGED
@@ -242,7 +242,6 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data
         count += 1
 
     final_vid = create_video(comb_frames, frame_rate, "orig")
-    depth_vid = create_video(depth_frames, frame_rate, "depth")
 
     final_zip = zip_files(orig_frames, depth_frames)
     raw_video.release()
@@ -265,7 +264,7 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data
     else:
         gradient = cv2.imread('./gradient_small.png').astype(np.uint8)
 
-    return final_vid, final_zip, frames, masks[frame_selected], depths
+    return final_vid, final_zip, frames, masks[frame_selected], depths #output_path
 
 def depth_edges_mask(depth):
     """Returns a mask of edges in the depth map.
@@ -453,38 +452,8 @@ def draw_mask(o, b, v, d, evt: gr.EventData):
     switch_rows(False)
     return gr.ImageEditor(value=d)
 
-
-load_model="""
-async()=>{
-    var intv = setInterval(function(){
-        if (document.getElementById("output_video").getElementsByTagName("video")) {
-            try {
-                const xhttp = new XMLHttpRequest();
-                xhttp.onreadystatechange = function() {
-                    if (this.readyState == 4 && this.status == 200) {
-
-                        const blob = new Blob([this.response], {
-                            type: "video/mp4",
-                        });
-
-                        document.getElementById("model3d").src = document.getElementById("model3d").src.split("?")[0] + "?url=" + URL.createObjectURL(blob);
-                        document.getElementById("newtab").href = document.getElementById("model3d").src;
-                    }
-                };
-                xhttp.responseType = 'arraybuffer';
-                xhttp.open("GET", document.getElementById("output_video").getElementsByTagName("video")[0].src);
-                xhttp.send();
-
-                clearInterval(intv);
-            } catch(e) {alert(e)}
-        }
-    }, 40);
-}
-"""
-
 js = """
 async()=>{
-console.log('Hi');
 
 const chart = document.getElementById('chart');
 const blur_in = document.getElementById('blur_in').getElementsByTagName('textarea')[0];
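Note on the deleted block: load_model was a browser-side helper that polled for the rendered output_video element, re-downloaded the clip as an ArrayBuffer over XMLHttpRequest, and pointed the model3d iframe (removed in a later hunk) at a blob URL. With that iframe replaced by a plain link, nothing consumes the blob, so the script is dead code. What the app keeps using is the pattern of handing a JS string to gr.Blocks, as the unchanged js = """ block and the with gr.Blocks(css=css, js=js, head=head) hunk headers show. A minimal, hedged sketch of that pattern, standalone and not the app's real code (the JS body and component are placeholders):

import gradio as gr

# A JS arrow-function string that Gradio runs in the browser when the app loads.
js = """
async () => {
    console.log("client-side helper loaded");
}
"""

with gr.Blocks(js=js) as demo:
    gr.Markdown("JS hook demo")  # placeholder UI

if __name__ == "__main__":
    demo.launch()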
@@ -681,7 +650,6 @@ with gr.Blocks(css=css, js=js, head=head) as demo:
             model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl"), ("giant", "vitg")], type="value", value="vits", label='Model Type')
             processed_video = gr.Video(label="Output Video", format="mp4", elem_id="output_video", interactive=False)
             processed_zip = gr.File(label="Output Archive", interactive=False)
-            depth_video = gr.Video(label="Depth Video", format="mp4", elem_id="depth_video", interactive=False, visible=True)
 
     with gr.Tab("Blur"):
         chart_c = gr.HTML(elem_id="chart_c", value="""<div id='chart' onpointermove='window.drawLine(event.clientX, event.clientY);' onpointerdown='window.pointerDown(event.clientX, event.clientY);' onpointerup='window.pointerUp();' onpointerleave='window.pointerUp();' onpointercancel='window.pointerUp();' onclick='window.resetLine();'></div>
@@ -752,9 +720,8 @@ with gr.Blocks(css=css, js=js, head=head) as demo:
 {"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998}
 ]"""
             coords = gr.Textbox(elem_id="coords", value=example_coords, label="Coordinates", interactive=False)
-
-
-            <iframe id='model3d' src='https://freeali.se/freealise/transparent_video/' width='100%' height='320'></iframe>
+            model3d = gr.HTML(value="""
+            <a id='newtab' href='https://freeali.se/freealise/transparent_video/' target='_blank'>Open renderer in new tab and upload your video there</a>
             """)
             input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
@@ -800,11 +767,10 @@ with gr.Blocks(css=css, js=js, head=head) as demo:
 
         return output_video_path + (json.dumps(locations),)
 
-
-    submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, depth_video, coords])
+    submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
     example_files = [["./examples/streetview.mp4", "vits", blurin, 1, 32, example_coords]]
-    examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, depth_video, coords])
+    examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
 
 
 if __name__ == '__main__':
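Why the outputs lists shrink in this last hunk: Gradio maps each value returned by an event handler to one output component, in order, so dropping depth_video from the UI also means dropping it from submit.click(...), from gr.Examples(...), and from the tuple returned through on_submit/make_video. A minimal, hedged sketch of that contract with placeholder components (not the app's real layout or handler):

import gradio as gr

def on_submit_sketch(video_path, coords):
    # One value per output component listed below, in the same order:
    # pass the video through, clear the file slot, echo the coordinates.
    return video_path, None, coords

with gr.Blocks() as demo:
    input_video = gr.Video(label="Input Video")
    coords = gr.Textbox(label="Coordinates")
    processed_video = gr.Video(label="Output Video")
    processed_zip = gr.File(label="Output Archive")
    submit = gr.Button("Submit")
    # The handler's return tuple must stay in sync with this outputs list.
    submit.click(on_submit_sketch, inputs=[input_video, coords],
                 outputs=[processed_video, processed_zip, coords])

if __name__ == "__main__":
    demo.launch()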