AnsenH committed · Commit 79d80e3 · Parent: ef1c94f

feat: add pipeline to predict

Files changed (4):
  1. app.py +67 -15
  2. requirements.txt +2 -1
  3. run_on_video/data_utils.py +1 -1
  4. run_on_video/run.py +2 -2
app.py CHANGED
@@ -1,20 +1,30 @@
 import gradio as gr
+from run_on_video.run import MomentDETRPredictor
+from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
 
-
-TITLE = """<h2 align="center"> ✍️ Highlight Detection with MomentDETR </h2>"""
-
-
-def submit_video(input_video, retrieval_text):
-    print(input_video)
-    print(retrieval_text)
-    return input_video
+ckpt_path = "run_on_video/moment_detr_ckpt/model_best.ckpt"
+clip_model_name_or_path = "ViT-B/32"
+device = 'cpu'
+
+moment_detr_predictor = MomentDETRPredictor(
+    ckpt_path=ckpt_path,
+    clip_model_name_or_path=clip_model_name_or_path,
+    device=device
+)
 
+def trim_video(video_path, start, end, output_file='result.mp4'):
+    ffmpeg_extract_subclip(video_path, start, end, targetname=output_file)
+    return output_file
 
 with gr.Blocks() as demo:
-    gr.HTML(TITLE)
+    output_videos = gr.State([])
+    moment_scores = gr.State([])
+    gr.HTML("""<h2 align="center"> ✍️ Highlight Detection with MomentDETR </h2>""")
     with gr.Row():
         with gr.Blocks():
             with gr.Column():
-                gr.Markdown("### Input Video")
-                input_video = gr.PlayableVideo().style(height=500)
+                gr.HTML("""<h3 align="center"> Input Video </h3>""")
+                input_video = gr.PlayableVideo()
                 retrieval_text = gr.Textbox(
                     placeholder="What should be highlighted?",
                     visible=True
@@ -22,17 +32,59 @@ with gr.Blocks() as demo:
                 submit =gr.Button("Submit")
         with gr.Blocks():
             with gr.Column():
-                gr.Markdown("### Results")
-                with gr.Row():
-                    output_video = gr.PlayableVideo().style(height=500)
+                gr.HTML("""<h3 align="center"> Highlight Videos </h3>""")
+                display_score = gr.Markdown("### Moment Score: ")
+                radio_button = gr.Radio(
+                    choices=[i for i in range(10)],
+                    label="Moments",
+                    value=0
+                )
+                playable_video = gr.PlayableVideo(visible=True)
 
+    def update_video_player(radio_value, output_videos, moment_scores):
+        return {
+            playable_video: output_videos[radio_value],
+            display_score: f'### Moment Score: {moment_scores[radio_value]}'
+        }
+
+    def submit_video(input_video, retrieval_text):
+        print(f'== video path: {input_video}')
+        print(f'== retrieval_text: {retrieval_text}')
+        if retrieval_text is None:
+            retrieval_text = ''
+        predictions, video_frames = moment_detr_predictor.localize_moment(
+            video_path=input_video,
+            query_list=[retrieval_text]
+        )
+        pred_windows = [[pred[0], pred[1]] for pred in predictions[0]['pred_relevant_windows']]
+        scores = [pred[-1] for pred in predictions[0]['pred_relevant_windows']]
 
+        print(f'== predict start end time: {pred_windows}')
+        print(f'== prediction scores: {scores}')
+        output_files = [trim_video(
+            video_path=input_video,
+            start=pred_windows[i][0],
+            end=pred_windows[i][1],
+            output_file=f'{i}.mp4'
+        ) for i in range(10)]
+        print(f'== output_files: {output_files}')
+        return {
+            output_videos: output_files,
+            moment_scores: scores,
+            playable_video: output_files[0],
+            display_score: f'### Moment Score: {scores[0]}'
+        }
 
+    radio_button.change(
+        fn=update_video_player,
+        inputs=[radio_button, output_videos, moment_scores],
+        outputs=[playable_video, display_score]
+    )
 
     submit.click(
-        fn=submit_video,
-        inputs=[input_video, retrieval_text],
-        outputs=[output_video]
-    )
+        fn=submit_video,
+        inputs=[input_video, retrieval_text],
+        outputs=[output_videos, moment_scores, playable_video, display_score]
+    )
 
 demo.launch()
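
The new app loads the predictor once at startup, runs localize_moment on the uploaded video and query, then cuts one subclip per predicted window. A minimal sketch of the same pipeline outside Gradio, assuming the checkpoint from this commit is present; the video path and query are illustrative, and each window is indexed exactly as submit_video does above:

    from run_on_video.run import MomentDETRPredictor
    from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

    predictor = MomentDETRPredictor(
        ckpt_path="run_on_video/moment_detr_ckpt/model_best.ckpt",
        clip_model_name_or_path="ViT-B/32",
        device="cpu",
    )

    # predictions[0]["pred_relevant_windows"] holds the ranked moments for query 0.
    predictions, _frames = predictor.localize_moment(
        video_path="video.mp4",                  # illustrative input
        query_list=["a person riding a bike"],   # illustrative query
    )

    for i, pred in enumerate(predictions[0]["pred_relevant_windows"][:3]):
        # Same indexing as submit_video: start, end first, score last.
        start, end, score = pred[0], pred[1], pred[-1]
        print(f"moment {i}: {start:.1f}s-{end:.1f}s (score {score:.3f})")
        ffmpeg_extract_subclip("video.mp4", start, end, targetname=f"{i}.mp4")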
requirements.txt CHANGED
@@ -11,4 +11,5 @@ pandas
 ffmpeg-python
 ftfy
 regex
-Pillow
+Pillow
+moviepy
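
moviepy is what backs the new trim_video helper; Pillow is unchanged and likely only appears in the diff because the file previously ended without a trailing newline. A quick sanity check of the call app.py makes, with illustrative paths and times:

    from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

    # Write seconds 0-5 of input.mp4 to clip.mp4, as trim_video does above.
    # The `targetname` keyword matches moviepy 1.x; requirements.txt leaves
    # the version unpinned.
    ffmpeg_extract_subclip("input.mp4", 0.0, 5.0, targetname="clip.mp4")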
run_on_video/data_utils.py CHANGED
@@ -3,7 +3,7 @@ import os
 import numpy as np
 import ffmpeg
 import math
-import clip
+import run_on_video.clip as clip
 
 
 class ClipFeatureExtractor:
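
This points the feature extractor at the CLIP copy vendored under run_on_video/ rather than a pip-installed clip package, so the import resolves when app.py runs from the repository root. A minimal check, assuming the vendored module mirrors OpenAI CLIP's load API:

    import run_on_video.clip as clip  # the repo's vendored copy, not the `clip` wheel

    # `load` is assumed to follow OpenAI CLIP; "ViT-B/32" matches app.py's setting.
    model, preprocess = clip.load("ViT-B/32", device="cpu")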
run_on_video/run.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 
-from data_utils import ClipFeatureExtractor
-from model_utils import build_inference_model
+from run_on_video.data_utils import ClipFeatureExtractor
+from run_on_video.model_utils import build_inference_model
 from utils.tensor_utils import pad_sequences_1d
 from moment_detr.span_utils import span_cxw_to_xx
 from utils.basic_utils import l2_normalize_np_array
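
Same fix as in data_utils.py: the old sibling imports only resolved when Python's working directory was run_on_video/, so qualifying them with the package name lets app.py import the predictor from the repository root. A smoke test from that root:

    # These imports failed before this commit unless run_on_video/ was on
    # sys.path; they now resolve from the repository root.
    from run_on_video.data_utils import ClipFeatureExtractor
    from run_on_video.run import MomentDETRPredictor
    print(ClipFeatureExtractor, MomentDETRPredictor)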