import os
import json
import tempfile
from typing import Tuple

import torch
import gradio as gr
import spaces


from video_highlight_detector import (
    load_model,
    BatchedVideoHighlightDetector,
    get_video_duration_seconds
)

def load_examples(json_path: str) -> dict:
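    """Load the example gallery spec (video URLs, durations, and analysis text) from a JSON file."""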
    with open(json_path, 'r') as f:
        return json.load(f)

def format_duration(seconds: int) -> str:
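    """Format a duration in whole seconds as H:MM:SS, or M:SS when under an hour."""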
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    secs = seconds % 60
    if hours > 0:
        return f"{hours}:{minutes:02d}:{secs:02d}"
    return f"{minutes}:{secs:02d}"

def add_watermark(video_path: str, output_path: str):
    """Burn a small watermark into the bottom-right corner using ffmpeg's drawtext filter."""
    watermark_text = "🤗 SmolVLM2 Highlight"
    # -y overwrites any existing output; input/output paths are quoted for safety.
    command = f"""ffmpeg -y -i '{video_path}' -vf \
        "drawtext=text='{watermark_text}':fontfile=NotoColorEmoji.ttf:\
        fontcolor=white:fontsize=24:box=1:boxcolor=black@0.5:\
        boxborderw=5:x=w-tw-10:y=h-th-10" \
        -codec:a copy '{output_path}'"""
    os.system(command)

@spaces.GPU
def process_video(
    video_path: str,
    progress = gr.Progress()
) -> Tuple[str, str, str, str]:
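    """Run the highlight pipeline on one uploaded video (executed on a Spaces GPU worker):
    duration check, model load, content analysis, highlight selection, extraction, and watermarking.
    Returns (output_video_path, video_description, highlight_types, error_message)."""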
    try:
        duration = get_video_duration_seconds(video_path)
        if duration > 1200:  # 20 minutes
            return None, None, None, "Video must be shorter than 20 minutes"

        progress(0.1, desc="Loading model...")
        model, processor = load_model()
        detector = BatchedVideoHighlightDetector(model, processor, batch_size=16)

        progress(0.2, desc="Analyzing video content...")
        video_description = detector.analyze_video_content(video_path)
        
        progress(0.3, desc="Determining highlight types...")
        highlight_types = detector.determine_highlights(video_description)

        progress(0.4, desc="Detecting and extracting highlights...")
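        # Reserve a temporary .mp4 path; delete=False keeps it on disk so it can be written and then watermarked.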
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
            temp_output = tmp_file.name
        
        detector.create_highlight_video(video_path, temp_output)
        
        progress(0.9, desc="Adding watermark...")
        output_path = temp_output.replace('.mp4', '_watermark.mp4')
        add_watermark(temp_output, output_path)
        
        os.unlink(temp_output)
        progress(1.0, desc="Complete!")

        # Trim the model's free-form text so the result panels stay readable.
        if len(video_description) > 500:
            video_description = video_description[:500] + "..."
        if len(highlight_types) > 500:
            highlight_types = highlight_types[:500] + "..."
        
        return output_path, video_description, highlight_types, None

    except Exception as e:
        return None, None, None, f"Error processing video: {str(e)}"

def create_ui(examples_path: str):
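    """Build the Gradio Blocks UI: an example gallery followed by an upload-and-process section."""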
    examples_data = load_examples(examples_path)

    with gr.Blocks() as app:
        gr.Markdown("# Video Highlight Generator")
        gr.Markdown("Upload a video (max 20 minutes) and get an automated highlight reel!")
        
        with gr.Row():
            gr.Markdown("## Example Results")

        with gr.Row():
            for example in examples_data["examples"]:
                with gr.Column():
                    gr.Video(
                        value=example["original"]["url"],
                        label=f"Original ({format_duration(example['original']['duration_seconds'])})",
                        interactive=False
                    )
                    gr.Markdown(f"### {example['title']}")
                
                with gr.Column():

                    gr.Video(
                        value=example["highlights"]["url"],
                        label=f"Highlights ({format_duration(example['highlights']['duration_seconds'])})",
                        interactive=False
                    )
                    with gr.Accordion("Model chain of thought details", open=False):
                        gr.Markdown(f"**Summary:** {example['analysis']['video_description']}")
                        gr.Markdown(f"**Highlights to search for:** {example['analysis']['highlight_types']}")


        gr.Markdown("## Try It Yourself!")
        with gr.Row():
            input_video = gr.Video(
                label="Upload your video (max 20 minutes)",
                interactive=True
            )
        
        process_btn = gr.Button("Process Video", variant="primary")

        status = gr.Markdown(visible=True)
        
        with gr.Row() as results_row:
            with gr.Column():
                video_description = gr.Markdown(visible=False)
            with gr.Column():
                highlight_types = gr.Markdown(visible=False)
        
        with gr.Row() as output_row:
            output_video = gr.Video(label="Highlight Video", visible=False)
            download_btn = gr.Button("Download Highlights", visible=False)

        def on_process(video, progress=gr.Progress()):
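            """Button handler: validate the upload, run process_video, and toggle result component visibility."""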
            if not video:
                return {
                    status: "Please upload a video",
                    video_description: gr.update(visible=False),
                    highlight_types: gr.update(visible=False),
                    output_video: gr.update(visible=False),
                    download_btn: gr.update(visible=False)
                }
            
            status.value = "Processing video..."
            output_path, desc, highlights, err = process_video(video, progress=progress)
            
            if err:
                return {
                    status: f"Error: {err}",
                    video_description: gr.update(visible=False),
                    highlight_types: gr.update(visible=False),
                    output_video: gr.update(visible=False),
                    download_btn: gr.update(visible=False)
                }
            
            return {
                status: "Processing complete!",
                video_description: gr.update(value=desc, visible=True),
                highlight_types: gr.update(value=highlights, visible=True),
                output_video: gr.update(value=output_path, visible=True),
                download_btn: gr.update(visible=True)
            }
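        # The returned dict is keyed by component; Gradio maps it onto the click() outputs below.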

        process_btn.click(
            on_process,
            inputs=[input_video],
            outputs=[status, video_description, highlight_types, output_video, download_btn]
        )

        download_btn.click(
            lambda x: x,
            inputs=[output_video],
            outputs=[output_video]
        )

    return app

if __name__ == "__main__":
    # Warm up the compute device: prefer CUDA when available, fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    zero = torch.Tensor([0]).to(device)

    app = create_ui("video_spec.json")
    app.launch()