import warnings
warnings.filterwarnings("ignore")

import csv
import os
import re
from typing import Dict, List

import gradio as gr
import torch

from src.video_model import describe_video
from src.utils import parse_string, parse_annotations



# Function to save data to a CSV file
def save_to_csv(observations: List[Dict], output_dir: str = "outputs") -> str:
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    
    csv_file = os.path.join(output_dir, "video_observations.csv")
    
    with open(csv_file, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["video_name", "standing", "hands.free", "indoors", "screen.interaction_yes"])
        for observation in observations:
            writer.writerow([
                observation['video_name'], 
                observation['standing'], 
                observation['hands.free'], 
                observation['indoors'], 
                observation['screen.interaction_yes']
            ])
    
    return csv_file
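
# Example of the shape save_to_csv expects (hypothetical values):
#   save_to_csv([{"video_name": "clip.mp4", "standing": "1", "hands.free": "0",
#                 "indoors": "1", "screen.interaction_yes": "None"}])
# This writes (or overwrites) outputs/video_observations.csv and returns its path.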

# Function to process a single video and return the observation data
def process_single_video(video_path: str, sitting, hands, location, screen) -> Dict:
    video_name = os.path.basename(video_path)  # Extract video name from the path
    query = "Describe this video in detail and answer the questions."
    additional_info = []
    if sitting:
        additional_info.append("Is the subject in the video standing or sitting?")
    if hands:
        additional_info.append("Is the subject holding any object in their hands? If so, the hands are not free; otherwise they are free.")
    if location:
        additional_info.append("Is the subject present indoors or outdoors?")
    if screen:
        additional_info.append("Is the subject interacting with a screen in the background by facing the screen?")

    end_query = """Provide the results in <annotation> tags, where 0 indicates False, 1 indicates True, and None indicates that no information is present. Follow the examples below:
        <annotation>indoors: 0</annotation>
        <annotation>standing: 1</annotation>
        <annotation>hands.free: 0</annotation>
        <annotation>screen.interaction_yes: 0</annotation>
        """

    final_query = query + " " + " ".join(additional_info)
    final_prompt = final_query + " " + end_query

    # describe_video (src.video_model) runs the vision-language model on the clip
    final_response = describe_video(video_path, final_prompt)

    # Extract the <annotation>key: value</annotation> pairs from the response
    annotations = dict(re.findall(r"<annotation>\s*([\w.]+)\s*:\s*(\w+)\s*</annotation>", final_response))

    # Map each checkbox to its annotation key; fields that were not requested
    # are forced to 'None' regardless of what the model returned
    requested = {
        'standing': sitting,
        'hands.free': hands,
        'indoors': location,
        'screen.interaction_yes': screen,
    }

    observation = {'video_name': video_name}
    for key, asked in requested.items():
        observation[key] = annotations.get(key, 'None') if asked else 'None'

    return observation
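
# Example of a returned observation for a clip where only the sitting and hands
# questions were requested (hypothetical model output):
#   {"video_name": "clip.mp4", "standing": "1", "hands.free": "0",
#    "indoors": "None", "screen.interaction_yes": "None"}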
    


# Function to process all uploaded videos sequentially
def process_multiple_videos(video_files: List[str], sitting, hands, location, screen):
    all_observations = []

    for video_path in video_files:
        observation = process_single_video(video_path, sitting, hands, location, screen)
        if "error" not in observation:
            all_observations.append(observation)
        else:
            print(observation["error"])  # Log any errors and skip this video

        # Free GPU memory between videos so long batches don't accumulate cache
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    # Save all observations to a CSV file and return the file path
    csv_file = save_to_csv(all_observations)
    return "Processing completed. Download the CSV file.", csv_file
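
# Sketch of calling the batch path directly, bypassing the UI (hypothetical paths):
#   status, csv_path = process_multiple_videos(
#       ["clips/a.mp4", "clips/b.mp4"],
#       sitting=True, hands=True, location=False, screen=False)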

# Gradio interface
def gradio_interface(video_files, sitting, hands, location, screen):
    return process_multiple_videos(video_files, sitting, hands, location, screen)

# Inputs
video_files = gr.File(file_count="multiple", file_types=["video"], label="Upload multiple videos")
sitting = gr.Checkbox(label="Sitting/Standing")
hands = gr.Checkbox(label="Hands Free/Not Free")
location = gr.Checkbox(label="Indoors/Outdoors")
screen = gr.Checkbox(label="Screen Interaction")

# Outputs
response = gr.Textbox(label="Status")
download_link = gr.File(label="Download CSV")

# Gradio interface setup
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[video_files, sitting, hands, location, screen],
    outputs=[response, download_link],
    title="Batch Video Annotation",
    description="Upload multiple videos and process them sequentially, saving the results to a downloadable CSV file.",
    theme=gr.themes.Soft(primary_hue="red", secondary_hue="red"),
    allow_flagging="never"
)

# Launch interface
interface.launch(debug=False)
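
# Usage note: running this file (e.g. `python app.py`, assuming that file name)
# starts a local Gradio server; passing share=True to launch() would also create
# a temporary public link.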