Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -7,38 +7,28 @@ from src.utils import parse_string, parse_annotations
 import os
 
 
+
+def parse_response()
 # --- Function to construct the final query ---
-def process_video_and_questions(video, sitting, hands, location, screen):
+def process_video_and_questions(video, standing, hands, location, screen):
     # Extract the video name (filename)
     video_name = os.path.basename(video)
 
     # Construct the query with the video name included
     query = f"Answer the questions from the video\n"
     additional_info = []
-
-
-    annotations = {
-        "indoors": None,
-        "standing": None,
-        "hands.free": None,
-        "screen.interaction_yes": None
-    }
-
-    if sitting:
-        additional_info.append("Is the subject in the video standing or sitting?")
-        annotations["standing"] = 0 # Default value if selected
-
+    if standing:
+        additional_info.append("Is the subject in the video standing or sitting?\n")
+        standing_flag = True
     if hands:
-        additional_info.append("Is the subject holding any object in their hands?")
-
-
+        additional_info.append("Is the subject holding any object in their hands?\n")
+        hands_flag = True
     if location:
-        additional_info.append("Is the subject present indoors?")
-
-
+        additional_info.append("Is the subject present indoors?\n")
+        location_flag = True
     if screen:
-        additional_info.append("Is the subject interacting with a screen in the background by facing the screen?")
-
+        additional_info.append("Is the subject interacting with a screen in the background by facing the screen?\n")
+        screen_flag = True
 
     end_query = """Provide the results in <annotation> tags, where 0 indicates False, 1 indicates True, and None indicates that no information is present. Follow the below examples\n:
     <annotation>indoors: 0</annotation>
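Note on the hunk above: the added line 11 is a bare "def parse_response()" with no colon and no body, which is a SyntaxError as soon as app.py is imported and would by itself explain the Space's Runtime error status. A syntactically valid stub might look like the sketch below (the body is an assumption; the commit shows none):

def parse_response():
    # Placeholder body so the module imports; the committed line lacks the
    # colon and suite that Python requires after a def header.
    raise NotImplementedError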
@@ -52,21 +42,14 @@ def process_video_and_questions(video, sitting, hands, location, screen):
 
     # Assuming your describe_video function handles the video processing
     response = describe_video(video, final_prompt)
+    final_response = f"<video_name>{video_name}</video_name>" + " \n" + response
+
+    if standing_flag == False:
+        final_response.replace('standing: 1', 'standing: None')
 
-
-    for line in response.split('\n'):
-        if '<annotation>' in line:
-            key_value = line.replace('<annotation>', '').replace('</annotation>', '').strip().split(': ')
-            if len(key_value) == 2:
-                key, value = key_value
-                annotations[key] = value
+    return final_response
 
-    # Construct the final response with all annotations
-    final_response = f"<video_name>{video_name}</video_name>\n"
-    for key, value in annotations.items():
-        final_response += f"<annotation>{key}: {value}</annotation>\n"
 
-    return final_response
 
 
 def output_to_csv(final_response):
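Two details in this hunk deserve flagging. First, standing_flag (like the other *_flag variables added in the previous hunk) is only assigned inside its if branch, so the check "if standing_flag == False:" raises UnboundLocalError whenever the Standing checkbox is left unticked. Second, str.replace returns a new string rather than modifying in place, so the unassigned call on final_response is a no-op. A minimal sketch with both issues addressed (finalize_response is a hypothetical helper name, not part of the commit):

def finalize_response(video_name, response, standing_flag=False):
    # Mirror the committed f-string that prefixes the video name.
    final_response = f"<video_name>{video_name}</video_name> \n" + response
    if not standing_flag:
        # str.replace returns a new string; the result must be reassigned
        # for the substitution to take effect.
        final_response = final_response.replace('standing: 1', 'standing: None')
    return final_response

In app.py itself the equivalent fix is to initialize all four flags to False at the top of process_video_and_questions and to reassign the result of replace.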
@@ -82,6 +65,8 @@ def output_to_csv(final_response):
     # Combine the video name and annotation dictionary into a single row
     df = pd.DataFrame([{'video_name': video_name, **annotations_dict}])
 
+
+
     return df
 
 
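The hunk above only adds blank lines, but it is a reminder that output_to_csv builds its one-row DataFrame from video_name plus an annotations_dict whose construction the diff never shows (presumably parse_annotations from src.utils, per the import visible in the first hunk header). Purely as an illustration, a regex version of that parsing step could look like this (parse_annotations_sketch is a hypothetical stand-in, not the real helper):

import re

def parse_annotations_sketch(final_response):
    # Collect every <annotation>key: value</annotation> pair into a dict,
    # e.g. "<annotation>indoors: 0</annotation>" -> {"indoors": "0"}.
    pattern = r'<annotation>(.+?):\s*(.+?)</annotation>'
    return dict(re.findall(pattern, final_response))

re.findall with two capture groups yields (key, value) tuples, so dict() consumes them directly.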
@@ -117,9 +102,9 @@ with gr.Blocks(theme=custom_theme) as demo:
     with gr.Row():
         with gr.Column():
             video = gr.Video(label="Video")
-            sitting = gr.Checkbox(label="Sitting")
-            hands = gr.Checkbox(label="Hands Free")
-            location = gr.Checkbox(label="Indoors")
+            standing = gr.Checkbox(label="Standing")
+            hands = gr.Checkbox(label="Hands Free")
+            location = gr.Checkbox(label="Indoors")
             screen = gr.Checkbox(label="Screen Interaction")
             submit_btn = gr.Button("Generate Annotations")
             generate_csv_btn = gr.Button("Generate CSV")
@@ -131,7 +116,7 @@ with gr.Blocks(theme=custom_theme) as demo:
     # Event handling for the Submit button
     submit_btn.click(
         fn=process_video_and_questions,
-        inputs=[video, sitting, hands, location, screen],
+        inputs=[video, standing, hands, location, screen],
         outputs=response
     )
 
@@ -142,6 +127,6 @@ with gr.Blocks(theme=custom_theme) as demo:
         outputs=csv_output
     )
 
-    gr.Examples(examples=examples, inputs=[video, sitting, hands, location, screen])
+    gr.Examples(examples=examples, inputs=[video, standing, hands, location, screen])
 
-    demo.launch(debug=False)
+demo.launch(debug=False)
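Taken together, the last three hunks simply thread the renamed standing checkbox through the UI, the click handler, and the examples; a component must appear in inputs= for its value to reach the corresponding parameter of fn. A minimal self-contained sketch of that wiring (the stub function, the Textbox, and the default theme are assumptions; the real app calls describe_video and uses a custom theme):

import gradio as gr

def process_video_and_questions(video, standing, hands, location, screen):
    # Stub so the sketch runs without the model backend; the real function
    # builds a prompt from the checkboxes and calls describe_video.
    return f"video={video}, flags={[standing, hands, location, screen]}"

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            video = gr.Video(label="Video")
            standing = gr.Checkbox(label="Standing")
            hands = gr.Checkbox(label="Hands Free")
            location = gr.Checkbox(label="Indoors")
            screen = gr.Checkbox(label="Screen Interaction")
            submit_btn = gr.Button("Generate Annotations")
        response = gr.Textbox(label="Response")
    # Each component in inputs= maps positionally to a parameter of fn.
    submit_btn.click(
        fn=process_video_and_questions,
        inputs=[video, standing, hands, location, screen],
        outputs=response,
    )

demo.launch(debug=False)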