Update app.py
app.py (CHANGED)
@@ -34,7 +34,7 @@ def generate_descriptions_for_frames(video_path):
     frames = frame_capture(video_path)
     images = [PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames]
 
-    prompt = "Describe what is happening in each of these frames."
+    prompt = "Describe what is happening in each of these frames in this video sequentially."
     images_with_prompt = [prompt] + images
 
     responses = model.generate_content(images_with_prompt)
@@ -52,7 +52,7 @@ video_input = gr.Video(label="Upload Video", autoplay=True)
 output_text = gr.Textbox(label="What's in this video")
 
 # Create Gradio app
-gr.Interface(fn=generate_descriptions_for_frames, inputs=video_input, outputs=output_text, title="Video …
+gr.Interface(fn=generate_descriptions_for_frames, inputs=video_input, outputs=output_text, title="Video Analysis System").launch()
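
For context, below is a minimal sketch of how the two changed lines sit in the rest of app.py. Only the lines shown in the diff above are confirmed; the imports, the frame_capture helper, the model setup, the API-key handling, and the return value are assumptions added for illustration (frame sampling with OpenCV, a Gemini vision model via the google-generativeai package).

import os
import cv2
import PIL.Image
import gradio as gr
import google.generativeai as genai

# Assumption: the API key comes from an environment variable; the diff does not show this.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
# Assumption: a vision-capable Gemini model; the actual model name is not in the diff.
model = genai.GenerativeModel("gemini-pro-vision")

def frame_capture(video_path, every_n=30):
    """Hypothetical helper: sample every Nth frame from the video with OpenCV."""
    cap = cv2.VideoCapture(video_path)
    frames, i = [], 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if i % every_n == 0:
            frames.append(frame)
        i += 1
    cap.release()
    return frames

def generate_descriptions_for_frames(video_path):
    frames = frame_capture(video_path)
    # Convert BGR (OpenCV) frames to RGB PIL images for the model.
    images = [PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames]

    prompt = "Describe what is happening in each of these frames in this video sequentially."
    images_with_prompt = [prompt] + images

    responses = model.generate_content(images_with_prompt)
    # Assumption: the function returns the model's text response for the Textbox output.
    return responses.text

video_input = gr.Video(label="Upload Video", autoplay=True)
output_text = gr.Textbox(label="What's in this video")

# Create Gradio app
gr.Interface(fn=generate_descriptions_for_frames, inputs=video_input, outputs=output_text, title="Video Analysis System").launch()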