Update app.py
app.py (CHANGED)

@@ -7,7 +7,7 @@ import numpy as np
 import os
 import matplotlib.pyplot as plt
 from io import BytesIO
-import
+import tempfile
 
 # Check if CUDA is available, otherwise use CPU
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
@@ -70,7 +70,7 @@ def process_video(video_path, target, progress=gr.Progress()):
         boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
 
         for box, score, label in zip(boxes, scores, labels):
-            if score.item() >= 0.
+            if score.item() >= 0.25:
                 box = [round(i, 2) for i in box.tolist()]
                 object_label = target
                 confidence = round(score.item(), 3)
@@ -100,12 +100,11 @@ def create_heatmap(frame_scores):
     plt.yticks([])
     plt.tight_layout()
 
-
-
-    buf.seek(0)
+    with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as tmp_file:
+        plt.savefig(tmp_file.name, format='png')
     plt.close()
 
-    return
+    return tmp_file.name
 
 def load_sample_frame(video_path):
     cap = cv2.VideoCapture(video_path)
@@ -120,7 +119,7 @@ def load_sample_frame(video_path):
 
 def gradio_app():
     with gr.Blocks() as app:
-        gr.Markdown("# Video Object Detection with Owlv2")
+        gr.Markdown("# Video Object Detection with Owlv2 (3 FPS, Batch Size 32)")
 
         video_input = gr.Video(label="Upload Video")
         target_input = gr.Textbox(label="Target Object", value="Elephant")
@@ -138,8 +137,8 @@ def gradio_app():
         def process_and_update(video, target):
            frames, scores, error = process_video(video, target, progress_bar)
            if frames is not None:
-
-                return frames, scores, frames[0],
+                heatmap_path = create_heatmap(scores)
+                return frames, scores, frames[0], heatmap_path, error, gr.Slider(maximum=len(frames) - 1, value=0)
            return None, None, None, None, error, gr.Slider(maximum=100, value=0)
 
         def update_frame(frame_index, frames):
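For context, here is a minimal sketch of how create_heatmap could read after this change. Only the lines visible in the diff (plt.yticks([]), plt.tight_layout(), the tempfile-based save, plt.close(), and the returned path) come from the commit; the figure setup, the imshow call over frame_scores, and the colorbar are assumptions about the unchanged body of the function, not the author's exact code.

import tempfile

import matplotlib
matplotlib.use("Agg")  # assumption: the Space runs headless, so use a non-interactive backend
import matplotlib.pyplot as plt
import numpy as np


def create_heatmap(frame_scores):
    # Assumed layout: render the per-frame confidence scores as a 1 x N heat strip.
    plt.figure(figsize=(10, 2))
    plt.imshow(np.array(frame_scores)[np.newaxis, :], cmap="viridis", aspect="auto")
    plt.colorbar(label="Confidence")
    plt.xlabel("Frame")
    plt.yticks([])
    plt.tight_layout()

    # Confirmed by the diff: save to a named temporary PNG instead of an
    # in-memory BytesIO buffer, and return the file path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
        plt.savefig(tmp_file.name, format="png")
    plt.close()

    return tmp_file.name

Because the file is created with delete=False, the PNG survives the with block, so process_and_update can hand the returned path to what is presumably an image output in the Blocks UI; note that this function does not clean up the temporary files it creates.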