Commit
·
cc7270d
1
Parent(s):
c75d2db
remove image size slider
Browse files
app.py
CHANGED
@@ -42,11 +42,6 @@ image = Image.open(requests.get(url, stream=True).raw)
|
|
42 |
inputs = processor(images=image, return_tensors="pt").to(device).to(torch.float16)
|
43 |
init_compiled_model()
|
44 |
|
45 |
-
|
46 |
-
css = """
|
47 |
-
.feedback textarea {font-size: 24px !important}
|
48 |
-
"""
|
49 |
-
|
50 |
BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator()
|
51 |
MASK_ANNOTATOR = sv.MaskAnnotator()
|
52 |
LABEL_ANNOTATOR = sv.LabelAnnotator()
|
@@ -69,7 +64,6 @@ def annotate_image(input_image, detections, labels) -> np.ndarray:
|
|
69 |
def process_video(
|
70 |
input_video,
|
71 |
confidence_threshold,
|
72 |
-
max_side,
|
73 |
progress=gr.Progress(track_tqdm=True),
|
74 |
):
|
75 |
video_info = sv.VideoInfo.from_video_path(input_video)
|
@@ -85,7 +79,7 @@ def process_video(
|
|
85 |
frame = next(frame_generator)
|
86 |
except StopIteration:
|
87 |
break
|
88 |
-
results, fps = query(frame, confidence_threshold, max_side)
|
89 |
all_fps.append(fps)
|
90 |
final_labels = []
|
91 |
detections = []
|
@@ -108,11 +102,8 @@ def process_video(
|
|
108 |
)
|
109 |
|
110 |
|
111 |
-
def query(frame, confidence_threshold, max_side=640):
|
112 |
-
frame_resized = sv.resize_image(
|
113 |
-
image=frame, resolution_wh=(max_side, max_side), keep_aspect_ratio=True
|
114 |
-
)
|
115 |
-
image = Image.fromarray(frame_resized)
|
116 |
inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
|
117 |
with torch.no_grad():
|
118 |
start = time.time()
|
@@ -129,7 +120,7 @@ def query(frame, confidence_threshold, max_side=640):
|
|
129 |
return results, fps
|
130 |
|
131 |
|
132 |
-
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
|
133 |
gr.Markdown("## Real Time Object Detection with compiled RT-DETR")
|
134 |
gr.Markdown(
|
135 |
"""
|
@@ -157,13 +148,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
|
|
157 |
value=0.3,
|
158 |
step=0.05,
|
159 |
)
|
160 |
-
max_side = gr.Slider(
|
161 |
-
label="Image Size",
|
162 |
-
minimum=240,
|
163 |
-
maximum=1080,
|
164 |
-
value=640,
|
165 |
-
step=10,
|
166 |
-
)
|
167 |
with gr.Row():
|
168 |
submit = gr.Button(variant="primary")
|
169 |
|
@@ -173,13 +157,13 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
|
|
173 |
["./cat.mp4", 0.3, 640],
|
174 |
["./safari2.mp4", 0.3, 640],
|
175 |
],
|
176 |
-
inputs=[input_video, conf, max_side],
|
177 |
outputs=output_video,
|
178 |
)
|
179 |
|
180 |
submit.click(
|
181 |
fn=process_video,
|
182 |
-
inputs=[input_video, conf, max_side],
|
183 |
outputs=[output_video, actual_fps],
|
184 |
)
|
185 |
|
|
|
42 |
inputs = processor(images=image, return_tensors="pt").to(device).to(torch.float16)
|
43 |
init_compiled_model()
|
44 |
|
|
|
|
|
|
|
|
|
|
|
45 |
BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator()
|
46 |
MASK_ANNOTATOR = sv.MaskAnnotator()
|
47 |
LABEL_ANNOTATOR = sv.LabelAnnotator()
|
|
|
64 |
def process_video(
|
65 |
input_video,
|
66 |
confidence_threshold,
|
|
|
67 |
progress=gr.Progress(track_tqdm=True),
|
68 |
):
|
69 |
video_info = sv.VideoInfo.from_video_path(input_video)
|
|
|
79 |
frame = next(frame_generator)
|
80 |
except StopIteration:
|
81 |
break
|
82 |
+
results, fps = query(frame, confidence_threshold)
|
83 |
all_fps.append(fps)
|
84 |
final_labels = []
|
85 |
detections = []
|
|
|
102 |
)
|
103 |
|
104 |
|
105 |
+
def query(frame, confidence_threshold):
|
106 |
+
image = Image.fromarray(frame)
|
|
|
|
|
|
|
107 |
inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
|
108 |
with torch.no_grad():
|
109 |
start = time.time()
|
|
|
120 |
return results, fps
|
121 |
|
122 |
|
123 |
+
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
124 |
gr.Markdown("## Real Time Object Detection with compiled RT-DETR")
|
125 |
gr.Markdown(
|
126 |
"""
|
|
|
148 |
value=0.3,
|
149 |
step=0.05,
|
150 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
151 |
with gr.Row():
|
152 |
submit = gr.Button(variant="primary")
|
153 |
|
|
|
157 |
["./cat.mp4", 0.3, 640],
|
158 |
["./safari2.mp4", 0.3, 640],
|
159 |
],
|
160 |
+
inputs=[input_video, conf],
|
161 |
outputs=output_video,
|
162 |
)
|
163 |
|
164 |
submit.click(
|
165 |
fn=process_video,
|
166 |
+
inputs=[input_video, conf],
|
167 |
outputs=[output_video, actual_fps],
|
168 |
)
|
169 |
|