AlshimaaGamalAlsaied committed
Commit 8e6947e · 1 Parent(s): 5694e2c
update
app.py
CHANGED
@@ -14,6 +14,7 @@ from huggingface_hub import HfApi
 
 def yolov7_inference(
     image: gr.inputs.Image = None,
+    video: gr.inputs.Video = None,
     model_path: gr.inputs.Dropdown = None,
     image_size: gr.inputs.Slider = 640,
     conf_threshold: gr.inputs.Slider = 0.25,
@@ -23,6 +24,7 @@ def yolov7_inference(
     YOLOv7 inference function
     Args:
         image: Input image
+        video: Input video
         model_path: Path to the model
         image_size: Image size
         conf_threshold: Confidence threshold
@@ -34,12 +36,13 @@ def yolov7_inference(
     model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
     model.conf = conf_threshold
     model.iou = iou_threshold
-    results = model([image], size=image_size)
+    results = model([image], [video], size=image_size)
     return results.render()[0]
 
 
 inputs = [
     gr.inputs.Image(type="pil", label="Input Image"),
+    gr.inputs.Video(type="pil", label="Input Video"),
     gr.inputs.Dropdown(
         choices=[
             "alshimaa/model_baseline",
@@ -54,7 +57,7 @@ inputs = [
     #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
 ]
 
-outputs = gr.outputs.Image(type="filepath", label="Output Image")
+outputs = [gr.outputs.Image(type="filepath", label="Output Image"), gr.outputs.Video(type="filepath", label="Output Video")]
 title = "Smart Environmental Eye (SEE)"
 
 examples = [['image1.jpg', 'alshimaa/model_yolo7', 640, 0.25, 0.45], ['image2.jpg', 'alshimaa/model_yolo7', 640, 0.25, 0.45], ['image3.jpg', 'alshimaa/model_yolo7', 640, 0.25, 0.45]]
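
For context on the change above: the new call hands the whole video to the model in the same call as the image, but an image detector like YOLOv7 is normally applied to a video one frame at a time. Below is a minimal sketch, not part of this commit, of frame-by-frame video inference built on the same yolov7.load pattern used in app.py and assuming OpenCV (cv2) is available; the helper name yolov7_video_inference, the output path, and the mp4v codec are illustrative choices, not code from the repository.

# Minimal sketch, not from the repository: run YOLOv7 over a video frame by frame.
# Assumes the yolov7-pip package used in app.py plus OpenCV; names are illustrative.
import cv2
import yolov7


def yolov7_video_inference(video_path, model_path, image_size=640,
                           conf_threshold=0.25, iou_threshold=0.45,
                           output_path="output.mp4"):
    # Same load/configure pattern as the image path in app.py.
    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold

    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"),
                             fps, (width, height))

    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # OpenCV decodes frames as BGR; the detector expects RGB.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = model([rgb], size=image_size)
        rendered = results.render()[0]  # annotated RGB frame
        writer.write(cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))

    cap.release()
    writer.release()
    return output_path

A helper along these lines could sit behind the gr.inputs.Video component, returning a file path compatible with the gr.outputs.Video(type="filepath") output declared in the diff.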