AlshimaaGamalAlsaied committed on
Commit d334981 · 1 Parent(s): 8e6947e
Files changed (1)
  1. app.py +30 -3
app.py CHANGED
@@ -14,7 +14,6 @@ from huggingface_hub import HfApi
 
 def yolov7_inference(
     image: gr.inputs.Image = None,
-    video: gr.inputs.Video = None,
     model_path: gr.inputs.Dropdown = None,
     image_size: gr.inputs.Slider = 640,
     conf_threshold: gr.inputs.Slider = 0.25,
@@ -24,7 +23,6 @@ def yolov7_inference(
     YOLOv7 inference function
     Args:
         image: Input image
-        video: Input video
         model_path: Path to the model
         image_size: Image size
         conf_threshold: Confidence threshold
@@ -36,9 +34,28 @@ def yolov7_inference(
     model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
     model.conf = conf_threshold
     model.iou = iou_threshold
-    results = model([image], [video], size=image_size)
+    results = model([image], size=image_size)
     return results.render()[0]
 
+def yolov7_inference_video(
+    video: gr.inputs.Video = None,
+    model_path: gr.inputs.Dropdown = None,
+
+):
+    """
+    YOLOv7 inference function
+    Args:
+        video: Input video
+        model_path: Path to the model
+
+    Returns:
+        Rendered video
+    """
+
+    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
+    results = model([video])
+    return results
+
 
 inputs = [
     gr.inputs.Image(type="pil", label="Input Image"),
@@ -70,4 +87,14 @@ demo_app = gr.Interface(
     cache_examples=True,
     theme='huggingface',
 )
+demo_app_video = gr.Interface(
+    fn=yolov7_inference_video,
+    inputs=inputs,
+    outputs=outputs,
+    title=title,
+    examples=examples,
+    cache_examples=True,
+    theme='huggingface',
+)
 demo_app.launch(debug=True, enable_queue=True)
+demo_app_video.launch(debug=True, enable_queue=True)
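Note (sketch, not part of this commit): models loaded through yolov7-pip behave like the YOLOv5 hub API — they take images or arrays, which is what the image path's results.render()[0] already relies on — so calling model([video]) with the file path that Gradio's video component supplies is unlikely to produce a rendered video, and demo_app_video currently reuses the image-oriented inputs, outputs, and examples. Below is a minimal sketch of the usual workaround, frame-by-frame inference with OpenCV. It assumes the results object exposes render() exactly as in the image path and that the legacy Gradio Video component passes the function a file path; every name in it is illustrative rather than taken from the Space.

import tempfile

import cv2
import yolov7


def yolov7_inference_video(video, model_path, image_size=640,
                           conf_threshold=0.25, iou_threshold=0.45):
    # Sketch only: decode the clip, run per-frame inference, re-encode annotated frames.
    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold

    cap = cv2.VideoCapture(video)               # assumes Gradio hands the function a file path
    fps = cap.get(cv2.CAP_PROP_FPS) or 25
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # OpenCV decodes BGR; the model expects RGB, mirroring the PIL image path.
        results = model([cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)], size=image_size)
        rendered = results.render()[0]          # annotated RGB frame, as in the image demo
        writer.write(cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))

    cap.release()
    writer.release()
    return out_path                             # a file path suits a video output component

A function like this would pair with video-specific components (gr.inputs.Video and gr.outputs.Video in the legacy API app.py already uses) rather than the image inputs and outputs. Note also that launch() normally blocks when the script runs outside a notebook, so demo_app_video.launch(...) on the last line would only execute once the first demo's server is stopped; serving both demos from a single interface or app avoids that.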