sfoy committed on
Commit 5626e2b · verified · 1 Parent(s): 355b356

Update app.py

Files changed (1)
  1. app.py +51 -143
app.py CHANGED
@@ -1,156 +1,64 @@
 import gradio as gr
-import spaces
 from huggingface_hub import hf_hub_download
-
+import torch
 
 def download_models(model_id):
-    hf_hub_download("merve/yolov9", filename=f"{model_id}", local_dir=f"./")
-    return f"./{model_id}"
-
-@spaces.GPU
-def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
-    """
-    Performs object detection using a YOLOv9 model. This function loads a specified YOLOv9 model,
-    configures it based on the provided parameters, and carries out inference on a given image.
-    Additionally, it allows for optional modification of the input size and the application of
-    test time augmentation to potentially improve detection accuracy.
-
-    Parameters:
-        model_path (str): The file path to the pre-trained YOLOv9 model.
-        conf_threshold (float): The confidence threshold used during Non-Maximum Suppression (NMS)
-            to filter detections. Detections with confidence levels below this threshold are discarded.
-        iou_threshold (float): The Intersection over Union (IoU) threshold applied in NMS. Detections
-            with IoU values above this threshold are considered overlapping and are hence merged.
-        img_path (str): The file path to the image on which inference is to be performed.
-        size (int, optional): The input size for inference. Altering this may affect the accuracy and
-            speed of the detection process.
-
-    Returns:
-        tuple: A tuple containing the detection results. This includes the bounding boxes (boxes),
-            confidence scores (scores), and detected categories (categories), alongside a results object
-            for further processing, such as visualization.
-    """
-
-    # Import YOLOv9
-    import yolov9
-
-    # Load the model
+    """
+    Downloads a model file from Hugging Face Hub to a specified local directory.
+
+    Parameters:
+    - model_id (str): Identifier of the model to download.
+
+    Returns:
+    - str: Path to the downloaded model file.
+    """
+    model_path = hf_hub_download(repo_id="merve/yolov9", filename=model_id)
+    return model_path
+
+def yolov9_inference(img_path, model_id="model_weights.pth", image_size=640, conf_threshold=0.25, iou_threshold=0.45):
+    """
+    Performs object detection using a YOLOv9 model. This function loads a specified YOLOv9 model,
+    configures it based on the provided parameters, and carries out inference on a given image.
+    Additionally, it allows for optional modification of the input size and the application of
+    test time augmentation to potentially improve detection accuracy.
+
+    Parameters:
+    - img_path (str): The file path to the image on which inference is to be performed.
+    - model_id (str): Identifier of the model to use.
+    - image_size (int): The input size for inference.
+    - conf_threshold (float): The confidence threshold used during Non-Maximum Suppression.
+    - iou_threshold (float): The Intersection over Union threshold applied in NMS.
+
+    Returns:
+    - Image: An image with detection bounding boxes drawn on it.
+    """
+    # Import YOLOv9 and torch only when the function is called to save on initial script load time
+    from yolov9 import YOLOv9
+    from PIL import Image
+    import numpy as np
+
+    # Download and load the model
     model_path = download_models(model_id)
-    model = yolov9.load(model_path, device="cuda:0")
-
-    # Set model parameters
-    model.conf = conf_threshold
-    model.iou = iou_threshold
-
-    # Perform inference
-    results = model(img_path, size=image_size)
+    model = YOLOv9(model_path, conf_threshold=conf_threshold, iou_threshold=iou_threshold, img_size=image_size)
+    model.eval() # Set the model to evaluation mode
 
-    # Optionally, show detection bounding boxes on image
-    output = results.render()
-
-    return output[0]
+    # Load image
+    img = Image.open(img_path).convert("RGB")
+    img = np.array(img)
 
+    # Perform inference
+    results = model.predict(img, size=image_size)
 
-def app():
-    with gr.Blocks():
-        with gr.Row():
-            with gr.Column():
-                img_path = gr.Image(type="filepath", label="Image")
-                model_path = gr.Dropdown(
-                    label="Model",
-                    choices=[
-                        "gelan-c.pt",
-                        "gelan-e.pt",
-                        "yolov9-c.pt",
-                        "yolov9-e.pt",
-                    ],
-                    value="gelan-e.pt",
-                )
-                image_size = gr.Slider(
-                    label="Image Size",
-                    minimum=320,
-                    maximum=1280,
-                    step=32,
-                    value=640,
-                )
-                conf_threshold = gr.Slider(
-                    label="Confidence Threshold",
-                    minimum=0.1,
-                    maximum=1.0,
-                    step=0.1,
-                    value=0.4,
-                )
-                iou_threshold = gr.Slider(
-                    label="IoU Threshold",
-                    minimum=0.1,
-                    maximum=1.0,
-                    step=0.1,
-                    value=0.5,
-                )
-                yolov9_infer = gr.Button(value="Inference")
-
-            with gr.Column():
-                output_numpy = gr.Image(type="numpy",label="Output")
-
-        yolov9_infer.click(
-            fn=yolov9_inference,
-            inputs=[
-                img_path,
-                model_path,
-                image_size,
-                conf_threshold,
-                iou_threshold,
-            ],
-            outputs=[output_numpy],
-        )
-
-        gr.Examples(
-            examples=[
-                [
-                    "image_data/IMG_3352.JPG",
-                    "gelan-e.pt",
-                    640,
-                    0.4,
-                    0.5,
-                ],
-                [
-                    "image_data/IMG_3353.JPG",
-                    "yolov9-c.pt",
-                    640,
-                    0.4,
-                    0.5,
-                ],
-            ],
-            fn=yolov9_inference,
-            inputs=[
-                img_path,
-                model_path,
-                image_size,
-                conf_threshold,
-                iou_threshold,
-            ],
-            outputs=[output_numpy],
-            cache_examples=True,
-        )
-
-# Initialize a Gradio Blocks application.
-gradio_app = gr.Blocks()
+    # Extract results and visualize
+    output_image = model.visualize(results, img)
+    return output_image
 
-with gradio_app:
-    # Display a title using HTML, centered.
-    gr.HTML("""
-    <h1 style='text-align: center; margin-bottom: 20px;'>
-    YOLOv9 from PipYoloV9 on my data
-    </h1>
-    """)
+# Example Gradio interface setup (simplified for demonstration purposes)
+def gradio_interface(img_path):
+    return yolov9_inference(img_path)
 
-    # Assuming 'app()' represents your main app functionality.
-    # If 'app()' involves Gradio interface components, directly place those components here.
-    # Example: gr.Image() for image input, gr.Button() for a button, etc.
-    # Replace 'app()' with your actual Gradio components or function call.
-    app()
+iface = gr.Interface(fn=gradio_interface, inputs="image", outputs="image", title="YOLOv9 Object Detection")
+iface.launch()
 
-# Launch the Gradio app, enabling debug mode for detailed error logs and server information.
-gradio_app.launch(debug=True)
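For comparison, the inference path this commit removes relied on the pip-installable yolov9 package (loaded via yolov9.load) rather than a YOLOv9 class import. A minimal sketch of that earlier flow, assuming the yolov9 pip package and a CUDA device are available, and reusing a checkpoint name and example image from the removed UI:

import yolov9
from huggingface_hub import hf_hub_download

# Fetch a checkpoint from the merve/yolov9 repo (gelan-e.pt is one of the
# dropdown choices in the removed Blocks UI).
model_path = hf_hub_download(repo_id="merve/yolov9", filename="gelan-e.pt")

# Load and configure the detector the way the removed code did.
model = yolov9.load(model_path, device="cuda:0")
model.conf = 0.4  # NMS confidence threshold
model.iou = 0.5   # NMS IoU threshold

# Run inference on an image path; render() draws the boxes and returns annotated frames.
results = model("image_data/IMG_3352.JPG", size=640)
annotated = results.render()[0]

Note on the new interface: yolov9_inference opens img_path with Image.open, which expects a file path, while gr.Interface(inputs="image") typically hands the handler a numpy array; if that matters, gr.Image(type="filepath") — the component the removed Blocks UI used — would preserve the filepath contract.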