import gradio as gr
import spaces
from huggingface_hub import hf_hub_download
def download_models(model_id):
    # Fetch the selected checkpoint from the merve/yolov9 Hub repo into the working directory.
    hf_hub_download("merve/yolov9", filename=model_id, local_dir="./")
    return f"./{model_id}"
@spaces.GPU
def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
    """
    Perform object detection with a YOLOv9 model. The function downloads the selected
    checkpoint, loads it, configures it with the given thresholds, and runs inference
    on the provided image.

    Parameters:
        img_path (str): Path to the image on which inference is performed.
        model_id (str): Filename of the pre-trained YOLOv9 checkpoint to download and load.
        image_size (int): Input size for inference; changing it trades off detection
            accuracy against speed.
        conf_threshold (float): Confidence threshold used during Non-Maximum Suppression
            (NMS); detections below this confidence are discarded.
        iou_threshold (float): Intersection over Union (IoU) threshold applied in NMS;
            detections whose IoU exceeds this value are treated as overlapping and merged.

    Returns:
        numpy.ndarray: The input image with the detected bounding boxes rendered onto it.
    """
    # Import YOLOv9 lazily so it is only loaded inside the GPU-decorated function.
    import yolov9

    # Download the requested checkpoint and load it onto the GPU.
    model_path = download_models(model_id)
    model = yolov9.load(model_path, device="cuda:0")

    # Set detection thresholds.
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Run inference and render the bounding boxes onto the image.
    results = model(img_path, size=image_size)
    output = results.render()
    return output[0]
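
# A minimal sketch of calling yolov9_inference directly, outside the Gradio UI,
# using one of the bundled example images (assumes the `yolov9` pip package and a
# CUDA device are available; not part of the app's control flow):
#
#     annotated = yolov9_inference(
#         img_path="image_data/IMG_3352.JPG",
#         model_id="gelan-e.pt",
#         image_size=640,
#         conf_threshold=0.4,
#         iou_threshold=0.5,
#     )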
def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                # Input controls: image, model checkpoint, and inference parameters.
                img_path = gr.Image(type="filepath", label="Image")
                model_path = gr.Dropdown(
                    label="Model",
                    choices=[
                        "gelan-c.pt",
                        "gelan-e.pt",
                        "yolov9-c.pt",
                        "yolov9-e.pt",
                    ],
                    value="gelan-e.pt",
                )
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Inference")

            with gr.Column():
                # Rendered detection output.
                output_numpy = gr.Image(type="numpy", label="Output")

        # Wire the button to the inference function.
        yolov9_infer.click(
            fn=yolov9_inference,
            inputs=[
                img_path,
                model_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy],
        )

        # Pre-computed examples shown below the interface.
        gr.Examples(
            examples=[
                [
                    "image_data/IMG_3352.JPG",
                    "gelan-e.pt",
                    640,
                    0.4,
                    0.5,
                ],
                [
                    "image_data/IMG_3353.JPG",
                    "yolov9-c.pt",
                    640,
                    0.4,
                    0.5,
                ],
            ],
            fn=yolov9_inference,
            inputs=[
                img_path,
                model_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy],
            cache_examples=True,
        )
# Initialize the top-level Gradio Blocks application.
gradio_app = gr.Blocks()
with gradio_app:
    # Centered page title.
    gr.HTML("""
    <h1 style='text-align: center; margin-bottom: 20px;'>
    YOLOv9 from PipYoloV9 on my data
    </h1>
    """)
    # Build the interface defined in app() inside this Blocks context.
    app()

# Launch the app; debug mode prints detailed error logs and server information.
gradio_app.launch(debug=True)