# Hugging Face Space: waste-detection-yolov8 Gradio demo (Space status at scrape
# time: "Runtime error"). The following metadata lines (file size, commit hashes,
# line-number gutter) were page-scrape residue and have been folded into this
# comment so the file is valid Python.
import gradio as gr
import torch
from PIL import Image
# Cache loaded models keyed by weights path so repeated requests do not
# re-download / re-initialize the network (torch.hub.load is expensive).
_MODEL_CACHE = {}


def _load_model(model_path):
    """Load a custom-weights detection model via torch.hub, caching by path.

    NOTE(review): the original code used repo 'ultralytics/yolov8n', which is
    not a valid torch.hub repository — YOLOv8 is not distributed through
    torch.hub. Custom ``.pt`` weights (``best_p6.pt`` is a YOLOv5 P6 artifact)
    are loaded through the 'ultralytics/yolov5' hub repo.
    """
    if model_path not in _MODEL_CACHE:
        _MODEL_CACHE[model_path] = torch.hub.load(
            "ultralytics/yolov5", "custom", path=model_path
        )
    return _MODEL_CACHE[model_path]


def process_image(image_path, model_path="waste-detection-yolov8/best_p6.pt"):
    """
    Run object detection on an image and return the annotated result.

    Args:
        image_path (str): Path to the input image.
        model_path (str, optional): Path to the model weights file.
            Defaults to "waste-detection-yolov8/best_p6.pt".

    Returns:
        PIL.Image.Image: The input image with detection boxes drawn on it.
    """
    model = _load_model(model_path)
    image = Image.open(image_path)
    results = model(image)
    # results.render() draws boxes/labels and returns a list of annotated
    # numpy arrays. The original ``results.imgs[0]`` was the *unannotated*
    # input image (and a deprecated attribute) — not the processed output.
    annotated = results.render()[0]
    # Convert back to PIL so the return type matches the documented contract.
    return Image.fromarray(annotated)
# Gradio UI: one image in -> annotated image out.
# (Dropped ``thumbnail=None``: a deprecated no-op keyword in current Gradio.)
interface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(label="Input Image", type="filepath"),
    outputs="image",
    title="Image Processing with YOLOv8n",
    description="Upload an image to process it with the YOLOv8n model.",
    article="<p>This Gradio app allows you to upload an image and process it using a YOLOv8n model.</p>",
)

# Guard the launch so importing this module does not start a server.
if __name__ == "__main__":
    # ``enable_queue``, ``allow_screenshot`` and ``allow_user_code`` were
    # removed from launch() in Gradio 3.x/4.x and make it raise TypeError —
    # the likely cause of the Space's "Runtime error". Queueing is now
    # enabled via .queue() before launching.
    interface.queue()
    interface.launch(server_port=11111, server_name="localhost")