Spaces: Running on Zero
import gradio as gr
import spaces
from ultralytics import YOLO

# Pre-trained YOLOv8 document-layout model (DocLayNet fine-tune; weights file
# is expected alongside the app).
model = YOLO("yolov8x-doclaynet-epoch64-imgsz640-initiallr1e-4-finallr1e-5.pt")

# Index -> human-readable class name, as provided by the loaded model.
class_names = model.names
# ZeroGPU: the decorator below requests a GPU for the duration of each call.
# (The original comment asked for it, but the decorator itself was missing,
# so inference would run without a GPU allocation on ZeroGPU hardware.)
@spaces.GPU
def process_image(image):
    """Run document-layout detection on an uploaded image.

    Args:
        image: PIL image supplied by the Gradio input component.

    Returns:
        tuple: ``(annotated_image, labels_text)`` — the detection plot (or
        ``None`` on failure) and one "CLASS: confidence" line per detected
        region (or an error message on failure).
    """
    try:
        # Run inference; save=False keeps results in memory only.
        results = model(source=image, save=False, show_labels=True, show_conf=True, show_boxes=True)
        result = results[0]  # Single input image -> first (only) result.

        # Render boxes, labels and confidences onto a copy of the image.
        annotated_image = result.plot()

        # box.cls / box.conf are 1-element tensors: convert explicitly before
        # indexing the name map / applying the float format spec.
        detected_areas_labels = "\n".join(
            f"{class_names[int(box.cls)].upper()}: {float(box.conf):.2f}"
            for box in result.boxes
        )
        return annotated_image, detected_areas_labels
    except Exception as e:
        # Surface the failure in the UI instead of crashing the worker.
        return None, f"Error processing image: {e}"
# --- Gradio UI --------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Document Segmentation Demo (ZeroGPU)")

    # Input component.
    input_image = gr.Image(type="pil", label="Upload Image")

    # Output components.
    output_image = gr.Image(type="pil", label="Annotated Image")
    output_text = gr.Textbox(label="Detected Areas and Labels")

    # Wire the button to the inference function.
    btn = gr.Button("Run Document Segmentation")
    btn.click(fn=process_image, inputs=input_image, outputs=[output_image, output_text])

# Queue limits concurrency so requests are handled one at a time.
demo.queue(max_size=1).launch()