import gradio as gr
from transformers import pipeline, DetrForObjectDetection, DetrConfig, DetrImageProcessor
import numpy as np
import cv2
from PIL import Image


def draw_detections(image, detections):
    # Convert the PIL image to a numpy array
    np_image = np.array(image)
    # Convert RGB to BGR for OpenCV drawing
    np_image = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)

    for detection in detections:
        # Each detection from the object-detection pipeline is a dict with
        # 'score', 'label', and a 'box' dict holding 'xmin', 'ymin', 'xmax', 'ymax'
        score = detection['score']
        label = detection['label']
        box = detection['box']
        x_min, y_min, x_max, y_max = (
            int(box['xmin']), int(box['ymin']), int(box['xmax']), int(box['ymax'])
        )
        cv2.rectangle(np_image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
        cv2.putText(np_image, f'{label} {score:.2f}', (x_min, max(y_min - 10, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    # Convert BGR back to RGB for display
    final_image = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
    # Convert the numpy array back to a PIL Image
    final_pil_image = Image.fromarray(final_image)
    return final_pil_image


# Initialize the DETR model, image processor, and object-detection pipeline
config = DetrConfig.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", config=config)
image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
od_pipe = pipeline(task='object-detection', model=model, image_processor=image_processor)


def get_pipeline_prediction(pil_image):
    try:
        # Run the object detection pipeline
        pipeline_output = od_pipe(pil_image)
        # Draw the detection results on the image
        processed_image = draw_detections(pil_image, pipeline_output)
        # Provide both the annotated image and the raw detection results
        return processed_image, pipeline_output
    except Exception as e:
        # Log the error
        print(f"An error occurred: {str(e)}")
        # Return the original image and the error as JSON
        return pil_image, {"error": str(e)}


demo = gr.Interface(
    fn=get_pipeline_prediction,
    inputs=gr.Image(label="Input image", type="pil"),
    outputs=[
        gr.Image(label="Annotated Image"),
        gr.JSON(label="Detected Objects")
    ]
)

demo.launch()
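
# For reference: the object-detection pipeline returns a list of dicts, one per
# detected object. This list is what gr.JSON displays and what draw_detections
# consumes. The labels and numbers below are illustrative only, not real output:
#
# [
#     {"score": 0.9987, "label": "cat",
#      "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
#     {"score": 0.9953, "label": "remote",
#      "box": {"xmin": 40, "ymin": 73, "xmax": 175, "ymax": 118}},
# ]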