File size: 2,851 Bytes
13bf6b4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2ec5f23
 
 
 
 
 
13bf6b4
 
2ec5f23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import gradio as gr
import cv2
import torch
import numpy as np

# Load the YOLOv5 model
# NOTE: torch.hub.load downloads the ultralytics/yolov5 repo and the
# pretrained 'yolov5s' weights on first run (requires network access);
# later runs reuse the local hub cache. Runs at import time, so the app
# blocks here until the model is ready.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Function to run inference on an image
def run_inference(image):
    """Run YOLOv5 detection on an image and summarize the results.

    Args:
        image: A PIL image (RGB), as supplied by the Gradio ``Image`` input.

    Returns:
        tuple: ``(annotated_image, summary)`` where ``annotated_image`` is an
        RGB numpy array with detection boxes drawn on it, and ``summary`` is a
        human-readable ``"name: count"`` listing of detected classes.
    """
    # PIL -> numpy. PIL arrays are RGB, which is exactly what YOLOv5's
    # AutoShape wrapper expects for numpy input.
    image = np.array(image)

    # Run YOLOv5 inference (AutoShape handles resizing, normalization, NMS).
    results = model(image)

    # render() draws the boxes in place and returns images in the SAME
    # channel order as the input. The input is already RGB, so no color
    # conversion is needed here — the previous cv2.cvtColor(BGR2RGB) call
    # swapped the red and blue channels of the displayed image.
    annotated_image = results.render()[0]

    # Tally detections per class name, e.g. {'person': 2, 'dog': 1}.
    detected_objects = results.pandas().xyxy[0]['name'].value_counts().to_dict()
    if detected_objects:
        summary = "Detected Objects:\n" + "\n".join(
            f"{obj}: {count}" for obj, count in detected_objects.items()
        )
    else:
        # Avoid a dangling header when nothing was found.
        summary = "Detected Objects:\nNone"
    return annotated_image, summary

# Create the Gradio interface
# The css= string below is passed verbatim to the browser: gradient page
# background, styled header banner, description text, and hover-animated
# buttons. It is runtime data, not Python — edit with care.
interface = gr.Blocks(css="""
body {
    font-family: 'Poppins', sans-serif;
    background: linear-gradient(135deg, #6a11cb, #2575fc);
    color: #fff;
    margin: 0;
    padding: 0;
}

header {
    text-align: center;
    background: linear-gradient(90deg, #ff758c, #ff7eb3);
    padding: 1rem;
    border-radius: 15px;
    box-shadow: 0px 4px 15px rgba(0, 0, 0, 0.2);
    margin-bottom: 1rem;
}

header h1 {
    margin: 0;
    font-size: 2.5rem;
    color: #fff;
}

.description {
    text-align: center;
    font-size: 1.2rem;
    margin: 0.5rem 0 1.5rem 0;
    color: #f0f0f0;
}

button {
    background: linear-gradient(90deg, #6a11cb, #2575fc);
    border: none;
    border-radius: 10px;
    padding: 0.8rem 1.5rem;
    font-size: 1rem;
    color: white;
    cursor: pointer;
    transition: transform 0.2s, background 0.2s;
}

button:hover {
    transform: scale(1.05);
    background: linear-gradient(90deg, #2575fc, #6a11cb);
}
""")

# Build the UI layout inside the Blocks context. Component creation order
# inside each Row/Column determines on-page placement.
with interface:
    # Header banner (styled by the `header` CSS rule above).
    with gr.Row():
        gr.Markdown("<header><h1>🌟 InsightVision: Detect, Analyze, Summarize 🌟</h1></header>")
    # One-line usage description under the header.
    with gr.Row():
        gr.Markdown(
            "<div class='description'>Upload an image to detect objects using YOLOv5 and receive a detailed summary of detected items!</div>"
        )
    # Two-column layout: input image on the left, results on the right.
    with gr.Row():
        with gr.Column():
            uploaded_image = gr.Image(type="pil", label="Upload Your Image")
        with gr.Column():
            detected_image = gr.Image(type="pil", label="Detected Objects")
            summary_output = gr.Textbox(lines=10, label="Summary of Detected Objects")

    with gr.Row():
        submit_button = gr.Button("Analyze Image")

    def process_image(image):
        """Gradio click handler: thin wrapper around run_inference."""
        annotated_image, summary = run_inference(image)
        return annotated_image, summary

    # Wire the button: image in, (annotated image, text summary) out.
    submit_button.click(
        process_image,
        inputs=[uploaded_image],
        outputs=[detected_image, summary_output],
    )

# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    interface.launch()