import torch
from PIL import Image
from openai import OpenAI
from transformers import (Owlv2Processor, Owlv2ForObjectDetection,
                          AutoProcessor, AutoModelForMaskGeneration)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import base64
import io
import numpy as np
import gradio as gr
import json
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')


def encode_image_to_base64(image):
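    """Convert an uploaded image (PIL Image, numpy array, or Gradio tuple) to a base64-encoded PNG string."""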
    # If image is a tuple (as sometimes provided by Gradio), take the first element
    if isinstance(image, tuple):
        image = image[0]

    # If image is a numpy array, convert to PIL Image
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # Ensure image is in PIL Image format
    if not isinstance(image, Image.Image):
        raise ValueError("Input must be a PIL Image, numpy array, or tuple containing an image")

    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode('utf-8')

def analyze_image(image):
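    """Ask the OpenAI vision model whether the image is surprising; returns a JSON string with label, element, and rating."""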
    client = OpenAI(api_key=OPENAI_API_KEY)
    base64_image = encode_image_to_base64(image)

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": """Your task is to determine if the image is surprising or not surprising.    
                    if the image is surprising, determine which element, figure or object in the image is making the image surprising and write it only in one sentence with no more then 6 words, otherwise, write 'NA'.    
                    Also rate how surprising the image is on a scale of 1-5, where 1 is not surprising at all and 5 is highly surprising.
                    Provide the response as a JSON with the following structure:    
                    {
                        "label": "[surprising OR not surprising]",
                        "element": "[element]",
                        "rating": [1-5]
                    }"""
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                }
            ]
        }
    ]

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=100,
        temperature=0.1,
        response_format={
            "type": "json_object"
        }
    )

    return response.choices[0].message.content


def show_mask(mask, ax, random_color=False):
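    """Overlay a semi-transparent segmentation mask (red by default) on the given matplotlib axes."""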
    if random_color:
        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
    else:
        color = np.array([1.0, 0.0, 0.0, 0.5])

    if len(mask.shape) == 4:
        mask = mask[0, 0]

    mask_image = np.zeros((*mask.shape, 4), dtype=np.float32)
    mask_image[mask > 0] = color

    ax.imshow(mask_image)


def process_image_detection(image, target_label, surprise_rating):
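    """Detect the target element with OWLv2, segment it with SAM, and draw the box, mask,
    confidence score, and rating label onto the image. Returns a PNG buffer."""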
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Get original image DPI and size
    original_dpi = image.info.get('dpi', (72, 72))
    original_size = image.size

    # Calculate relative font size based on image dimensions
    base_fontsize = min(original_size) / 40  # Adjust this divisor to change overall font size

    # Load OWLv2 (open-vocabulary detection) and SAM (segmentation) models.
    # Note: they are re-loaded on every call; caching them at module level
    # would make repeated analyses much faster.
    owlv2_processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16")
    owlv2_model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16").to(device)

    sam_processor = AutoProcessor.from_pretrained("facebook/sam-vit-base")
    sam_model = AutoModelForMaskGeneration.from_pretrained("facebook/sam-vit-base").to(device)

    inputs = owlv2_processor(text=[target_label], images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = owlv2_model(**inputs)

    target_sizes = torch.tensor([image.size[::-1]]).to(device)
    results = owlv2_processor.post_process_object_detection(outputs, target_sizes=target_sizes)[0]

    dpi = 300  # Increased DPI for better text rendering
    figsize = (original_size[0] / dpi, original_size[1] / dpi)
    fig = plt.figure(figsize=figsize, dpi=dpi)

    ax = plt.Axes(fig, [0., 0., 1., 1.])
    fig.add_axes(ax)

    plt.imshow(image)

    scores = results["scores"]
    if len(scores) > 0:
        max_score_idx = scores.argmax().item()
        max_score = scores[max_score_idx].item()

        if max_score > 0.2:
            box = results["boxes"][max_score_idx].cpu().numpy()

            sam_inputs = sam_processor(
                image,
                input_boxes=[[[box[0], box[1], box[2], box[3]]]],
                return_tensors="pt"
            ).to(device)

            with torch.no_grad():
                sam_outputs = sam_model(**sam_inputs)

            masks = sam_processor.image_processor.post_process_masks(
                sam_outputs.pred_masks.cpu(),
                sam_inputs["original_sizes"].cpu(),
                sam_inputs["reshaped_input_sizes"].cpu()
            )

            mask = masks[0].numpy() if isinstance(masks[0], torch.Tensor) else masks[0]
            show_mask(mask, ax=ax)

            # Draw rectangle with increased line width
            rect = patches.Rectangle(
                (box[0], box[1]),
                box[2] - box[0],
                box[3] - box[1],
                linewidth=max(2, min(original_size) / 500),  # Scale line width with image size
                edgecolor='red',
                facecolor='none'
            )
            ax.add_patch(rect)

            # Add confidence score with improved visibility
            plt.text(
                box[0], box[1] - base_fontsize,
                f'{max_score:.2f}',
                color='red',
                fontsize=base_fontsize,
                fontweight='bold',
                bbox=dict(facecolor='white', alpha=0.7, edgecolor='none', pad=2)
            )

            # Add label and rating with improved visibility
            plt.text(
                box[2] + base_fontsize / 2, box[1],
                f'Unexpected (Rating: {surprise_rating}/5)\n{target_label}',
                color='red',
                fontsize=base_fontsize,
                fontweight='bold',
                bbox=dict(facecolor='white', alpha=0.7, edgecolor='none', pad=2),
                verticalalignment='bottom'
            )

    plt.axis('off')

    # Save with high DPI; the original DPI is re-applied on the final save below.
    # (Non-string values in PNG text metadata can raise in Pillow, so the DPI
    # tuple is not passed through savefig's metadata argument.)
    buf = io.BytesIO()
    plt.savefig(buf,
                format='png',
                dpi=dpi,
                bbox_inches='tight',
                pad_inches=0)
    buf.seek(0)
    plt.close()

    # Process final image
    output_image = Image.open(buf)
    output_image = output_image.resize(original_size, Image.Resampling.LANCZOS)

    final_buf = io.BytesIO()
    output_image.save(final_buf, format='PNG', dpi=original_dpi)
    final_buf.seek(0)

    return final_buf


def process_and_analyze(image):
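    """Full pipeline behind the Gradio button: normalize the input image, run the
    GPT analysis, and highlight the surprising element if one is found."""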
    if image is None:
        return None, "Please upload an image first."

    if OPENAI_API_KEY is None:
        return None, "OpenAI API key not found in environment variables."

    try:
        # Handle different input types
        if isinstance(image, tuple):
            image = image[0]  # Take the first element if it's a tuple
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        if not isinstance(image, Image.Image):
            raise ValueError("Invalid image format")

        # Analyze image
        gpt_response = analyze_image(image)
        response_data = json.loads(gpt_response)

        if response_data["label"].lower() == "surprising" and response_data["element"].lower() != "na":
            result_buf = process_image_detection(image, response_data["element"], response_data["rating"])
            result_image = Image.open(result_buf)
            analysis_text = f"Label: {response_data['label']}\nElement: {response_data['element']}\nRating: {response_data['rating']}/5"
            return result_image, analysis_text
        else:
            return image, "Not Surprising"

    except Exception as e:
        return None, f"Error processing image: {str(e)}"


# Create Gradio interface
def create_interface():
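    """Build the Gradio Blocks UI wiring the image upload and button to the pipeline."""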
    with gr.Blocks() as demo:
        gr.Markdown("# Image Surprise Analysis")

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload Image")
                analyze_btn = gr.Button("Analyze Image")

            with gr.Column():
                output_image = gr.Image(label="Processed Image")
                output_text = gr.Textbox(label="Analysis Results")

        analyze_btn.click(
            fn=process_and_analyze,
            inputs=[input_image],
            outputs=[output_image, output_text]
        )

    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch()