Datasets:
Updated README
Browse files
README.md
CHANGED
@@ -49,10 +49,199 @@ dataset = load_dataset("VLR-CVC/ComPAP", skill, split=split)
|
|
49 |
```
|
50 |
|
51 |
<details>
|
|
|
52 |
<summary>Map to single images</summary>
|
|
|
53 |
If your model can only process single images, you can render each sample as a single image:
|
54 |
|
55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
|
57 |
</details>
|
58 |
|
|
|
49 |
```
|
50 |
|
51 |
<details>
|
52 |
+
|
53 |
<summary>Map to single images</summary>
|
54 |
+
|
55 |
If your model can only process single images, you can render each sample as a single image:
|
56 |
|
57 |
+

|
58 |
+
|
59 |
+
|
60 |
+
```python
|
61 |
+
from PIL import Image, ImageDraw, ImageFont
|
62 |
+
import numpy as np
|
63 |
+
from datasets import Features, Value, Image as ImageFeature
|
64 |
+
|
65 |
+
class SingleImagePickAPanel:
    """Render a ComPAP sample (context panels + option panels) as one image.

    Intended for models that can only consume a single image: the context
    panels are composited on a top row and the numbered option panels on a
    bottom row, with "Context"/"Options" labels between them.
    """

    def __init__(self, max_size=500, margin=10, label_space=20, font_path="Arial.ttf"):
        self.max_size = max_size        # longest edge any panel may have after resizing
        self.margin = margin            # horizontal gap between adjacent panels
        self.label_space = label_space  # vertical space reserved for each row label
        # Separate font sizes for the row labels and the option numbers.
        self.label_font_size = 20
        self.number_font_size = 24
        self.font_path = font_path

    def _load_font(self, size):
        """Load the configured TrueType font at ``size``.

        Raises:
            ValueError: if the font file cannot be opened.
        """
        try:
            return ImageFont.truetype(self.font_path, size)
        # Narrow catch: the original used a bare ``except:``, which would also
        # swallow KeyboardInterrupt; font-loading failures raise OSError.
        except OSError as exc:
            raise ValueError("Font file not found") from exc

    def resize_image(self, img):
        """Resize image keeping aspect ratio if longest edge > max_size."""
        if max(img.size) > self.max_size:
            ratio = self.max_size / max(img.size)
            new_size = tuple(int(dim * ratio) for dim in img.size)
            return img.resize(new_size, Image.Resampling.LANCZOS)
        return img

    def create_mask_panel(self, width, height):
        """Create a grey placeholder panel with a centered question mark."""
        mask_panel = Image.new("RGB", (width, height), (200, 200, 200))
        draw = ImageDraw.Draw(mask_panel)
        font = self._load_font(int(height * 0.8))
        text = "?"
        bbox = draw.textbbox((0, 0), text, font=font)
        text_x = (width - (bbox[2] - bbox[0])) // 2
        text_y = (height - (bbox[3] - bbox[1])) // 2
        draw.text((text_x, text_y), text, fill="black", font=font)
        return mask_panel

    def draw_number_on_panel(self, panel, number, font):
        """Draw ``number`` in the bottom-right corner of ``panel`` on a light box.

        Mutates and returns ``panel``.
        """
        draw = ImageDraw.Draw(panel)
        bbox = draw.textbbox((0, 0), str(number), font=font)
        text_width = bbox[2] - bbox[0]
        text_height = bbox[3] - bbox[1]
        padding = 2
        text_x = panel.size[0] - text_width - padding
        text_y = panel.size[1] - text_height - padding
        # Semi-transparent white background keeps the digit legible on any art.
        bg_rect = [(text_x - padding, text_y - padding),
                   (text_x + text_width + padding, text_y + text_height + padding)]
        draw.rectangle(bg_rect, fill=(255, 255, 255, 180))
        draw.text((text_x, text_y), str(number), fill="black", font=font)
        return panel

    def map_to_single_image(self, examples):
        """Process a batch of examples from a HuggingFace dataset.

        Expects batched columns ``sample_id``, ``context``, ``options`` and —
        for sequence-filling style skills — ``index`` (position of the masked
        panel within the context).  Adds a ``single_image`` column holding one
        composite PIL image per example and returns the batch.

        Raises:
            ValueError: if an example has no panels, or the font is missing.
        """
        single_images = []
        for i in range(len(examples['sample_id'])):
            # Get context and options for the current example.
            context = examples['context'][i] if len(examples['context'][i]) > 0 else []
            options = examples['options'][i]

            # Cap every panel's longest edge.
            context = [self.resize_image(img) for img in context]
            options = [self.resize_image(img) for img in options]

            all_panels = context + options
            if not all_panels:
                # Previously this fell through to a NameError on panel_width;
                # fail with a clear message instead.
                raise ValueError("Example has neither context nor option panels")
            # Median size avoids a single outlier panel skewing the grid.
            panel_width = int(np.median([img.size[0] for img in all_panels]))
            panel_height = int(np.median([img.size[1] for img in all_panels]))

            # Normalize all panels to the common size.
            context = [img.resize((panel_width, panel_height)) for img in context]
            options = [img.resize((panel_width, panel_height)) for img in options]

            # Sequence-filling skills mark the missing panel with a "?" placeholder.
            if 'index' in examples and context:
                mask_panel = self.create_mask_panel(panel_width, panel_height)
                context.insert(examples['index'][i], mask_panel)

            # Canvas dimensions depend on whether a context row exists.
            if context:
                context_row_width = panel_width * len(context) + self.margin * (len(context) - 1)
                options_row_width = panel_width * len(options) + self.margin * (len(options) - 1)
                canvas_width = max(context_row_width, options_row_width)
                canvas_height = panel_height * 2 + self.label_space * 2
            else:
                # caption_relevance has no context: a single options row.
                canvas_width = panel_width * len(options) + self.margin * (len(options) - 1)
                canvas_height = panel_height + self.label_space

            final_image = Image.new("RGB", (canvas_width, canvas_height), "white")
            draw = ImageDraw.Draw(final_image)
            label_font = self._load_font(self.label_font_size)
            number_font = self._load_font(self.number_font_size)

            current_y = 0

            # Context row (label + centered panels), when present.
            if context:
                bbox = draw.textbbox((0, 0), "Context", font=label_font)
                text_x = (canvas_width - (bbox[2] - bbox[0])) // 2
                draw.text((text_x, current_y), "Context", fill="black", font=label_font)
                current_y += self.label_space

                x_offset = (canvas_width - (panel_width * len(context) +
                                            self.margin * (len(context) - 1))) // 2
                for panel in context:
                    final_image.paste(panel, (x_offset, current_y))
                    x_offset += panel_width + self.margin
                current_y += panel_height

            # Options row label.
            bbox = draw.textbbox((0, 0), "Options", font=label_font)
            text_x = (canvas_width - (bbox[2] - bbox[0])) // 2
            draw.text((text_x, current_y), "Options", fill="black", font=label_font)
            current_y += self.label_space

            # Options row: each panel copied, numbered, and pasted.
            x_offset = (canvas_width - (panel_width * len(options) +
                                        self.margin * (len(options) - 1))) // 2
            for idx, panel in enumerate(options):
                # Copy so the number never mutates the dataset's panel.
                panel_with_number = panel.copy()
                if panel_with_number.mode != 'RGBA':
                    panel_with_number = panel_with_number.convert('RGBA')
                panel_with_number = self.draw_number_on_panel(
                    panel_with_number,
                    idx,
                    number_font
                )
                # The RGBA panel doubles as its own alpha mask for paste().
                final_image.paste(panel_with_number, (x_offset, current_y), panel_with_number)
                x_offset += panel_width + self.margin

            single_images.append(final_image)

        examples['single_image'] = single_images
        return examples
|
229 |
+
|
230 |
+
from datasets import load_dataset

# Pick a skill and a split:
#   skills: "sequence_filling", "char_coherence", "visual_closure",
#           "text_closure", "caption_relevance"
#   splits: "val", "test"
skill = "sequence_filling"
split = "val"
dataset = load_dataset("VLR-CVC/ComPAP", skill, split=split)

# Render every sample's panels into a single composite image.
panel_renderer = SingleImagePickAPanel()
dataset = dataset.map(
    panel_renderer.map_to_single_image,
    batched=True,
    batch_size=32,
    remove_columns=['context', 'options']
)
dataset.save_to_disk(f"ComPAP_{skill}_{split}_single_images")
|
244 |
+
```
|
245 |
|
246 |
</details>
|
247 |
|