import json
import os
import re

import datasets
import jsonlines

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{chen2024paligemma,
    title={PaliGemma Multitask Dataset},
    author={Chen, Xingqiang},
    year={2024},
    publisher={Hugging Face}
}
"""

_DESCRIPTION = """\
This dataset contains images and annotations for defect detection and analysis,
designed for training and evaluating the PaliGemma multitask model.
The dataset includes both basic defect detection samples and a larger set of
874 annotated images from real-world structural inspections.
"""

_HOMEPAGE = "https://huggingface.co/datasets/xingqiang/paligemma-multitask-dataset"
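
# Expected repository layout (inferred from the loading logic below; paths
# are relative to the dataset root):
#   annotations/{split}_unified.json         - preferred, unified annotations
#   annotations/{split}.json                 - original JSON annotations
#   annotations/_annotations.{split}.jsonl   - PaliGemma-format JSONL annotations
#   images/{train,val,test}/                 - per-split image folders
#   images/datasets/                         - additional image pool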


class PaligemmaDataset(datasets.GeneratorBasedBuilder):
    """PaliGemma Multitask Dataset for defect detection and analysis."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "boxes": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                "labels": datasets.Sequence(datasets.ClassLabel(names=["void", "crack"])),
                "caption": datasets.Value("string"),
                "source": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
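
    # Illustrative example of a single record yielded by this builder (values
    # are made up; field names follow the Features above, and "labels" indexes
    # the ClassLabel names, so 0 == "void" and 1 == "crack"):
    #   {
    #       "image": "images/train/inspection_0001.jpg",
    #       "boxes": [[0.12, 0.30, 0.45, 0.52]],
    #       "labels": [1],
    #       "caption": "Image contains 1 defects: crack",
    #       "source": "unified",
    #   }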

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test"},
            ),
        ]

    def _generate_examples(self, split):
        """Yields examples for the given split."""
        # Prefer the unified annotation file when it exists; it supersedes
        # the per-source annotation files handled below.
        unified_path = os.path.join("annotations", f"{split}_unified.json")
        if os.path.exists(unified_path):
            logger.info(f"Using unified annotation file: {unified_path}")
            with open(unified_path, encoding="utf-8") as f:
                annotations = json.load(f)
                for idx, ann in enumerate(annotations):
                    image_path = os.path.join("images", split, ann["image_filename"])
                    try:
                        yield f"unified_{idx}", {
                            "image": image_path,
                            "boxes": ann["boxes"],
                            "labels": ann["labels"],
                            "caption": ann["caption"],
                            "source": ann.get("source", "unified")
                        }
                    except Exception as e:
                        logger.warning(f"Skipping invalid image {image_path}: {e}")
                        continue
            return

        # Fall back to the original JSON annotations.
        json_path = os.path.join("annotations", f"{split}.json")
        if os.path.exists(json_path):
            with open(json_path, encoding="utf-8") as f:
                annotations = json.load(f)
                for idx, ann in enumerate(annotations):
                    image_path = os.path.join("images", split, ann["image_filename"])
                    try:
                        yield f"orig_{idx}", {
                            "image": image_path,
                            "boxes": ann["boxes"],
                            "labels": ann["labels"],
                            "caption": ann["caption"],
                            "source": "original"
                        }
                    except Exception as e:
                        logger.warning(f"Skipping invalid image {image_path}: {e}")
                        continue

        # Finally, read the PaliGemma-format JSONL annotations (source "p1v1").
        jsonl_path = os.path.join("annotations", f"_annotations.{split}.jsonl")
        if os.path.exists(jsonl_path):
            with jsonlines.open(jsonl_path) as reader:
                for idx, ann in enumerate(reader):
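                    # Each JSONL record is expected to take one of two shapes
                    # (illustrative values, inferred from the parsing below):
                    #   {"image": "img_001.jpg",
                    #    "prefix": "detect defect",
                    #    "suffix": "<loc0123><loc0456><loc0789><loc0321> void"}
                    # or, with pre-extracted boxes:
                    #   {"image": "img_001.jpg",
                    #    "annotations": [{"x": 10, "y": 20, "width": 30,
                    #                     "height": 40, "class": "crack"}]}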
                    image_filename = ann.get("image", "")
                    image_path = os.path.join("images", split, image_filename)

                    try:
                        if "annotations" in ann:
                            # Pre-extracted boxes in pixel coordinates.
                            boxes = [[b["x"], b["y"], b["width"], b["height"]] for b in ann["annotations"]]
                            labels = [0 if b["class"] == "void" else 1 for b in ann["annotations"]]
                            caption = f"Image contains {len(boxes)} defects: " + \
                                ", ".join([b["class"] for b in ann["annotations"]])
                        else:
                            # Parse PaliGemma-style detection strings: each entry is
                            # four <locXXXX> tokens (coordinates normalized to a
                            # 0-1023 grid) followed by the class name.
                            boxes = []
                            labels = []
                            if "suffix" in ann:
                                for part in ann["suffix"].split(";"):
                                    part = part.strip()
                                    if "<loc" in part:
                                        loc_parts = part.split()
                                        if len(loc_parts) >= 2:
                                            coords = [int(v) / 1024 for v in
                                                      re.findall(r"<loc(\d+)>", loc_parts[0])]
                                            if len(coords) == 4:
                                                boxes.append(coords)
                                                labels.append(0 if "void" in loc_parts[1] else 1)
                            caption = ann.get("prefix", "")

                        # The image may live under images/datasets/ or under any
                        # of the split folders; keep the first path that exists.
                        image_exists = False
                        if os.path.exists(f"images/datasets/{image_filename}"):
                            image_path = f"images/datasets/{image_filename}"
                            image_exists = True

                        if not image_exists:
                            for img_split in ["train", "val", "test"]:
                                candidate = f"images/{img_split}/{image_filename}"
                                if os.path.exists(candidate):
                                    image_path = candidate
                                    image_exists = True
                                    break

                        if not image_exists:
                            logger.warning(f"Image file does not exist: {image_filename}")
                            continue

                        yield f"p1v1_{idx}", {
                            "image": image_path,
                            "boxes": boxes,
                            "labels": labels,
                            "caption": caption,
                            "source": "p1v1"
                        }
                    except Exception as e:
                        logger.warning(f"Skipping invalid annotation {image_path}: {e}")
                        continue


def convert_annotations_to_unified_format():
    """Convert all annotation sources into the unified format."""
    print("Converting annotations to the unified format...")

    os.makedirs("annotations", exist_ok=True)

    # "valid" is processed after "val" so that a separate "valid" split can be
    # merged into the already-written val_unified.json below.
    for split in ["train", "val", "valid", "test"]:
        print(f"Processing the {split} split...")
        unified_annotations = []

        # Collect annotations from the original JSON file, if present.
        json_path = f"annotations/{split}.json"
        print(f"Checking JSON file: {json_path}")
        if os.path.exists(json_path):
            print(f"Found JSON file: {json_path}")
            with open(json_path, encoding="utf-8") as f:
                try:
                    annotations = json.load(f)
                    print(f"Loaded {len(annotations)} annotations from {json_path}")
                    for ann in annotations:
                        unified_annotations.append({
                            "image_filename": ann["image_filename"],
                            "boxes": ann["boxes"],
                            "labels": ann["labels"],
                            "caption": ann["caption"],
                            "source": "original"
                        })
                except json.JSONDecodeError:
                    print(f"Error: {json_path} is not valid JSON")
        else:
            print(f"JSON file not found: {json_path}")

        # Collect annotations from the known JSONL file locations.
        jsonl_variants = [
            f"_annotations.{split}.jsonl",
            f"_annotations.{split}1.jsonl",
            f"p-1.v1i.paligemma/_annotations.{split}.jsonl",
            f"p-1.v1i.paligemma/_annotations.{split}1.jsonl"
        ]

        for jsonl_variant in jsonl_variants:
            jsonl_path = f"annotations/{jsonl_variant}"
            print(f"Checking JSONL file: {jsonl_path}")
            if os.path.exists(jsonl_path):
                print(f"Found JSONL file: {jsonl_path}")
                annotation_count = 0
                with open(jsonl_path, encoding="utf-8") as f:
                    for line_num, line in enumerate(f, 1):
                        try:
                            line = line.strip()
                            if not line:
                                print(f"Skipping line {line_num}: empty line")
                                continue

                            ann = json.loads(line)
                            image_filename = ann.get("image", "")

                            if not image_filename:
                                print(f"Skipping line {line_num}: no image filename")
                                continue

                            # Skip records whose image cannot be found in any
                            # of the known image folders.
                            image_exists = False
                            if os.path.exists(f"images/datasets/{image_filename}"):
                                image_exists = True

                            if not image_exists:
                                for img_split in ["train", "val", "test"]:
                                    if os.path.exists(f"images/{img_split}/{image_filename}"):
                                        image_exists = True
                                        break

                            if not image_exists:
                                print(f"Warning: image file does not exist: {image_filename}")
                                continue

                            if "annotations" in ann:
                                # Pre-extracted boxes in pixel coordinates.
                                boxes = [[b["x"], b["y"], b["width"], b["height"]] for b in ann["annotations"]]
                                labels = [0 if b["class"] == "void" else 1 for b in ann["annotations"]]
                                caption = f"Image contains {len(boxes)} defects: " + \
                                    ", ".join([b["class"] for b in ann["annotations"]])
                            else:
                                # Parse PaliGemma-style <locXXXX> detection strings,
                                # mirroring _generate_examples above.
                                boxes = []
                                labels = []
                                caption = ann.get("prefix", "")

                                if "suffix" in ann:
                                    for part in ann["suffix"].split(";"):
                                        part = part.strip()
                                        if "<loc" in part:
                                            loc_parts = part.split()
                                            if len(loc_parts) >= 2:
                                                coords = [int(v) / 1024 for v in
                                                          re.findall(r"<loc(\d+)>", loc_parts[0])]
                                                if len(coords) == 4:
                                                    boxes.append(coords)
                                                    labels.append(0 if "void" in loc_parts[1] else 1)

                            unified_annotations.append({
                                "image_filename": image_filename,
                                "boxes": boxes,
                                "labels": labels,
                                "caption": caption,
                                "source": "p1v1"
                            })
                            annotation_count += 1
                        except json.JSONDecodeError as e:
                            print(f"Warning: line {line_num} of {jsonl_path} is not valid JSON: {e}")
                            continue
                print(f"Loaded {annotation_count} annotations from {jsonl_path}")
            else:
                print(f"JSONL file not found: {jsonl_path}")

        # Merge the "valid" split into the already-written val_unified.json
        # (the loop processes "val" before "valid", so the file exists by now).
        if split == "valid" and os.path.exists("annotations/val_unified.json"):
            try:
                with open("annotations/val_unified.json", "r", encoding="utf-8") as f:
                    val_annotations = json.load(f)
                    unified_annotations.extend(val_annotations)
                    print(f"Merged the valid and val splits: {len(unified_annotations)} records in total")
            except Exception as e:
                print(f"Error while merging the valid and val splits: {e}")

        if unified_annotations:
            # Annotations from the "valid" split are saved under the
            # canonical "val" name.
            save_split = "val" if split == "valid" else split
            print(f"Creating unified annotations for {save_split}: {len(unified_annotations)} records")
            unified_path = f"annotations/{save_split}_unified.json"
            with open(unified_path, "w", encoding="utf-8") as f:
                json.dump(unified_annotations, f, ensure_ascii=False, indent=2)
            print(f"Saved unified annotations to: {unified_path}")
        else:
            print(f"Warning: no valid annotations for {split}; skipping the unified file")