import json
import os
from pathlib import Path

import datasets
from PIL import Image
import jsonlines

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{chen2024paligemma,
  title={PaliGemma Multitask Dataset},
  author={Chen, Xingqiang},
  year={2024},
  publisher={Hugging Face}
}
"""

_DESCRIPTION = """\
This dataset contains images and annotations for defect detection and analysis,
designed for training and evaluating the PaliGemma multitask model. The dataset
includes both basic defect detection samples and a larger set of 874 annotated
images from real-world structural inspections.
"""

_HOMEPAGE = "https://huggingface.co/datasets/xingqiang/paligemma-multitask-dataset"


class PaligemmaDataset(datasets.GeneratorBasedBuilder):
    """PaliGemma Multitask Dataset for defect detection and analysis."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "boxes": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                "labels": datasets.Sequence(datasets.ClassLabel(names=["void", "crack"])),
                "caption": datasets.Value("string"),
                "source": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test"},
            ),
        ]

    def _generate_examples(self, split):
        """Generate examples."""
        # Prefer annotations in the unified format.
        unified_path = os.path.join("annotations", f"{split}_unified.json")
        if os.path.exists(unified_path):
            logger.info(f"Using unified annotation file: {unified_path}")
            with open(unified_path, encoding="utf-8") as f:
                annotations = json.load(f)

            for idx, ann in enumerate(annotations):
                image_path = os.path.join("images", split, ann["image_filename"])
                try:
                    yield f"unified_{idx}", {
                        "image": image_path,
                        "boxes": ann["boxes"],
                        "labels": ann["labels"],
                        "caption": ann["caption"],
                        "source": ann.get("source", "unified"),
                    }
                except Exception as e:
                    logger.warning(f"Skipping invalid image {image_path}: {e}")
                    continue
            return  # The unified format was used; do not process the other formats.

        # No unified annotations: fall back to the original formats.
        # Load the original JSON annotations.
        json_path = os.path.join("annotations", f"{split}.json")
        if os.path.exists(json_path):
            with open(json_path, encoding="utf-8") as f:
                annotations = json.load(f)

            for idx, ann in enumerate(annotations):
                image_path = os.path.join("images", split, ann["image_filename"])
                try:
                    # Do not open the image here; just return its path.
                    yield f"orig_{idx}", {
                        "image": image_path,
                        "boxes": ann["boxes"],
                        "labels": ann["labels"],
                        "caption": ann["caption"],
                        "source": "original",
                    }
                except Exception as e:
                    logger.warning(f"Skipping invalid image {image_path}: {e}")
                    continue

        # Load the JSONL annotations.
        jsonl_path = os.path.join("annotations", f"_annotations.{split}.jsonl")
        if os.path.exists(jsonl_path):
            with jsonlines.open(jsonl_path) as reader:
                for idx, ann in enumerate(reader):
                    # Make sure the correct image filename is used.
                    image_filename = ann.get("image", "")
                    image_path = os.path.join("images", split, image_filename)
                    try:
                        # Do not open the image here; just return its path.
                        # Convert the annotation into our format.
                        if "annotations" in ann:
                            # Handle the new format.
                            boxes = [[b["x"], b["y"], b["width"], b["height"]]
                                     for b in ann["annotations"]]
                            labels = [0 if b["class"] == "void" else 1
                                      for b in ann["annotations"]]
                            caption = f"Image contains {len(boxes)} defects: " + \
                                ", ".join([b["class"] for b in ann["annotations"]])
                        else:
                            # Handle the old format (prefix/suffix).
                            # Box locations have to be parsed out of the suffix,
                            # e.g. "<loc0123><loc0456><loc0789><loc1000> crack".
                            boxes = []
                            labels = []
                            if "suffix" in ann:
                                parts = ann["suffix"].split(";")
                                for part in parts:
                                    part = part.strip()
                                    if "<loc" in part:
                                        loc_parts = part.split(" ")
                                        if len(loc_parts) >= 2:
                                            # Extract the coordinates. <locXXXX> tokens
                                            # encode positions on a 0-1023 grid, so keep
                                            # them normalized to [0, 1].
                                            coords = []
                                            for loc in loc_parts[0].split("><"):
                                                loc = loc.strip("<>")
                                                if loc.startswith("loc"):
                                                    coords.append(int(loc[3:]) / 1024)
                                            if len(coords) == 4:
                                                boxes.append(coords)
                                                label_text = loc_parts[1].strip()
                                                labels.append(0 if label_text == "void" else 1)
                            # Use the prompt prefix as the caption for old-format records.
                            caption = ann.get("prefix", "")

                        yield f"jsonl_{idx}", {
                            "image": image_path,
                            "boxes": boxes,
                            "labels": labels,
                            "caption": caption,
                            "source": "jsonl",
                        }
                    except Exception as e:
                        logger.warning(f"Skipping invalid image {image_path}: {e}")
                        continue