import json
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{chen2024gpradar,
  title={GPRadar-Defect-MultiTask Dataset},
  author={Chen, Xingqiang},
  year={2024},
  publisher={Hugging Face}
}
"""

_DESCRIPTION = """\
GPRadar-Defect-MultiTask Dataset

This dataset pairs ground-penetrating radar (GPR) images with defect annotations,
and is designed for training and evaluating multimodal GPR defect-detection models.
It combines a set of basic defect-detection samples with 874 images annotated
during real-world structural inspections, covering two defect classes: voids and cracks.
"""

_HOMEPAGE = "https://huggingface.co/datasets/xingqiang/GPRadar-Defect-MultiTask"


class PaligemmaDataset(datasets.GeneratorBasedBuilder):
    """GPRadar-Defect-MultiTask Dataset for GPR defect detection and analysis."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "boxes": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                "labels": datasets.Sequence(datasets.ClassLabel(names=["void", "crack"])),
                "caption": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test"},
            ),
        ]

    def _generate_examples(self, split):
        """Yields examples."""
        annotation_file = f"annotations/{split}_unified.json"

        # Build the unified annotation files on first use.
        if not os.path.exists(annotation_file):
            convert_annotations_to_unified_format()

        if not os.path.exists(annotation_file):
            logger.warning(f"Unified annotation file not found: {annotation_file}; returning no examples")
            return

        with open(annotation_file, "r", encoding="utf-8") as f:
            annotations = json.load(f)

        for idx, ann in enumerate(annotations):
            image_found = False
            image_filename = ann["image_filename"]

            # Images may sit in a split-specific folder or in a shared one.
            for image_path in [
                f"images/{split}/{image_filename}",
                f"images/datasets/{image_filename}",
                f"images/{image_filename}",
            ]:
                if os.path.exists(image_path):
                    yield idx, {
                        "image": image_path,
                        "boxes": ann["boxes"],
                        "labels": ann["labels"],
                        "caption": ann["caption"],
                    }
                    image_found = True
                    break

            if not image_found:
                logger.warning(f"Image file not found: {image_filename}; skipping this example")


def normalize_image_path(image_path):
    """Normalize an image path by stripping the redundant export-directory prefix."""
    if "p-1.v1i.paligemma-multimodal/dataset/" in image_path:
        return image_path.split("p-1.v1i.paligemma-multimodal/dataset/")[-1]
    return image_path
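
# A minimal usage sketch (the filename below is hypothetical):
#
#     normalize_image_path("p-1.v1i.paligemma-multimodal/dataset/frame_001.jpg")
#     # -> "frame_001.jpg"
#     normalize_image_path("frame_001.jpg")
#     # -> "frame_001.jpg"  (paths without the prefix pass through unchanged)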


def convert_annotations_to_unified_format():
    """Convert all annotations to the unified format."""
    print("Converting annotations to the unified format...")

    os.makedirs("annotations", exist_ok=True)

    for split in ["train", "val", "valid", "test"]:
        print(f"Processing the {split} split...")
        unified_annotations = []

        # Original JSON annotations, if present.
        json_path = f"annotations/{split}.json"
        print(f"Checking JSON file: {json_path}")
        if os.path.exists(json_path):
            print(f"Found JSON file: {json_path}")
            with open(json_path, encoding="utf-8") as f:
                try:
                    annotations = json.load(f)
                    print(f"Loaded {len(annotations)} annotations from {json_path}")
                    for ann in annotations:
                        unified_annotations.append({
                            "image_filename": ann["image_filename"],
                            "boxes": ann["boxes"],
                            "labels": ann["labels"],
                            "caption": ann["caption"],
                            "source": "original"
                        })
                except json.JSONDecodeError:
                    print(f"Error: {json_path} is not a valid JSON file")
        else:
            print(f"JSON file not found: {json_path}")

        # Known JSONL annotation files for this split.
        jsonl_files_to_check = [
            f"_annotations.{split}.jsonl",
            f"_annotations.{split}1.jsonl"
        ]

        # Also pick up split-specific JSONL files from subdirectories of annotations/.
        for root, dirs, files in os.walk("annotations"):
            for file in files:
                if file.endswith(f"{split}.jsonl") or file.endswith(f"{split}1.jsonl") or file.endswith(f"{split}2.jsonl"):
                    rel_path = os.path.relpath(os.path.join(root, file), "annotations")
                    if rel_path != file:
                        jsonl_files_to_check.append(rel_path)

        for jsonl_path in jsonl_files_to_check:
            full_path = os.path.join("annotations", jsonl_path)
            print(f"Checking JSONL file: {full_path}")
            if os.path.exists(full_path):
                print(f"Found JSONL file: {full_path}")
                annotation_count = 0
                with open(full_path, encoding="utf-8") as f:
                    for line_num, line in enumerate(f, 1):
                        try:
                            line = line.strip()
                            if not line:
                                print(f"Skipping line {line_num}: empty line")
                                continue

                            ann = json.loads(line)
                            image_filename = ann.get("image", "")

                            if not image_filename:
                                print(f"Skipping line {line_num}: no image filename")
                                continue

                            image_filename = normalize_image_path(image_filename)

                            # Skip records whose image cannot be located anywhere.
                            image_exists = False
                            possible_image_paths = [
                                f"images/datasets/{image_filename}",
                                f"images/train/{image_filename}",
                                f"images/val/{image_filename}",
                                f"images/test/{image_filename}",
                                f"images/{image_filename}"
                            ]

                            for img_path in possible_image_paths:
                                if os.path.exists(img_path):
                                    image_exists = True
                                    break

                            if not image_exists:
                                print(f"Warning: image file does not exist: {image_filename}")
                                continue

                            if "annotations" in ann:
                                # Explicit box annotations: [x, y, width, height] per defect.
                                boxes = [[b["x"], b["y"], b["width"], b["height"]] for b in ann["annotations"]]
                                labels = [0 if b["class"] == "void" else 1 for b in ann["annotations"]]
                                caption = f"Image contains {len(boxes)} defects: " + \
                                    ", ".join([b["class"] for b in ann["annotations"]])
                            else:
                                # PaLiGemma-style record: boxes are encoded in the "suffix"
                                # field as location tokens, e.g.
                                # "<loc0012><loc0034><loc0056><loc0078> void".
                                boxes = []
                                labels = []
                                caption = ann.get("prefix", "")

                                if "suffix" in ann:
                                    parts = ann["suffix"].split()
                                    for i, part in enumerate(parts):
                                        if "<loc" in part:
                                            # Parse up to four consecutive <locNNNN> tokens into
                                            # normalized coordinates (a 0-1023 location grid).
                                            coords = []
                                            loc_str = part
                                            while loc_str.startswith("<loc") and len(coords) < 4:
                                                try:
                                                    coord_value = int(loc_str[4:loc_str.find(">")])
                                                    coords.append(coord_value / 1024)
                                                    loc_str = loc_str[loc_str.find(">") + 1:]
                                                except (ValueError, IndexError):
                                                    break

                                            if len(coords) == 4:
                                                boxes.append(coords)

                                                # The class label follows the location tokens.
                                                label_idx = 1
                                                while i + label_idx < len(parts) and not parts[i + label_idx].startswith("<loc"):
                                                    label_text = parts[i + label_idx]
                                                    if "void" in label_text:
                                                        labels.append(0)
                                                        break
                                                    elif "crack" in label_text:
                                                        labels.append(1)
                                                        break
                                                    label_idx += 1

                                                # Default to "void" if no label was found for this box.
                                                if len(labels) < len(boxes):
                                                    labels.append(0)

                            unified_annotations.append({
                                "image_filename": image_filename,
                                "boxes": boxes,
                                "labels": labels,
                                "caption": caption,
                                "source": "p1v1"
                            })
                            annotation_count += 1
                        except json.JSONDecodeError as e:
                            print(f"Warning: line {line_num} of {full_path} is not valid JSON: {e}")
                            continue
                print(f"Loaded {annotation_count} annotations from {full_path}")
            else:
                print(f"JSONL file not found: {full_path}")

        # Merge the "valid" split into the existing "val" split, if one was saved.
        if split == "valid":
            val_annotations = []
            if os.path.exists("annotations/val_unified.json"):
                try:
                    with open("annotations/val_unified.json", "r", encoding="utf-8") as f:
                        val_annotations = json.load(f)
                    print(f"Loaded existing val split annotations: {len(val_annotations)} records")

                    # Deduplicate by image filename.
                    existing_filenames = {ann["image_filename"] for ann in val_annotations}
                    for ann in unified_annotations:
                        if ann["image_filename"] not in existing_filenames:
                            val_annotations.append(ann)
                            existing_filenames.add(ann["image_filename"])

                    print(f"Merged the valid split into the val split: {len(val_annotations)} records")
                    unified_annotations = val_annotations
                except Exception as e:
                    print(f"Error while merging the valid and val splits: {e}")

        if unified_annotations:
            # "valid" is stored under the canonical split name "val".
            save_split = "val" if split == "valid" else split
            print(f"Creating unified annotations for {save_split}: {len(unified_annotations)} records")
            unified_path = f"annotations/{save_split}_unified.json"
            with open(unified_path, "w", encoding="utf-8") as f:
                json.dump(unified_annotations, f, ensure_ascii=False, indent=2)
            print(f"Saved unified annotations to: {unified_path}")
        else:
            print(f"Warning: no valid annotations for {split}; skipping unified file creation")