File size: 4,956 Bytes
3fb3fe3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 |
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Processing Samples: 100%|██████████| 4931/4931 [6:09:48<00:00, 4.50s/it] "
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dataset preparation complete.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
}
],
"source": [
"import os\n",
"import json\n",
"import requests\n",
"from tqdm import tqdm\n",
"\n",
"# Load the dataset export JSON\n",
"json_file_path = \"Train-v2.json\"  # Update this with the actual file path\n",
"with open(json_file_path, 'r') as f:\n",
"    data = json.load(f)\n",
"\n",
"# Create directories for images, labels, and segments\n",
"images_dir = \"images/train\"\n",
"labels_dir = \"labels/train\"\n",
"segments_dir = \"segments/train\"\n",
"for directory in (images_dir, labels_dir, segments_dir):\n",
"    os.makedirs(directory, exist_ok=True)\n",
"\n",
"def download_image(url, output_path, timeout=60):\n",
"    \"\"\"Download the file at `url` to `output_path`.\n",
"\n",
"    Returns True on success, False on failure.  A timeout is set so a\n",
"    single dead connection cannot hang the whole multi-hour export, and\n",
"    failures are reported via tqdm.write instead of being silently\n",
"    dropped (previously a non-200 response was skipped with no message).\n",
"    \"\"\"\n",
"    try:\n",
"        response = requests.get(url, timeout=timeout)\n",
"        response.raise_for_status()\n",
"    except requests.RequestException as exc:\n",
"        tqdm.write(f\"Download failed for {url}: {exc}\")\n",
"        return False\n",
"    with open(output_path, 'wb') as f:\n",
"        f.write(response.content)\n",
"    return True\n",
"\n",
"# Map dataset category ids to contiguous, 0-based YOLO class ids\n",
"categories = data['dataset']['task_attributes']['categories']\n",
"category_id_mapping = {cat['id']: idx for idx, cat in enumerate(categories)}\n",
"category_names = {idx: cat['name'] for idx, cat in enumerate(categories)}\n",
"\n",
"# Iterate through the dataset samples; only fully labeled ones are exported\n",
"for sample in tqdm(data['dataset']['samples'], desc=\"Processing Samples\"):\n",
"    label_status = sample['labels'].get('ground-truth', {}).get('label_status', 'SKIPPED')\n",
"    if label_status != \"LABELED\":\n",
"        continue  # guard clause instead of nesting the whole loop body\n",
"\n",
"    image_url = sample['attributes']['image']['url']\n",
"    image_name = sample['name']\n",
"    download_image(image_url, os.path.join(images_dir, image_name))\n",
"\n",
"    # Download the segmentation bitmap (if any)\n",
"    ground_truth = sample['labels']['ground-truth']['attributes']\n",
"    segmentation_url = ground_truth.get('segmentation_bitmap', {}).get('url')\n",
"    if segmentation_url:\n",
"        download_image(segmentation_url, os.path.join(segments_dir, image_name))\n",
"\n",
"    # Write annotations to label files in YOLO format\n",
"    label_output_path = os.path.join(labels_dir, os.path.splitext(image_name)[0] + \".txt\")\n",
"    with open(label_output_path, 'w') as f:\n",
"        for annotation in ground_truth['annotations']:\n",
"            yolo_class_id = category_id_mapping[annotation['category_id']]\n",
"            # YOLO detection format is: class_id x_center y_center width height.\n",
"            # Only the class id is recorded here; extend with bbox/polygon\n",
"            # coordinates if the downstream trainer needs geometry.\n",
"            f.write(f\"{yolo_class_id}\\n\")\n",
"\n",
"# Create the dataset YAML file for YOLO\n",
"yaml_file_path = \"dataset.yaml\"\n",
"with open(yaml_file_path, 'w') as yaml_file:\n",
"    yaml_file.write(\"path: .\\n\")  # root path\n",
"    yaml_file.write(f\"train: {images_dir}\\n\")\n",
"    yaml_file.write(\"val: \\n\")  # no validation set\n",
"    yaml_file.write(\"names:\\n\")\n",
"    for class_id, name in category_names.items():\n",
"        yaml_file.write(f\"  {class_id}: {name}\\n\")\n",
"\n",
"print(\"Dataset preparation complete.\")\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "sgrs",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
|