{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import all required libraries\n",
    "\n",
    "# Path handling\n",
    "import os\n",
    "import pathlib as pl\n",
    "\n",
    "# Image processing and display\n",
    "import numpy as np\n",
    "import PIL\n",
    "import PIL.Image as Image\n",
    "import PIL.ImageDraw as ImageDraw\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Data processing\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 1: the images and CSVs are organized in per-scope folders under \"secondleg\", each folder named after its scope,\n",
    "# and each folder also needs an offset.txt storing the coordinate offset used to align the CSV picks with the image.\n",
    "# This cell creates offset.txt for one scope if it doesn't exist yet and reads the stored offset (0 when the file is empty).\n",
    "\n",
    "testset = os.listdir(\"secondleg\")[8]\n",
    "print(testset)\n",
    "tiff = Image.open(pl.Path(\n",
    "    rf'.\\secondleg\\{testset}\\{testset}.tiff'))\n",
    "csv = pd.read_csv(pl.Path(\n",
    "    rf'.\\secondleg\\{testset}\\{testset}.csv'))\n",
    "# \"a+\" creates the file if it is missing and keeps existing contents; seek back to the start before reading\n",
    "with open(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\offset.txt'), \"a+\") as f:\n",
    "    f.seek(0)\n",
    "    offset = f.read()\n",
    "    if offset != '':\n",
    "        offset = int(offset)\n",
    "    else:\n",
    "        offset = 0\n"
   ]
  },
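  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional helper (a minimal sketch): make sure every scope folder under \"secondleg\" has an offset.txt,\n",
    "# since the batch loops below open it with a plain open() and would fail if the file were missing.\n",
    "# Assumes the same folder layout used in the cell above.\n",
    "\n",
    "for testset in os.listdir(\"secondleg\"):\n",
    "    offset_path = pl.Path(rf'.\\secondleg\\{testset}\\offset.txt')\n",
    "    if not offset_path.exists():\n",
    "        # Start every scope at offset 0; calibrate it with the calibration cell further down\n",
    "        offset_path.write_text(\"0\")\n",
    "        print(f\"created {offset_path}\")"
   ]
  },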
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helper for chopping a wide glacial scope image into fixed-width chunks.\n",
    "# `length` is the chunk width in pixels; `overlap` is the step between successive window start positions\n",
    "# (so consecutive chunks share length - overlap pixels); `input_size` is the full image width.\n",
    "# Returns an array of [start, stop) pixel ranges ready to be passed to Image.crop.\n",
    "def window_with_remainder(length, overlap, input_size):\n",
    "    pixels = np.arange(0, input_size)\n",
    "    # All length-wide windows over the remainder-trimmed range, keeping every `overlap`-th window start\n",
    "    windows = np.lib.stride_tricks.sliding_window_view(pixels[len(pixels) % length:], length)[::overlap]\n",
    "    # Prepend the first chunk starting at 0, then reduce each window to its [start, stop) bounds\n",
    "    return np.vstack((pixels[0:length], windows))[:, [0, -1]] + [0, 1]"
   ]
  },
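  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick illustration (a minimal sketch): print the [start, stop) ranges window_with_remainder produces\n",
    "# for a hypothetical 1000 px wide image, so the crop bounds used later are easy to sanity-check.\n",
    "# Expect an (n_chunks, 2) array whose first row is [0, 400].\n",
    "print(window_with_remainder(400, 80, 1000))"
   ]
  },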
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Offset calibration cell: draws a rectangle from (40, 0) to (100, y_surface) in green and one from\n",
    "# (40, y_surface) to (100, y_bed) in white, where y_surface and y_bed come from the scope's CSV.\n",
    "# Change the index in the os.listdir line to step through the dataset, then adjust offset.txt by hand\n",
    "# until the rectangles line up with the surface and bed in the displayed image.\n",
    "\n",
    "testset = os.listdir(\"secondleg\")[10]\n",
    "print(testset)\n",
    "\n",
    "# Open the image, CSV, and offset file and read the needed data\n",
    "tiff = Image.open(pl.Path(\n",
    "    rf'.\\secondleg\\{testset}\\{testset}.tiff'))\n",
    "csv = pd.read_csv(pl.Path(\n",
    "    rf'.\\secondleg\\{testset}\\{testset}.csv'))\n",
    "with open(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\offset.txt')) as f:\n",
    "    offset = f.read()\n",
    "    if offset == \"\":\n",
    "        offset = 0\n",
    "    else:\n",
    "        offset = int(offset)\n",
    "\n",
    "# Print the current offset\n",
    "print(offset)\n",
    "\n",
    "# There is no need to work on the full image, so make a copy and crop away the unneeded parts\n",
    "img = tiff.copy()\n",
    "img = img.crop((0, 430, img.size[0], 1790))\n",
    "\n",
    "print(csv.head())  # first 5 rows before the offset is applied\n",
    "csv = csv[[\"x_surface\", \"y_surface\", \"x_bed\", \"y_bed\"]] + offset\n",
    "# The CSV is stored back to front, so take the last row to get the first mask data point\n",
    "line = csv.iloc[-1]\n",
    "print(csv.head())  # first 5 rows again, to confirm the offset was applied\n",
    "\n",
    "# Draw the reference rectangles and show the image for calibration\n",
    "draw = ImageDraw.Draw(img)\n",
    "draw.rectangle([(40, 0), (100, line[\"y_surface\"])], fill=\"green\")  # above-surface region\n",
    "draw.rectangle([(40, line[\"y_surface\"]),\n",
    "                (100, line[\"y_bed\"])], fill=\"white\")  # surface-to-bed region\n",
    "img.show()"
   ]
  },
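  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional convenience (a minimal sketch): once a value looks right in the calibration cell above, write it\n",
    "# back to the scope's offset.txt instead of editing the file by hand. `calibrated_offset` is a placeholder\n",
    "# for whatever value you settled on; `testset` is still the scope selected in the previous cell.\n",
    "\n",
    "calibrated_offset = offset  # replace with the value that lined up the rectangles\n",
    "with open(pl.Path(rf'.\\secondleg\\{testset}\\offset.txt'), \"w\") as f:\n",
    "    f.write(str(calibrated_offset))\n",
    "print(f\"saved offset {calibrated_offset} for {testset}\")"
   ]
  },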
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Draw the segmentation mask for every scope from its CSV picks, and save both the cropped image and the mask\n",
    "\n",
    "# Loop over all the files in the \"secondleg\" directory\n",
    "for testset in os.listdir(\"secondleg\"):\n",
    "    # Print the name of the current file\n",
    "    print(testset)\n",
    "\n",
    "    tiff = Image.open(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\{testset}.tiff'))\n",
    "\n",
    "    csv = pd.read_csv(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\{testset}.csv'))\n",
    "\n",
    "    with open(pl.Path(\n",
    "            rf'.\\secondleg\\{testset}\\offset.txt')) as f:\n",
    "        offset = f.read()\n",
    "        if offset == \"\":\n",
    "            offset = 0\n",
    "        else:\n",
    "            offset = int(offset)\n",
    "\n",
    "    # Make a copy of the image and crop it to remove the unneeded parts\n",
    "    img = tiff.copy()\n",
    "    img = img.crop((0, 430, img.size[0], 1790))\n",
    "\n",
    "    # Convert the image to float and then to grayscale\n",
    "    img_float = Image.fromarray(np.divide(np.array(img), 2**8-1))\n",
    "    img = img_float.convert(\"L\")\n",
    "\n",
    "    # Save the cropped and converted image to the specified path\n",
    "    img.save(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\cropped_img_{testset}.png'))\n",
    "\n",
    "    # Apply the offset to the coordinate columns and reverse the row order (the CSV is stored back to front)\n",
    "    csv = csv[[\"x_surface\", \"y_surface\", \"x_bed\", \"y_bed\"]]+offset\n",
    "    csv = csv[::-1].reset_index(drop=True)\n",
    "\n",
    "    # Extend the picks to the left and right image edges by adding a first and last row, then concatenate\n",
    "    top = pd.DataFrame(\n",
    "        {\"x_surface\": 0, \"y_surface\": csv.iloc[0][\"y_surface\"], \"x_bed\": 0, \"y_bed\": csv.iloc[0][\"y_bed\"]}, index=[0])\n",
    "    bottom = pd.DataFrame({\"x_surface\": tiff.size[0], \"y_surface\": csv.iloc[-1]\n",
    "                          [\"y_surface\"], \"x_bed\": tiff.size[0], \"y_bed\": csv.iloc[-1][\"y_bed\"]}, index=[0])\n",
    "    csv = pd.concat([top, csv, bottom], ignore_index=True)\n",
    "\n",
    "    # Create a draw object for the image for drawing the polygons\n",
    "    draw = ImageDraw.Draw(img)\n",
    "\n",
    "    # Loop over the rows of the csv file\n",
    "    for i in range(len(csv)-1):\n",
    "        crow = csv.iloc[i]\n",
    "        nrow = csv.iloc[i+1]\n",
    "\n",
    "        # Define the polygons for the sky (above the surface), the ice (surface to bed), and the region below the bed\n",
    "        skycoords = [\n",
    "            (crow[\"x_surface\"], 0),\n",
    "            (nrow[\"x_surface\"], 0),\n",
    "            (nrow[\"x_surface\"], nrow[\"y_surface\"]),\n",
    "            (crow[\"x_surface\"], crow[\"y_surface\"])\n",
    "        ]\n",
    "        bedcoords = [\n",
    "            (crow[\"x_surface\"], crow[\"y_surface\"]),\n",
    "            (nrow[\"x_surface\"], nrow[\"y_surface\"]),\n",
    "            (nrow[\"x_bed\"], nrow[\"y_bed\"]),\n",
    "            (crow[\"x_bed\"], crow[\"y_bed\"])\n",
    "        ]\n",
    "        btmcoords = [\n",
    "            (crow[\"x_bed\"], crow[\"y_bed\"]),\n",
    "            (nrow[\"x_bed\"], nrow[\"y_bed\"]),\n",
    "            (nrow[\"x_bed\"], tiff.size[1]),\n",
    "            (crow[\"x_bed\"], tiff.size[1])\n",
    "        ]\n",
    "\n",
    "        # Draw the polygons; the grayscale fills give pixel values 0 (sky), 1 (ice), and 2 (below the bed)\n",
    "        draw.polygon(skycoords, fill=\"#000000\")\n",
    "        draw.polygon(bedcoords, fill=\"#010101\")\n",
    "        draw.polygon(btmcoords, fill=\"#020202\")\n",
    "\n",
    "    # Save the image with the drawn polygons to the specified path\n",
    "    img.save(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\img_mask_{testset}.png'))"
   ]
  },
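  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick sanity check (a minimal sketch): confirm each generated mask only contains the expected pixel\n",
    "# values 0, 1 and 2. Any other values would mean the polygons did not fully cover that scope's image,\n",
    "# which is worth catching before the masks are cropped and uploaded.\n",
    "\n",
    "for testset in os.listdir(\"secondleg\"):\n",
    "    maskvals = np.unique(np.array(Image.open(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\img_mask_{testset}.png'))))\n",
    "    if not set(maskvals).issubset({0, 1, 2}):\n",
    "        print(f\"{testset}: unexpected mask values {maskvals}\")"
   ]
  },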
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Crop the images and masks in the second leg data set into 400x400 tiles\n",
    "\n",
    "# Loop over all the files in the \"secondleg\" directory\n",
    "for testset in os.listdir(\"secondleg\"):\n",
    "\n",
    "    cimg = Image.open(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\cropped_img_{testset}.png'))\n",
    "\n",
    "    mask = Image.open(pl.Path(\n",
    "        rf'.\\secondleg\\{testset}\\img_mask_{testset}.png'))\n",
    "\n",
    "    # Calculate the sections to crop into: 400 pixel wide windows with start positions 80 pixels apart\n",
    "    cropsection = window_with_remainder(400, 80, cimg.size[0])\n",
    "\n",
    "    # Create the directories for the cropped images and masks if they don't exist yet\n",
    "    os.makedirs(pl.Path(rf'.\\secondleg\\{testset}\\cropped_images'), exist_ok=True)\n",
    "    os.makedirs(pl.Path(rf'.\\secondleg\\{testset}\\cropped_masks'), exist_ok=True)\n",
    "\n",
    "    for i in cropsection:\n",
    "        # Crop the image to the current section, resize it to 400x400, and save it\n",
    "        cimg.crop((i[0], 0, i[1], cimg.size[1])).resize((400, 400)).save(pl.Path(\n",
    "            rf'.\\secondleg\\{testset}\\cropped_images\\cimg-{testset}_{i[0]}_{i[1]}.png'))\n",
    "        # Crop the mask the same way; nearest-neighbour resampling keeps the label values 0/1/2 intact,\n",
    "        # and the \"mask-\" filename pattern is simply a convention that mirrors the image tiles\n",
    "        mask.crop((i[0], 0, i[1], mask.size[1])).resize((400, 400), Image.NEAREST).save(pl.Path(\n",
    "            rf'.\\secondleg\\{testset}\\cropped_masks\\mask-{testset}_{i[0]}_{i[1]}.png'))"
   ]
  },
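  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick check (a minimal sketch): every scope should end up with the same number of cropped images and\n",
    "# cropped masks, otherwise the sorted image/label pairing in the next cell would silently mismatch.\n",
    "\n",
    "for testset in os.listdir(\"secondleg\"):\n",
    "    n_imgs = len(os.listdir(pl.Path(rf'.\\secondleg\\{testset}\\cropped_images')))\n",
    "    n_masks = len(os.listdir(pl.Path(rf'.\\secondleg\\{testset}\\cropped_masks')))\n",
    "    if n_imgs != n_masks:\n",
    "        print(f\"{testset}: {n_imgs} images but {n_masks} masks\")"
   ]
  },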
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a Hugging Face dataset from the cropped images and masks, then log in to the Hub\n",
    "\n",
    "from huggingface_hub import notebook_login\n",
    "# Alias the datasets Image feature so it does not shadow PIL.Image imported above\n",
    "from datasets import Dataset, Image as HFImage\n",
    "from glob import glob\n",
    "\n",
    "images = glob(\"secondleg/*/cropped_images/*.png\")\n",
    "masks = glob(\"secondleg/*/cropped_masks/*.png\")\n",
    "\n",
    "# Create a Dataset from image and label paths; sorting keeps each image aligned with its mask\n",
    "def create_dataset(image_paths, label_paths):\n",
    "    dataset = Dataset.from_dict({\"image\": sorted(image_paths),\n",
    "                                 \"label\": sorted(label_paths)})\n",
    "    dataset = dataset.cast_column(\"image\", HFImage())\n",
    "    dataset = dataset.cast_column(\"label\", HFImage())\n",
    "    return dataset\n",
    "\n",
    "\n",
    "dataset = create_dataset(images, masks)\n",
    "\n",
    "notebook_login()\n"
   ]
  },
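  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional inspection (a minimal sketch): look at the dataset size and one image/label pair before pushing,\n",
    "# to confirm the sorted pairing lined up each image tile with its mask.\n",
    "\n",
    "print(dataset)\n",
    "sample = dataset[0]\n",
    "fig, axes = plt.subplots(1, 2)\n",
    "axes[0].imshow(sample[\"image\"], cmap=\"gray\")\n",
    "axes[0].set_title(\"image\")\n",
    "axes[1].imshow(np.array(sample[\"label\"]), vmin=0, vmax=2)\n",
    "axes[1].set_title(\"mask\")\n",
    "plt.show()"
   ]
  },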
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Push the dataset to the Hugging Face Hub as a private repository\n",
    "dataset.push_to_hub(\"aashraychegu/glacier_scopes\", private=True)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}