lisawen committed on
Commit 37f5d28 · verified · 1 Parent(s): e026edc

Update soybean_dataset.py

Files changed (1)
  1. soybean_dataset.py +29 -155
soybean_dataset.py CHANGED
@@ -121,184 +121,58 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
        ]

-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-
-
-import csv
-import json
-import os
-from typing import List
-import datasets
-import logging
-import csv
-import numpy as np
-from PIL import Image
-import os
-import io
-import pandas as pd
-import matplotlib.pyplot as plt
-from numpy import asarray
-import requests
-from io import BytesIO
-from numpy import asarray
-
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@article{chen2023dataset,
-  title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis},
-  author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J},
-  journal={Data in Brief},
-  volume={52},
-  pages={109833},
-  year={2023},
-  publisher={Elsevier},
-  doi={10.1016/j.dib.2023.109833}
-}
-
-"""
-
-# TODO: Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-This dataset contains images captured during the mechanized harvesting of soybeans, aimed at facilitating the development of machine vision and deep learning models for quality analysis. It contains information of original soybean pictures in different forms, labels of whether the soybean belongs to training, validation, or testing datasets, segmentation class of soybean pictures in one dataset.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset"
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = "Under a Creative Commons license"
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
-_URLs = {
-    "train" : "https://raw.githubusercontent.com/lisawen0707/soybean/main/train_dataset.csv",
-    "test": "https://raw.githubusercontent.com/lisawen0707/soybean/main/test_dataset.csv",
-    "valid": "https://raw.githubusercontent.com/lisawen0707/soybean/main/valid_dataset.csv"
-}
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class SoybeanDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    _URLs = _URLs
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        # raise ValueError('woops!')
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "unique_id": datasets.Value("string"),
-                    "sets": datasets.Value("string"),
-                    "original_image": datasets.Image(),
-                    "segmentation_image": datasets.Image(),
-
-                }
-            ),
-            # No default supervised_keys (as we have to pass both question
-            # and context as input).
-            supervised_keys=("original_image","segmentation_image"),
-            homepage="https://github.com/lisawen0707/soybean/tree/main",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-        # Since the dataset is on Google Drive, you need to implement a way to download it using the Google Drive API.
-
-        # The path to the dataset file in Google Drive
-        urls_to_download = self._URLs
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        # Since we're using a local file, we don't need to download it, so we just return the path.
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
-        ]
-
-    def download_image(self, image_url):
-        try:
-            response = requests.get(image_url)
-            response.raise_for_status()  # This will raise an exception for HTTP errors
-            img = Image.open(BytesIO(response.content))
-            return img
-        except requests.RequestException as e:
-            logging.error(f"Error downloading {image_url}: {e}")
-            return None
-
-    def download_images_in_batch(self, image_urls):
-        images = {}
-        with ThreadPoolExecutor() as executor:
-            future_to_url = {executor.submit(self.download_image, url): url for url in image_urls}
-            for future in as_completed(future_to_url):
-                url = future_to_url[future]
-                try:
-                    image = future.result()
-                    if image:
-                        images[url] = image
-                except Exception as e:
-                    logging.error(f"Error processing {url}: {e}")
-        return images
+    def __init__(self, max_workers=5):
+        # Initialize a ThreadPoolExecutor with the desired number of workers
+        self.executor = ThreadPoolExecutor(max_workers=max_workers)
+
+    def process_image(self, image_url):
+        # This function is now a static method that doesn't need self
+        response = requests.get(image_url)
+        response.raise_for_status()  # This will raise an exception if there is a download error
+        img = Image.open(BytesIO(response.content))
+        return img
+
+    def download_images(self, image_urls):
+        # Use the executor to download images concurrently
+        # and return a future to image map
+        future_to_url = {self.executor.submit(self.process_image, url): url for url in image_urls}
+        return future_to_url

    def _generate_examples(self, filepath):
-        logging.info(f"Generating examples from = {filepath}")
+        logging.info("generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            data = csv.DictReader(f)
-            image_urls = [row['original_image'] for row in data] + [row['segmentation_image'] for row in data]
-            # Remove duplicates and None values
-            image_urls = list(set(filter(None, image_urls)))

-            # Download images in batch
-            images = self.download_images_in_batch(image_urls)
+            # Create a set to collect all unique image URLs to download
+            image_urls = {row['original_image'] for row in data}
+            image_urls.update(row['segmentation_image'] for row in data)
+
+            # Start the batch download
+            future_to_url = self.download_images(image_urls)

-            # Reset file pointer to the beginning to iterate again
+            # Reset the file pointer to the start for the second pass
            f.seek(0)
-            data = csv.DictReader(f)
+            next(data) # Skip header

            for row in data:
                unique_id = row['unique_id']
                original_image_url = row['original_image']
                segmentation_image_url = row['segmentation_image']
+                sets = row['sets']

-                original_image = images.get(original_image_url)
-                segmentation_image = images.get(segmentation_image_url)
-
-                if not original_image or not segmentation_image:
-                    logging.warning(f"Missing image for {unique_id}, skipping example.")
-                    continue
+                # Wait for the individual image futures to complete and get the result
+                original_image = future_to_url[self.executor.submit(self.process_image, original_image_url)].result()
+                segmentation_image = future_to_url[self.executor.submit(self.process_image, segmentation_image_url)].result()

                yield unique_id, {
                    "unique_id": unique_id,
-                    "sets": row['sets'],
+                    "sets": sets,
                    "original_image": original_image,
                    "segmentation_image": segmentation_image,
                    # ... add other features if necessary
-            }
+                }

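For reference, the concurrency pattern the new download_images helper relies on is a plain concurrent.futures fan-out: submit one download per URL to a ThreadPoolExecutor, then collect the finished futures keyed by URL so a later pass over the split CSV can look each image up directly. The sketch below shows that pattern in isolation; fetch_image, fetch_images, and the example URLs are illustrative placeholders and not part of the committed script.

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from io import BytesIO

import requests
from PIL import Image


def fetch_image(url):
    # Download one image and decode it with Pillow; raises on HTTP errors.
    # (Hypothetical helper for illustration, not from the committed script.)
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return Image.open(BytesIO(response.content))


def fetch_images(urls, max_workers=5):
    # Submit one download per URL, then map each finished future back to its
    # URL so callers can look images up by URL afterwards.
    images = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_url = {executor.submit(fetch_image, url): url for url in urls}
        for future in as_completed(future_to_url):
            url = future_to_url[future]
            try:
                images[url] = future.result()
            except requests.RequestException as exc:
                print(f"skipping {url}: {exc}")
    return images


if __name__ == "__main__":
    # Placeholder URLs; in practice these would come from the original_image
    # and segmentation_image columns of the split CSVs.
    urls = ["https://example.com/a.jpg", "https://example.com/b.jpg"]
    downloaded = fetch_images(urls)
    print(f"downloaded {len(downloaded)} of {len(urls)} images")
```

Building the URL-to-image dict once, before the second pass over the CSV, means each unique URL is fetched a single time even when it appears in several rows.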