lisawen committed on
Commit
e5ae10c
·
verified ·
1 Parent(s): b8a2835

Update soybean_dataset.py

Browse files
Files changed (1) hide show
  1. soybean_dataset.py +62 -5
soybean_dataset.py CHANGED
@@ -67,9 +67,9 @@ _LICENSE = "Under a Creative Commons license"
67
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
68
  _URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
69
  _URLs = {
70
- "train" : "https://drive.google.com/file/d/1-5Tdr_OTUUfkjf_UCa5EZOjGdlW683S-/view?usp=sharing",
71
- "test": "https://drive.google.com/file/d/1-2wUyuBTeesGxLuDCvxRcUPdftL-Zen9/view?usp=sharing",
72
- "valid": "https://drive.google.com/file/d/1-1DeSjBY9YlfGCl7CvoU97h7eX95R1eC/view?usp=sharing"
73
  }
74
 
75
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -136,7 +136,8 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
136
 
137
  with open(filepath, encoding="utf-8") as f:
138
  data = csv.DictReader(f)
139
-
 
140
  for row in data:
141
  # Assuming the 'original_image' column has the full path to the image file
142
  unique_id = row['unique_id']
@@ -158,6 +159,28 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
158
  # ... add other features if necessary
159
  }
160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
 
162
 
163
 
@@ -169,4 +192,38 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
169
 
170
 
171
 
172
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"

# Per-split annotation CSVs served as raw GitHub content (replaces the earlier
# Google Drive share links, which dl_manager cannot fetch directly).
# NOTE(review): code later in this file reads `self._URLS` — this constant is
# module-level and spelled `_URLs`; confirm the reference matches this name.
_URLs = {
    "train" : "https://raw.githubusercontent.com/lisawen0707/soybean/main/train_dataset.csv",
    "test": "https://raw.githubusercontent.com/lisawen0707/soybean/main/test_dataset.csv",
    "valid": "https://raw.githubusercontent.com/lisawen0707/soybean/main/valid_dataset.csv"
}

# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 
136
 
137
  with open(filepath, encoding="utf-8") as f:
138
  data = csv.DictReader(f)
139
+
140
+
141
  for row in data:
142
  # Assuming the 'original_image' column has the full path to the image file
143
  unique_id = row['unique_id']
 
159
  # ... add other features if necessary
160
  }
161
 
162
+ # for row in data:
163
+ # # Assuming the 'original_image' column has the full path to the image file
164
+ # unique_id = row['unique_id']
165
+ # original_image_path = row['original_image']
166
+ # segmentation_image_path = row['segmentation_image']
167
+ # sets = row['sets']
168
+
169
+ # original_image_array = self.process_image(original_image_path)
170
+ # segmentation_image_array = self.process_image(segmentation_image_path)
171
+
172
+
173
+ # # Here you need to replace 'initial_radius', 'final_radius', 'initial_angle', 'final_angle', 'target'
174
+ # # with actual columns from your CSV or additional processing you need to do
175
+ # yield row['unique_id'], {
176
+ # "unique_id": unique_id,
177
+ # "sets": sets,
178
+ # "original_image": original_image_array,
179
+ # "segmentation_image": segmentation_image_array,
180
+ # # ... add other features if necessary
181
+ # }
182
+
183
+
184
 
185
 
186
 
 
192
 
193
 
194
 
195
+ #### origin
196
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
197
+ urls_to_download = self._URLS
198
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
199
+
200
+ return [
201
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
202
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
203
+ ]
204
+
205
+ def _generate_examples(self, filepath):
206
+ """This function returns the examples in the raw (text) form."""
207
+ logging.info("generating examples from = %s", filepath)
208
+ with open(filepath) as f:
209
+ squad = json.load(f)
210
+ for article in squad["data"]:
211
+ title = article.get("title", "").strip()
212
+ for paragraph in article["paragraphs"]:
213
+ context = paragraph["context"].strip()
214
+ for qa in paragraph["qas"]:
215
+ question = qa["question"].strip()
216
+ id_ = qa["id"]
217
+
218
+ answer_starts = [answer["answer_start"] for answer in qa["answers"]]
219
+ answers = [answer["text"].strip() for answer in qa["answers"]]
220
+
221
+ # Features currently used are "context", "question", and "answers".
222
+ # Others are extracted here for the ease of future expansions.
223
+ yield id_, {
224
+ "title": title,
225
+ "context": context,
226
+ "question": question,
227
+ "id": id_,
228
+ "answers": {"answer_start": answer_starts, "text": answers,},
229
+ }