JeremyAlain committed
Commit a25f908
1 Parent(s): ad4e0fb

loading script created

Files changed (1)
  1. 123_test.py +23 -23
123_test.py CHANGED
@@ -45,9 +45,9 @@ _LICENSE = "Apache 2.0"
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 
 _URLS = {
-    "data_0": "https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/0/file_0.jsonl",
-    "data_1": "https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/1/file_0.jsonl",
-    "data_2": "https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/2/file_0.jsonl",
+    "data_0": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/0/file_{}.jsonl".format(i) for i in range(20)],
+    "data_1": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/1/file_{}.jsonl".format(i) for i in range(20)],
+    "data_2": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/2/file_{}.jsonl".format(i) for i in range(20)],
 
 }
 logger = datasets.logging.get_logger(__name__)
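
This hunk swaps each config's single URL string for a list of 20 per-file URLs. That works because `datasets.DownloadManager.download_and_extract` mirrors the nesting of whatever it is given, so a list of URLs comes back as a list of local cached paths (for plain `.jsonl` files the "extract" step is a pass-through). A minimal standalone sketch of that contract, trimmed to 2 files instead of 20 to keep it light:

from datasets import DownloadManager

# download_and_extract accepts a str, a list, or a (nested) dict of URLs and
# returns local cached paths in the same shape. Since the new
# _URLS[self.config.name] is a list, the builder now gets a list of paths back.
urls = [
    "https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/0/file_{}.jsonl".format(i)
    for i in range(2)  # only 2 of the 20 files, to keep the sketch light
]
paths = DownloadManager().download_and_extract(urls)
assert len(paths) == len(urls)  # one local path per input URL
print(paths[0])  # e.g. a file under the local HF datasets cache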
@@ -113,34 +113,34 @@ class FewshotPretraining(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
 
-        data_dir = dl_manager.download_and_extract(urls)
+        extracted_paths = dl_manager.download_and_extract(urls)
         return datasets.SplitGenerator(
             name=datasets.Split.TRAIN,
             # These kwargs will be passed to _generate_examples
             gen_kwargs={
-                "folder_path": data_dir,
+                "file_paths": extracted_paths,
                 "split": "train",
             },
         )
 
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, folder_path, split):
+    def _generate_examples(self, file_path, split):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        for filepath in os.listdir(folder_path):
-            with open(filepath, encoding="utf-8") as f:
-                data = pd.read_json(filepath, orient="records", lines=True)
-                for i in range(data.shape[0]):
-                    row = data.iloc[i]
-                    # Yields examples as (key, example) tuples
-                    key = row["task"] + "_i"
-                    yield key, {
-                        "task": data["task"],
-                        "input": data["input"],
-                        "output": data["output"],
-                        "options": data["options"],
-                        "pageTitle": data["pageTitle"],
-                        "outputColName": data["outputColName"],
-                        "url": data["url"],
-                        "wdcFile": data["wdcFile"],
-                    }
+        print("generating {}".format(file_path))
+        with open(file_path, encoding="utf-8") as f:
+            data = pd.read_json(file_path, orient="records", lines=True)
+            for i in range(data.shape[0]):
+                row = data.iloc[i]
+                # Yields examples as (key, example) tuples
+                key = row["task"] + "_i"
+                yield key, {
+                    "task": data["task"],
+                    "input": data["input"],
+                    "output": data["output"],
+                    "options": data["options"],
+                    "pageTitle": data["pageTitle"],
+                    "outputColName": data["outputColName"],
+                    "url": data["url"],
+                    "wdcFile": data["wdcFile"],
+                }
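
As committed, two details in this hunk look inconsistent and would likely fail at generation time. First, `gen_kwargs` passes the plural key "file_paths" (a list of paths), while `_generate_examples` declares a singular `file_path` parameter, so the kwargs would not unpack. Second, the yielded dict indexes whole DataFrame columns (`data["task"]`) instead of the current row (`row["task"]`), and the key appends the literal string `"_i"` rather than the row index, so keys would not be unique. A corrected sketch based on that reading (my assumption, not the committed code):

import pandas as pd

# Hypothetical corrected generator (not the committed code): accepts the
# plural "file_paths" key actually passed in gen_kwargs, iterates every
# downloaded file, reads values from the current row rather than whole
# columns, and builds a key that is unique across files and rows.
def _generate_examples(self, file_paths, split):
    for file_idx, file_path in enumerate(file_paths):
        logger.info("generating examples from %s", file_path)
        # pd.read_json opens the path itself; the unused open() wrapper is dropped
        data = pd.read_json(file_path, orient="records", lines=True)
        for i in range(data.shape[0]):
            row = data.iloc[i]
            key = "{}_{}_{}".format(row["task"], file_idx, i)  # unique per example
            # Yields examples as (key, example) tuples
            yield key, {
                "task": row["task"],
                "input": row["input"],
                "output": row["output"],
                "options": row["options"],
                "pageTitle": row["pageTitle"],
                "outputColName": row["outputColName"],
                "url": row["url"],
                "wdcFile": row["wdcFile"],
            }

Separately, `GeneratorBasedBuilder._split_generators` is expected to return a list of `datasets.SplitGenerator` objects, so the bare `return datasets.SplitGenerator(...)` above would also need to be wrapped in a list.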