Commit b34b79d (1 parent: 349ca84)

Fix MultiSim.py to download large files properly from huggingface

MultiSim.py CHANGED (+16 -24)
@@ -18,6 +18,7 @@ import pandas as pd
 import os
 from collections import defaultdict
 import urllib.parse
+from huggingface_hub import snapshot_download
 
 
 import datasets
@@ -300,34 +301,27 @@ class MultiSim(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        download_urls = {}
+        dataset_path = snapshot_download(repo_id="MichaelR207/MultiSim", repo_type="dataset")
+
+        filepaths = []
         if (self.config.name == 'all'):
             for subcorpus in _SUBCORPORA:
-                download_urls[subcorpus + "-train"] = _URLS[subcorpus+"-train"]
-                download_urls[subcorpus + "-test"] = _URLS[subcorpus+"-test"]
-                download_urls[subcorpus + "-val"] = _URLS[subcorpus+"-val"]
+                filepaths.append(os.path.join(dataset_path,_SUBCORPORA[subcorpus]['path']))
         elif (self.config.name in _LANGUAGES):
             lang_code = _LANGUAGES[self.config.name]
             for subcorpus in _SUBCORPORA:
                 if _SUBCORPORA[subcorpus]['language'] == lang_code:
-                    download_urls[subcorpus + "-train"] = _URLS[subcorpus+"-train"]
-                    download_urls[subcorpus + "-test"] = _URLS[subcorpus+"-test"]
-                    download_urls[subcorpus + "-val"] = _URLS[subcorpus+"-val"]
+                    filepaths.append(os.path.join(dataset_path,_SUBCORPORA[subcorpus]['path']))
         elif (self.config.name in _SUBCORPORA):
-            download_urls[self.config.name + "-train"] = _URLS[self.config.name+"-train"]
-            download_urls[self.config.name + "-test"] = _URLS[self.config.name+"-test"]
-            download_urls[self.config.name + "-val"] = _URLS[self.config.name+"-val"]
+            filepaths = [os.path.join(dataset_path,_SUBCORPORA[self.config.name]['path'])]
         else:
             print("Invalid configuration name: " + self.config.name + ". Try 'all', 'English', 'ASSET', etc.")
-
-        downloaded_files = dl_manager.download_and_extract(download_urls)
-
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": downloaded_files,
+                    "filepaths": filepaths,
                     "split": "train",
                 },
             ),
@@ -335,7 +329,7 @@ class MultiSim(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": downloaded_files,
+                    "filepaths": filepaths,
                     "split": "val",
                 },
             ),
@@ -343,7 +337,7 @@ class MultiSim(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": downloaded_files,
+                    "filepaths": filepaths,
                     "split": "test"
                 },
             ),
@@ -355,18 +349,16 @@ class MultiSim(datasets.GeneratorBasedBuilder):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         df = pd.DataFrame()
 
-        if (len(filepaths…
-            for …
-                if os.path.exists(…
-                    …
-                    df = pd.concat([df, pd.read_csv(path)])
+        if (len(filepaths) > 1):
+            for filepath in filepaths:
+                if os.path.exists(filepath + "_" + split + ".csv"):
+                    df = pd.concat([df, pd.read_csv(filepath + "_" + split + ".csv")])
 
             # shuffle the combined dataset
             df = df.sample(frac=1, random_state=3600).reset_index(drop=True)
         else:
-            …
-            …
-            df = pd.read_csv(filepaths[dataset])
+            if os.path.exists(filepaths[0] + "_" + split + ".csv"):
+                df = pd.read_csv(filepaths[0] + "_" + split + ".csv")
 
         if len(df) > 0:
             for key, row in df.iterrows():
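For context, here is a minimal standalone sketch of the download pattern this commit adopts. The repo id MichaelR207/MultiSim comes from the diff; the subcorpus path below is a hypothetical stand-in for a _SUBCORPORA[...]['path'] value in the script. snapshot_download fetches the whole dataset repository into the local cache (including the large Git LFS files that, per the commit title, the previous per-URL download mishandled) and returns the snapshot directory; the per-split CSVs are then read from it, mirroring the updated _generate_examples.

import os

import pandas as pd
from huggingface_hub import snapshot_download

# Download the full dataset repo once; returns the local snapshot directory.
dataset_path = snapshot_download(repo_id="MichaelR207/MultiSim", repo_type="dataset")

# Hypothetical subcorpus path; the real script reads these from _SUBCORPORA[...]['path'].
base = os.path.join(dataset_path, "data/asset")

# Each split lives in "<path>_<split>.csv", as in the updated _generate_examples.
for split in ("train", "val", "test"):
    csv_path = base + "_" + split + ".csv"
    if os.path.exists(csv_path):
        df = pd.read_csv(csv_path)
        print(split, len(df))

Consumers keep loading the dataset through the script as before, e.g. datasets.load_dataset("MichaelR207/MultiSim", "English"); recent datasets releases additionally require trust_remote_code=True for script-based datasets. The snapshot_download call itself runs inside _split_generators in place of dl_manager.download_and_extract.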