Hieu Nguyen committed
Commit 0cae752 · Parent(s): bf757ad
Finalize loading script: load from downloadable files on MinIO instead of a temporary public URL
deep-research.py: +13 -4
deep-research.py CHANGED

@@ -14,9 +14,11 @@
 # TODO: Address all TODOs and remove all explanatory comments
 """TODO: Add a description here."""
 
+import boto3
+from botocore.client import Config
+from botocore import UNSIGNED
 
 import json
-import os
 
 import datasets
 
@@ -48,10 +50,17 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "train": "
-    "dev": "
+    "train": "train.json",
+    "dev": "dev.json"
 }
 
+storage_options = {"anon": True}
+
+def download_file(url, path):
+    s3_anonymous = boto3.client('s3',
+                                endpoint_url='http://52.172.255.53:9000',
+                                config=Config(signature_version=UNSIGNED))
+    s3_anonymous.download_file("hieutn", Key=url, Filename=path)
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class NewDataset(datasets.GeneratorBasedBuilder):
@@ -115,7 +124,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         # urls = _URLS[self.config.name]
-        data_dir = dl_manager.
+        data_dir = dl_manager.download_custom(_URLS, download_file)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,