Datasets:
Commit
·
93666f2
1
Parent(s):
a5c7288
put everything in one file
Browse files
odor.py
CHANGED
@@ -21,8 +21,14 @@ import os
|
|
21 |
import pandas as pd
|
22 |
|
23 |
import datasets
|
24 |
-
import
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
|
27 |
|
28 |
_CITATION = """\
|
@@ -74,11 +80,43 @@ class ODOR(datasets.GeneratorBasedBuilder):
|
|
74 |
)
|
75 |
|
76 |
def _split_generators(self, dl_manager):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
77 |
imgs_dir = f'{self.cache_dir}/images'
|
78 |
csv_pth = 'meta/meta.csv'
|
79 |
if not os.path.isdir(imgs_dir):
|
80 |
os.makedirs(imgs_dir)
|
81 |
-
img_pths =
|
82 |
|
83 |
return [
|
84 |
datasets.SplitGenerator(
|
@@ -137,12 +175,14 @@ class ODOR(datasets.GeneratorBasedBuilder):
|
|
137 |
}
|
138 |
idx += 1
|
139 |
|
140 |
-
|
141 |
-
#
|
142 |
-
#
|
|
|
|
|
143 |
|
144 |
-
#
|
145 |
|
146 |
-
|
147 |
|
148 |
-
|
|
|
import time
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool

import pandas as pd
import requests
from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema, Timeout
from tqdm import tqdm

import datasets


|
34 |
_CITATION = """\
|
|
|
80 |
)
|
81 |
|
82 |
def _split_generators(self, dl_manager):
|
83 |
+
def _download_one(entry, overwrite=False):
    """Download a single image to disk, retrying on transient failures.

    Args:
        entry: sequence of (file_name, uri, target_dir, retries) as built
            by `_download_all`.
        overwrite: if True, re-download even when the file already exists.

    Returns:
        The sanitized file name (slashes replaced by underscores), whether
        or not the download ultimately succeeded — callers cannot tell a
        failed download apart from a successful one by the return value.
    """
    fn, uri, target_pth, retries = entry
    # Slashes in the metadata file name would be treated as
    # sub-directories; flatten them so every image lands in target_pth.
    fn = fn.replace("/", "_")
    path = f'{target_pth}/{fn}'
    if os.path.exists(path) and not overwrite:
        return fn

    for i in range(retries):
        try:
            r = requests.get(uri, stream=True, timeout=50)
        # Catch the requests base class: it covers MissingSchema, Timeout,
        # ConnectionError AND InvalidSchema. The original tuple named
        # InvalidSchema without importing it, so any request failure
        # raised NameError instead of retrying.
        except requests.exceptions.RequestException:
            time.sleep(i)  # linear back-off: 0s, 1s, 2s, ...
            continue

        if r.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in r:
                    f.write(chunk)
            return fn

        # Non-200 response: back off and try again.
        time.sleep(i)

    # All retries exhausted.
    return fn
def _download_all(metadata_pth, target_pth, retries=3):
    """Fetch every image listed in the metadata CSV into `target_pth`.

    Reads the 'File Name' and 'Image Credits' columns from the CSV at
    `metadata_pth` and downloads each image via `_download_one` on a
    thread pool, with a tqdm progress bar.

    Returns:
        List of sanitized file names reported by the workers, in input order.
    """
    meta = pd.read_csv(metadata_pth)
    jobs = [
        [name, credit, target_pth, retries]
        for name, credit in meta[['File Name', 'Image Credits']].values
    ]
    # Leave one core free for the main process.
    workers = max(1, cpu_count() - 1)
    with ThreadPool(workers) as pool:
        progress = tqdm(pool.imap(_download_one, jobs), total=len(jobs))
        return list(progress)
115 |
imgs_dir = f'{self.cache_dir}/images'
|
116 |
csv_pth = 'meta/meta.csv'
|
117 |
if not os.path.isdir(imgs_dir):
|
118 |
os.makedirs(imgs_dir)
|
119 |
+
img_pths = _download_all(csv_pth, imgs_dir)
|
120 |
|
121 |
return [
|
122 |
datasets.SplitGenerator(
|
|
|
175 |
}
|
176 |
idx += 1
|
177 |
|
178 |
+
if __name__ == '__main__':
|
179 |
+
# ds_builder = ODOR()
|
180 |
+
# n_processes = min(1, multiprocessing.cpu_count()-1)
|
181 |
+
|
182 |
+
# ds_builder.download_and_prepare()
|
183 |
|
184 |
+
# ds = ds_builder.as_dataset()
|
185 |
|
186 |
+
ds = datasets.load_dataset('mathiaszinnen/odor')
|
187 |
|
188 |
+
print('ay')
|