hibana2077 committed
Commit 8ff2bc1 · Parent(s): a8c9215

add LICENSE file for Flickr-Faces-HQ dataset and temporary JSON file for download warning

main/temp_data/LICENSE.txt ADDED
@@ -0,0 +1,30 @@
+ Flickr-Faces-HQ (FFHQ) is a high-quality image dataset of human faces,
+ originally created as a benchmark for generative adversarial networks (GAN):
+
+     A Style-Based Generator Architecture for Generative Adversarial Networks
+     Tero Karras (NVIDIA), Samuli Laine (NVIDIA), Timo Aila (NVIDIA)
+     http://stylegan.xyz/paper
+
+ The individual images were published in Flickr by their respective authors
+ under either Creative Commons BY 2.0, Creative Commons BY-NC 2.0,
+ Public Domain Mark 1.0, Public Domain CC0 1.0, or U.S. Government Works
+ license. All of these licenses allow free use, redistribution, and adaptation
+ for non-commercial purposes. However, some of them require giving appropriate
+ credit to the original author, as well as indicating any changes that were
+ made to the images. The license and original author of each image are
+ indicated in the metadata.
+
+ https://creativecommons.org/licenses/by/2.0/
+ https://creativecommons.org/licenses/by-nc/2.0/
+ https://creativecommons.org/publicdomain/mark/1.0/
+ https://creativecommons.org/publicdomain/zero/1.0/
+ http://www.usa.gov/copyright.shtml
+
+ The dataset itself (including JSON metadata, download script, and
+ documentation) is made available under Creative Commons BY-NC-SA 4.0 license
+ by NVIDIA Corporation. You can use, redistribute, and adapt it for
+ non-commercial purposes, as long as you (a) give appropriate credit by
+ citing our paper, (b) indicate any changes that you've made, and
+ (c) distribute any derivative works under the same license.
+
+ https://creativecommons.org/licenses/by-nc-sa/4.0/
main/temp_data/download_ffhq.py ADDED
@@ -0,0 +1,447 @@
+ # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ #
+ # This work is licensed under the Creative Commons
+ # Attribution-NonCommercial-ShareAlike 4.0 International License.
+ # To view a copy of this license, visit
+ # http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
+
+ """Download Flickr-Faces-HQ (FFHQ) dataset to current working directory."""
+
+ import os
+ import sys
+ import requests
+ import html
+ import hashlib
+ import PIL.Image
+ import PIL.ImageFile
+ import numpy as np
+ import scipy.ndimage
+ import threading
+ import queue
+ import time
+ import json
+ import uuid
+ import glob
+ import argparse
+ import itertools
+ import shutil
+ from collections import OrderedDict, defaultdict
+
+ PIL.ImageFile.LOAD_TRUNCATED_IMAGES = True # avoid "Decompressed Data Too Large" error
+
+ #----------------------------------------------------------------------------
+
+ json_spec = dict(file_url='https://drive.google.com/uc?id=16N0RV4fHI6joBuKbQAoG34V_cQk7vxSA', file_path='ffhq-dataset-v2.json', file_size=267793842, file_md5='425ae20f06a4da1d4dc0f46d40ba5fd6')
+
+ tfrecords_specs = [
+     dict(file_url='https://drive.google.com/uc?id=1LnhoytWihRRJ7CfhLQ76F8YxwxRDlZN3', file_path='tfrecords/ffhq/ffhq-r02.tfrecords', file_size=6860000, file_md5='63e062160f1ef9079d4f51206a95ba39'),
+     dict(file_url='https://drive.google.com/uc?id=1LWeKZGZ_x2rNlTenqsaTk8s7Cpadzjbh', file_path='tfrecords/ffhq/ffhq-r03.tfrecords', file_size=17290000, file_md5='54fb32a11ebaf1b86807cc0446dd4ec5'),
+     dict(file_url='https://drive.google.com/uc?id=1Lr7Tiufr1Za85HQ18yg3XnJXstiI2BAC', file_path='tfrecords/ffhq/ffhq-r04.tfrecords', file_size=57610000, file_md5='7164cc5531f6828bf9c578bdc3320e49'),
+     dict(file_url='https://drive.google.com/uc?id=1LnyiayZ-XJFtatxGFgYePcs9bdxuIJO_', file_path='tfrecords/ffhq/ffhq-r05.tfrecords', file_size=218890000, file_md5='050cc7e5fd07a1508eaa2558dafbd9ed'),
+     dict(file_url='https://drive.google.com/uc?id=1Lt6UP201zHnpH8zLNcKyCIkbC-aMb5V_', file_path='tfrecords/ffhq/ffhq-r06.tfrecords', file_size=864010000, file_md5='90bedc9cc07007cd66615b2b1255aab8'),
+     dict(file_url='https://drive.google.com/uc?id=1LwOP25fJ4xN56YpNCKJZM-3mSMauTxeb', file_path='tfrecords/ffhq/ffhq-r07.tfrecords', file_size=3444980000, file_md5='bff839e0dda771732495541b1aff7047'),
+     dict(file_url='https://drive.google.com/uc?id=1LxxgVBHWgyN8jzf8bQssgVOrTLE8Gv2v', file_path='tfrecords/ffhq/ffhq-r08.tfrecords', file_size=13766900000, file_md5='74de4f07dc7bfb07c0ad4471fdac5e67'),
+     dict(file_url='https://drive.google.com/uc?id=1M-ulhD5h-J7sqSy5Y1njUY_80LPcrv3V', file_path='tfrecords/ffhq/ffhq-r09.tfrecords', file_size=55054580000, file_md5='05355aa457a4bd72709f74a81841b46d'),
+     dict(file_url='https://drive.google.com/uc?id=1M11BIdIpFCiapUqV658biPlaXsTRvYfM', file_path='tfrecords/ffhq/ffhq-r10.tfrecords', file_size=220205650000, file_md5='bf43cab9609ab2a27892fb6c2415c11b'),
+ ]
+
+ license_specs = {
+     'json':      dict(file_url='https://drive.google.com/uc?id=1SHafCugkpMZzYhbgOz0zCuYiy-hb9lYX', file_path='LICENSE.txt', file_size=1610, file_md5='724f3831aaecd61a84fe98500079abc2'),
+     'images':    dict(file_url='https://drive.google.com/uc?id=1sP2qz8TzLkzG2gjwAa4chtdB31THska4', file_path='images1024x1024/LICENSE.txt', file_size=1610, file_md5='724f3831aaecd61a84fe98500079abc2'),
+     'thumbs':    dict(file_url='https://drive.google.com/uc?id=1iaL1S381LS10VVtqu-b2WfF9TiY75Kmj', file_path='thumbnails128x128/LICENSE.txt', file_size=1610, file_md5='724f3831aaecd61a84fe98500079abc2'),
+     'wilds':     dict(file_url='https://drive.google.com/uc?id=1rsfFOEQvkd6_Z547qhpq5LhDl2McJEzw', file_path='in-the-wild-images/LICENSE.txt', file_size=1610, file_md5='724f3831aaecd61a84fe98500079abc2'),
+     'tfrecords': dict(file_url='https://drive.google.com/uc?id=1SYUmqKdLoTYq-kqsnPsniLScMhspvl5v', file_path='tfrecords/ffhq/LICENSE.txt', file_size=1610, file_md5='724f3831aaecd61a84fe98500079abc2'),
+ }
+
+ #----------------------------------------------------------------------------
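+
+ # Each spec above is a plain dict describing one downloadable file:
+ # 'file_url' and 'file_path' are required by download_file() below, while
+ # 'file_size', 'file_md5', 'pixel_size', and 'pixel_md5' are optional
+ # validation fields that are checked after the transfer when present.
+ # The per-image specs follow the same schema and come from the JSON metadata.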
+
+ def download_file(session, file_spec, stats, chunk_size=128, num_attempts=10, **kwargs):
+     file_path = file_spec['file_path']
+     file_url = file_spec['file_url']
+     file_dir = os.path.dirname(file_path)
+     tmp_path = file_path + '.tmp.' + uuid.uuid4().hex
+     if file_dir:
+         os.makedirs(file_dir, exist_ok=True)
+
+     for attempts_left in reversed(range(num_attempts)):
+         data_size = 0
+         try:
+             # Download.
+             data_md5 = hashlib.md5()
+             with session.get(file_url, stream=True) as res:
+                 res.raise_for_status()
+                 with open(tmp_path, 'wb') as f:
+                     for chunk in res.iter_content(chunk_size=chunk_size<<10):
+                         f.write(chunk)
+                         data_size += len(chunk)
+                         data_md5.update(chunk)
+                         with stats['lock']:
+                             stats['bytes_done'] += len(chunk)
+
+             # Validate.
+             if 'file_size' in file_spec and data_size != file_spec['file_size']:
+                 raise IOError('Incorrect file size', file_path)
+             if 'file_md5' in file_spec and data_md5.hexdigest() != file_spec['file_md5']:
+                 raise IOError('Incorrect file MD5', file_path)
+             if 'pixel_size' in file_spec or 'pixel_md5' in file_spec:
+                 with PIL.Image.open(tmp_path) as image:
+                     if 'pixel_size' in file_spec and list(image.size) != file_spec['pixel_size']:
+                         raise IOError('Incorrect pixel size', file_path)
+                     if 'pixel_md5' in file_spec and hashlib.md5(np.array(image)).hexdigest() != file_spec['pixel_md5']:
+                         raise IOError('Incorrect pixel MD5', file_path)
+             break
+
+         except:
+             with stats['lock']:
+                 stats['bytes_done'] -= data_size
+
+             # Handle known failure cases.
+             if data_size > 0 and data_size < 8192:
+                 with open(tmp_path, 'rb') as f:
+                     data = f.read()
+                 data_str = data.decode('utf-8')
+
+                 # Google Drive virus checker nag.
+                 links = [html.unescape(link) for link in data_str.split('"') if 'export=download' in link]
+                 if len(links) == 1:
+                     if attempts_left:
+                         file_url = requests.compat.urljoin(file_url, links[0])
+                         continue
+
+                 # Google Drive quota exceeded.
+                 if 'Google Drive - Quota exceeded' in data_str:
+                     if not attempts_left:
+                         raise IOError("Google Drive download quota exceeded -- please try again later")
+
+             # Last attempt => raise error.
+             if not attempts_left:
+                 raise
+
+     # Rename temp file to the correct name.
+     os.replace(tmp_path, file_path) # atomic
+     with stats['lock']:
+         stats['files_done'] += 1
+
+     # Attempt to clean up any leftover temps.
+     for filename in glob.glob(file_path + '.tmp.*'):
+         try:
+             os.remove(filename)
+         except:
+             pass
+
+ #----------------------------------------------------------------------------
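+
+ # For reference (illustrative only, not called anywhere in this script):
+ # a single spec can be fetched directly with the helper above, e.g.
+ #
+ #   stats = dict(files_done=0, bytes_done=0, lock=threading.Lock())
+ #   with requests.Session() as session:
+ #       download_file(session, license_specs['json'], stats)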
+
+ def choose_bytes_unit(num_bytes):
+     b = int(np.rint(num_bytes))
+     if b < (100 << 0): return 'B', (1 << 0)
+     if b < (100 << 10): return 'kB', (1 << 10)
+     if b < (100 << 20): return 'MB', (1 << 20)
+     if b < (100 << 30): return 'GB', (1 << 30)
+     return 'TB', (1 << 40)
+
+ #----------------------------------------------------------------------------
+
+ def format_time(seconds):
+     s = int(np.rint(seconds))
+     if s < 60: return '%ds' % s
+     if s < 60 * 60: return '%dm %02ds' % (s // 60, s % 60)
+     if s < 24 * 60 * 60: return '%dh %02dm' % (s // (60 * 60), (s // 60) % 60)
+     if s < 100 * 24 * 60 * 60: return '%dd %02dh' % (s // (24 * 60 * 60), (s // (60 * 60)) % 24)
+     return '>100d'
+
+ #----------------------------------------------------------------------------
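+
+ # Example values (illustrative): choose_bytes_unit(2_000_000) returns
+ # ('MB', 1 << 20), and format_time(3661) returns '1h 01m'.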
+
+ def download_files(file_specs, num_threads=32, status_delay=0.2, timing_window=50, **download_kwargs):
+
+     # Determine which files to download.
+     done_specs = {spec['file_path']: spec for spec in file_specs if os.path.isfile(spec['file_path'])}
+     missing_specs = [spec for spec in file_specs if spec['file_path'] not in done_specs]
+     files_total = len(file_specs)
+     bytes_total = sum(spec['file_size'] for spec in file_specs)
+     stats = dict(files_done=len(done_specs), bytes_done=sum(spec['file_size'] for spec in done_specs.values()), lock=threading.Lock())
+     if len(done_specs) == files_total:
+         print('All files already downloaded -- skipping.')
+         return
+
+     # Launch worker threads.
+     spec_queue = queue.Queue()
+     exception_queue = queue.Queue()
+     for spec in missing_specs:
+         spec_queue.put(spec)
+     thread_kwargs = dict(spec_queue=spec_queue, exception_queue=exception_queue, stats=stats, download_kwargs=download_kwargs)
+     for _thread_idx in range(min(num_threads, len(missing_specs))):
+         threading.Thread(target=_download_thread, kwargs=thread_kwargs, daemon=True).start()
+
+     # Monitor status until done.
+     bytes_unit, bytes_div = choose_bytes_unit(bytes_total)
+     spinner = '/-\\|'
+     timing = []
+     while True:
+         with stats['lock']:
+             files_done = stats['files_done']
+             bytes_done = stats['bytes_done']
+         spinner = spinner[1:] + spinner[:1]
+         timing = timing[max(len(timing) - timing_window + 1, 0):] + [(time.time(), bytes_done)]
+         bandwidth = max((timing[-1][1] - timing[0][1]) / max(timing[-1][0] - timing[0][0], 1e-8), 0)
+         bandwidth_unit, bandwidth_div = choose_bytes_unit(bandwidth)
+         eta = format_time((bytes_total - bytes_done) / max(bandwidth, 1))
+
+         print('\r%s %6.2f%% done  %d/%d files  %-13s  %-10s  ETA: %-7s ' % (
+             spinner[0],
+             bytes_done / bytes_total * 100,
+             files_done, files_total,
+             '%.2f/%.2f %s' % (bytes_done / bytes_div, bytes_total / bytes_div, bytes_unit),
+             '%.2f %s/s' % (bandwidth / bandwidth_div, bandwidth_unit),
+             'done' if bytes_total == bytes_done else '...' if len(timing) < timing_window or bandwidth == 0 else eta,
+         ), end='', flush=True)
+
+         if files_done == files_total:
+             print()
+             break
+
+         try:
+             exc_info = exception_queue.get(timeout=status_delay)
+             raise exc_info[1].with_traceback(exc_info[2])
+         except queue.Empty:
+             pass
+
+ def _download_thread(spec_queue, exception_queue, stats, download_kwargs):
+     with requests.Session() as session:
+         while not spec_queue.empty():
+             spec = spec_queue.get()
+             try:
+                 download_file(session, spec, stats, **download_kwargs)
+             except:
+                 exception_queue.put(sys.exc_info())
+
+ #----------------------------------------------------------------------------
+
+ def print_statistics(json_data):
+     categories = defaultdict(int)
+     licenses = defaultdict(int)
+     countries = defaultdict(int)
+     for item in json_data.values():
+         categories[item['category']] += 1
+         licenses[item['metadata']['license']] += 1
+         country = item['metadata']['country']
+         countries[country if country else '<Unknown>'] += 1
+
+     for name in [name for name, num in countries.items() if num / len(json_data) < 1e-3]:
+         countries['<Other>'] += countries.pop(name)
+
+     rows = [[]] * 2
+     rows += [['Category', 'Images', '% of all']]
+     rows += [['---'] * 3]
+     for name, num in sorted(categories.items(), key=lambda x: -x[1]):
+         rows += [[name, '%d' % num, '%.2f' % (100.0 * num / len(json_data))]]
+
+     rows += [[]] * 2
+     rows += [['License', 'Images', '% of all']]
+     rows += [['---'] * 3]
+     for name, num in sorted(licenses.items(), key=lambda x: -x[1]):
+         rows += [[name, '%d' % num, '%.2f' % (100.0 * num / len(json_data))]]
+
+     rows += [[]] * 2
+     rows += [['Country', 'Images', '% of all', '% of known']]
+     rows += [['---'] * 4]
+     for name, num in sorted(countries.items(), key=lambda x: -x[1] if x[0] != '<Other>' else 0):
+         rows += [[name, '%d' % num, '%.2f' % (100.0 * num / len(json_data)),
+             '%.2f' % (0 if name == '<Unknown>' else 100.0 * num / (len(json_data) - countries['<Unknown>']))]]
+
+     rows += [[]] * 2
+     widths = [max(len(cell) for cell in column if cell is not None) for column in itertools.zip_longest(*rows)]
+     for row in rows:
+         print("  ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
+
+ #----------------------------------------------------------------------------
+
+ def recreate_aligned_images(json_data, source_dir, dst_dir='realign1024x1024', output_size=1024, transform_size=4096, enable_padding=True, rotate_level=True, random_shift=0.0, retry_crops=False):
+     print('Recreating aligned images...')
+
+     # Fix random seed for reproducibility.
+     np.random.seed(12345)
+     # The following random numbers are unused in the present implementation, but we consume them for reproducibility.
+     _ = np.random.normal(0, 1, (len(json_data.values()), 2))
+
+     if dst_dir:
+         os.makedirs(dst_dir, exist_ok=True)
+         shutil.copyfile('LICENSE.txt', os.path.join(dst_dir, 'LICENSE.txt'))
+
+     for item_idx, item in enumerate(json_data.values()):
+         print('\r%d / %d ... ' % (item_idx, len(json_data)), end='', flush=True)
+
+         # Parse landmarks.
+         # pylint: disable=unused-variable
+         lm = np.array(item['in_the_wild']['face_landmarks'])
+         lm_chin = lm[0 : 17] # left-right
+         lm_eyebrow_left = lm[17 : 22] # left-right
+         lm_eyebrow_right = lm[22 : 27] # left-right
+         lm_nose = lm[27 : 31] # top-down
+         lm_nostrils = lm[31 : 36] # top-down
+         lm_eye_left = lm[36 : 42] # left-clockwise
+         lm_eye_right = lm[42 : 48] # left-clockwise
+         lm_mouth_outer = lm[48 : 60] # left-clockwise
+         lm_mouth_inner = lm[60 : 68] # left-clockwise
+
+         # Calculate auxiliary vectors.
+         eye_left = np.mean(lm_eye_left, axis=0)
+         eye_right = np.mean(lm_eye_right, axis=0)
+         eye_avg = (eye_left + eye_right) * 0.5
+         eye_to_eye = eye_right - eye_left
+         mouth_left = lm_mouth_outer[0]
+         mouth_right = lm_mouth_outer[6]
+         mouth_avg = (mouth_left + mouth_right) * 0.5
+         eye_to_mouth = mouth_avg - eye_avg
+
+         # Choose oriented crop rectangle.
+         if rotate_level:
+             x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
+             x /= np.hypot(*x)
+             x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
+             y = np.flipud(x) * [-1, 1]
+             c0 = eye_avg + eye_to_mouth * 0.1
+         else:
+             x = np.array([1, 0], dtype=np.float64)
+             x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
+             y = np.flipud(x) * [-1, 1]
+             c0 = eye_avg + eye_to_mouth * 0.1
+
+         # Load in-the-wild image.
+         src_file = os.path.join(source_dir, item['in_the_wild']['file_path'])
+         if not os.path.isfile(src_file):
+             print('\nCannot find source image. Please run "--wilds" before "--align".')
+             return
+         img = PIL.Image.open(src_file)
+
+         quad = np.stack([c0 - x - y, c0 - x + y, c0 + x + y, c0 + x - y])
+         qsize = np.hypot(*x) * 2
+
+         # Keep drawing new random crop offsets until we find one that is contained in the image
+         # and does not require padding.
+         if random_shift != 0:
+             for _ in range(1000):
+                 # Offset the crop rectangle center by a random shift proportional to image dimension
+                 # and the requested standard deviation.
+                 c = (c0 + np.hypot(*x)*2 * random_shift * np.random.normal(0, 1, c0.shape))
+                 quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
+                 crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
+                 if not retry_crops or not (crop[0] < 0 or crop[1] < 0 or crop[2] >= img.width or crop[3] >= img.height):
+                     # We're happy with this crop (either it fits within the image, or retries are disabled).
+                     break
+             else:
+                 # Rejected N times, give up and move to the next image
+                 # (does not happen in practice with the FFHQ data).
+                 print('rejected image')
+                 return
+
+         # Shrink.
+         shrink = int(np.floor(qsize / output_size * 0.5))
+         if shrink > 1:
+             rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
+             img = img.resize(rsize, PIL.Image.ANTIALIAS)
+             quad /= shrink
+             qsize /= shrink
+
+         # Crop.
+         border = max(int(np.rint(qsize * 0.1)), 3)
+         crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
+         crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
+         if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
+             img = img.crop(crop)
+             quad -= crop[0:2]
+
+         # Pad.
+         pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
+         pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
+         if enable_padding and max(pad) > border - 4:
+             pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
+             img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
+             h, w, _ = img.shape
+             y, x, _ = np.ogrid[:h, :w, :1]
+             mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
+             blur = qsize * 0.02
+             img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
+             img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
+             img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
+             quad += pad[:2]
+
+         # Transform.
+         img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
+         if output_size < transform_size:
+             img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
+
+         # Save aligned image.
+         dst_subdir = os.path.join(dst_dir, '%05d' % (item_idx - item_idx % 1000))
+         os.makedirs(dst_subdir, exist_ok=True)
+         img.save(os.path.join(dst_subdir, '%05d.png' % item_idx))
+
+     # All done.
+     print('\r%d / %d ... done' % (len(json_data), len(json_data)))
+
+ #----------------------------------------------------------------------------
+
+ def run(tasks, **download_kwargs):
+     if not os.path.isfile(json_spec['file_path']) or not os.path.isfile('LICENSE.txt'):
+         print('Downloading JSON metadata...')
+         download_files([json_spec, license_specs['json']], **download_kwargs)
+
+     print('Parsing JSON metadata...')
+     with open(json_spec['file_path'], 'rb') as f:
+         json_data = json.load(f, object_pairs_hook=OrderedDict)
+
+     if 'stats' in tasks:
+         print_statistics(json_data)
+
+     specs = []
+     if 'images' in tasks:
+         specs += [item['image'] for item in json_data.values()] + [license_specs['images']]
+     if 'thumbs' in tasks:
+         specs += [item['thumbnail'] for item in json_data.values()] + [license_specs['thumbs']]
+     if 'wilds' in tasks:
+         specs += [item['in_the_wild'] for item in json_data.values()] + [license_specs['wilds']]
+     if 'tfrecords' in tasks:
+         specs += tfrecords_specs + [license_specs['tfrecords']]
+
+     if len(specs):
+         print('Downloading %d files...' % len(specs))
+         np.random.shuffle(specs) # to make the workload more homogeneous
+         download_files(specs, **download_kwargs)
+
+     if 'align' in tasks:
+         recreate_aligned_images(json_data, source_dir=download_kwargs['source_dir'], rotate_level=not download_kwargs['no_rotation'], random_shift=download_kwargs['random_shift'], enable_padding=not download_kwargs['no_padding'], retry_crops=download_kwargs['retry_crops'])
+
+ #----------------------------------------------------------------------------
+
+ def run_cmdline(argv):
+     parser = argparse.ArgumentParser(prog=argv[0], description='Download Flickr-Faces-HQ (FFHQ) dataset to current working directory.')
+     parser.add_argument('-j', '--json', help='download metadata as JSON (254 MB)', dest='tasks', action='append_const', const='json')
+     parser.add_argument('-s', '--stats', help='print statistics about the dataset', dest='tasks', action='append_const', const='stats')
+     parser.add_argument('-i', '--images', help='download 1024x1024 images as PNG (89.1 GB)', dest='tasks', action='append_const', const='images')
+     parser.add_argument('-t', '--thumbs', help='download 128x128 thumbnails as PNG (1.95 GB)', dest='tasks', action='append_const', const='thumbs')
+     parser.add_argument('-w', '--wilds', help='download in-the-wild images as PNG (955 GB)', dest='tasks', action='append_const', const='wilds')
+     parser.add_argument('-r', '--tfrecords', help='download multi-resolution TFRecords (273 GB)', dest='tasks', action='append_const', const='tfrecords')
+     parser.add_argument('-a', '--align', help='recreate 1024x1024 images from in-the-wild images', dest='tasks', action='append_const', const='align')
+     parser.add_argument('--num_threads', help='number of concurrent download threads (default: 32)', type=int, default=32, metavar='NUM')
+     parser.add_argument('--status_delay', help='time between download status prints (default: 0.2)', type=float, default=0.2, metavar='SEC')
+     parser.add_argument('--timing_window', help='samples for estimating download eta (default: 50)', type=int, default=50, metavar='LEN')
+     parser.add_argument('--chunk_size', help='chunk size for each download thread (default: 128)', type=int, default=128, metavar='KB')
+     parser.add_argument('--num_attempts', help='number of download attempts per file (default: 10)', type=int, default=10, metavar='NUM')
+     parser.add_argument('--random-shift', help='standard deviation of random crop rectangle jitter', type=float, default=0.0, metavar='SHIFT')
+     parser.add_argument('--retry-crops', help='retry random shift if crop rectangle falls outside image (up to 1000 times)', dest='retry_crops', default=False, action='store_true')
+     parser.add_argument('--no-rotation', help='keep the original orientation of images', dest='no_rotation', default=False, action='store_true')
+     parser.add_argument('--no-padding', help='do not apply blur-padding outside and near the image borders', dest='no_padding', default=False, action='store_true')
+     parser.add_argument('--source-dir', help='where to find already downloaded FFHQ source data', default='', metavar='DIR')
+
+     args = parser.parse_args()
+     if not args.tasks:
+         print('No tasks specified. Please see "-h" for help.')
+         exit(1)
+     run(**vars(args))
+
+ #----------------------------------------------------------------------------
+
+ if __name__ == "__main__":
+     run_cmdline(sys.argv)
+
+ #----------------------------------------------------------------------------
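
Usage note (inferred from the argparse flags above, not part of the committed file): the script does nothing without at least one task flag, e.g.

    python download_ffhq.py --json --stats    # fetch metadata, print dataset statistics
    python download_ffhq.py --thumbs          # fetch 128x128 thumbnails (1.95 GB)
    python download_ffhq.py --wilds --align   # fetch in-the-wild images, then recreate aligned 1024x1024 crops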
main/temp_data/ffhq-dataset-v2.json.tmp.643c371ce6e445d1a35409d2f07ca20d ADDED
@@ -0,0 +1 @@
+ <!DOCTYPE html><html><head><title>Google Drive - Virus scan warning</title><meta http-equiv="content-type" content="text/html; charset=utf-8"/><style nonce="BClLfRWu_3akhFm1bkkxgA">.goog-link-button{position:relative;color:#15c;text-decoration:underline;cursor:pointer}.goog-link-button-disabled{color:#ccc;text-decoration:none;cursor:default}body{color:#222;font:normal 13px/1.4 arial,sans-serif;margin:0}.grecaptcha-badge{visibility:hidden}.uc-main{padding-top:50px;text-align:center}#uc-dl-icon{display:inline-block;margin-top:16px;padding-right:1em;vertical-align:top}#uc-text{display:inline-block;max-width:68ex;text-align:left}.uc-error-caption,.uc-warning-caption{color:#222;font-size:16px}#uc-download-link{text-decoration:none}.uc-name-size a{color:#15c;text-decoration:none}.uc-name-size a:visited{color:#61c;text-decoration:none}.uc-name-size a:active{color:#d14836;text-decoration:none}.uc-footer{color:#777;font-size:11px;padding-bottom:5ex;padding-top:5ex;text-align:center}.uc-footer a{color:#15c}.uc-footer a:visited{color:#61c}.uc-footer a:active{color:#d14836}.uc-footer-divider{color:#ccc;width:100%}.goog-inline-block{position:relative;display:-moz-inline-box;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}sentinel{}</style><link rel="icon" href="//ssl.gstatic.com/docs/doclist/images/drive_2022q3_32dp.png"/></head><body><div class="uc-main"><div id="uc-dl-icon" class="image-container"><div class="drive-sprite-aux-download-file"></div></div><div id="uc-text"><p class="uc-warning-caption">Google Drive can't scan this file for viruses.</p><p class="uc-warning-subcaption"><span class="uc-name-size"><a href="/open?id=16N0RV4fHI6joBuKbQAoG34V_cQk7vxSA">ffhq-dataset-v2.json</a> (255M)</span> is too large for Google to scan for viruses. Would you still like to download this file?</p><form id="download-form" action="https://drive.usercontent.google.com/download" method="get"><input type="submit" id="uc-download-link" class="goog-inline-block jfk-button jfk-button-action" value="Download anyway"/><input type="hidden" name="id" value="16N0RV4fHI6joBuKbQAoG34V_cQk7vxSA"><input type="hidden" name="confirm" value="t"><input type="hidden" name="uuid" value="709e518a-d83c-4a01-89c8-7c84852cf844"></form></div></div><div class="uc-footer"><hr class="uc-footer-divider"></div></body></html>
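
Note: this temporary file contains Google Drive's "virus scan warning" interstitial rather than the JSON metadata it stands in for. download_file() handles the older form of this nag by scanning small HTML responses for an 'export=download' link and retrying against it; the page captured here exposes its confirmation URL only through a form pointing at drive.usercontent.google.com, with no 'export=download' link present, so that retry branch cannot fire and the partial .tmp file is left behind (leftover temps are only cleaned up after a successful download of the same file).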