|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""VGGFace2-HQ audio-visual human speech dataset.""" |
|
|
|
import re

import pandas as pd

import datasets
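
# Usage sketch (an assumption, not part of this script): once published as the
# loading script of the "ProgramComputer/VGGFace2-HQ" Hub repository referenced
# in _URLS below, the dataset could be loaded with:
#
#   from datasets import load_dataset
#   ds = load_dataset("ProgramComputer/VGGFace2-HQ", split="train", streaming=True)
#   example = next(iter(ds))
#
# Recent versions of `datasets` may additionally require trust_remote_code=True.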
|
|
|
_DESCRIPTION = "VGGFace2-HQ is a large-scale face recognition dataset. Images are downloaded from Google Image Search and have large variations in pose, age, illumination, ethnicity and profession." |
|
_CITATION = """\ |
|
@article{DBLP:journals/corr/abs-1710-08092, |
|
author = {Qiong Cao and |
|
Li Shen and |
|
Weidi Xie and |
|
Omkar M. Parkhi and |
|
Andrew Zisserman}, |
|
title = {VGGFace2-HQ: {A} dataset for recognising faces across pose and age}, |
|
journal = {CoRR}, |
|
volume = {abs/1710.08092}, |
|
year = {2017}, |
|
url = {http://arxiv.org/abs/1710.08092}, |
|
eprinttype = {arXiv}, |
|
eprint = {1710.08092}, |
|
timestamp = {Wed, 04 Aug 2021 07:50:14 +0200}, |
|
biburl = {https://dblp.org/rec/journals/corr/abs-1710-08092.bib}, |
|
bibsource = {dblp computer science bibliography, https://dblp.org} |
|
} |
|
""" |
|
|
_URLS = {
    "default": {
        "train": (
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac01.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac02.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac03.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac04.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac05.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac06.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac07.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac08.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac09.zip",
            "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/train/VGGFac10.zip",
        ),
        "test": "https://huggingface.co/datasets/ProgramComputer/VGGFace2-HQ/resolve/main/test/test.zip",
    }
}
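
# NOTE (assumption): each archive is expected to store its members under a
# <class_id>/<image_name> layout (e.g. "n000002/0001_01.jpg", possibly below an
# extra top-level folder); _generate_examples derives class_id and image_id
# from the last two path components.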
|
|
class VGGFace2HQ(datasets.GeneratorBasedBuilder):
    """VGGFace2-HQ contains high-quality face images collected via Google Image Search."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION),
    ]
|
|
|
    def _info(self):
        features = {
            "image": datasets.Image(),
            "image_id": datasets.Value("string"),
            "class_id": datasets.Value("string"),
            "identity": datasets.Value("string"),
            "gender": datasets.Value("string"),
            "sample_num": datasets.Value("uint64"),
            "flag": datasets.Value("bool"),
            "male": datasets.Value("bool"),
            "black_hair": datasets.Value("bool"),
            "brown_hair": datasets.Value("bool"),
            "gray_hair": datasets.Value("bool"),
            "blond_hair": datasets.Value("bool"),
            "long_hair": datasets.Value("bool"),
            "mustache_or_beard": datasets.Value("bool"),
            "wearing_hat": datasets.Value("bool"),
            "eyeglasses": datasets.Value("bool"),
            "sunglasses": datasets.Value("bool"),
            "mouth_open": datasets.Value("bool"),
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Supervised keys must reference declared features.
            supervised_keys=datasets.info.SupervisedKeysData("image", "class_id"),
            features=datasets.Features(features),
            citation=_CITATION,
        )
|
|
|
    def _split_generators(self, dl_manager):
        targets = [
            "01-Male.txt",
            "02-Black_Hair.txt",
            "03-Brown_Hair.txt",
            "04-Gray_Hair.txt",
            "05-Blond_Hair.txt",
            "06-Long_Hair.txt",
            "07-Mustache_or_Beard.txt",
            "08-Wearing_Hat.txt",
            "09-Eyeglasses.txt",
            "10-Sunglasses.txt",
            "11-Mouth_Open.txt",
        ]
        # Strip the numeric prefix and the ".txt" suffix: "01-Male.txt" -> "Male".
        target_dict = {
            re.sub(r"^\d+-|\.txt$", "", target):
                f"https://raw.githubusercontent.com/ox-vgg/vgg_face2/master/attributes/{target}"
            for target in targets
        }
        target_dict["identity"] = "https://huggingface.co/datasets/ProgramComputer/VGGFace2/raw/main/meta/identity_meta.csv"
        metadata = dl_manager.download(target_dict)

        # Download the zip archives without extracting them and stream their
        # members, so that _generate_examples receives (path, file object) pairs.
        train_archives = dl_manager.download(list(_URLS["default"]["train"]))
        test_archive = dl_manager.download(_URLS["default"]["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths": [dl_manager.iter_archive(path) for path in train_archives],
                    "meta_paths": metadata,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths": [dl_manager.iter_archive(test_archive)],
                    "meta_paths": metadata,
                },
            ),
        ]
|
|
|
    def _generate_examples(self, paths, meta_paths):
        # Per-identity metadata (Class_ID, Name, Sample_Num, Flag, Gender); the
        # file uses ", " as separator, which needs pandas' python engine.
        meta = pd.read_csv(meta_paths["identity"], sep=", ", engine="python").set_index("Class_ID")

        # Per-image binary attributes ("Male", "Black_Hair", ...), indexed by the
        # image path inside the archives, e.g. "n000002/0001_01.jpg".
        attributes = None
        for name, path in ((k, v) for k, v in meta_paths.items() if k != "identity"):
            temp = pd.read_csv(path, sep="\t", header=None, names=["Image_Path", name])
            attributes = temp if attributes is None else attributes.merge(temp, on="Image_Path", how="outer")
        attributes = attributes.set_index("Image_Path")

        for archive in paths:
            for file_path, file_obj in archive:
                if not file_path.lower().endswith((".jpg", ".jpeg", ".png")):
                    continue
                # Assumes the <.../class_id/image_name> layout noted above _URLS.
                class_id, image_name = file_path.split("/")[-2:]
                identity = meta.loc[class_id] if class_id in meta.index else None
                rel_path = f"{class_id}/{image_name}"
                image_attrs = attributes.loc[rel_path] if rel_path in attributes.index else None
                example = {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "image_id": image_name.rsplit(".", 1)[0],
                    "class_id": class_id,
                    "identity": None if identity is None else identity["Name"],
                    "gender": None if identity is None else identity["Gender"],
                    "sample_num": None if identity is None else identity["Sample_Num"],
                    "flag": None if identity is None else bool(identity["Flag"]),
                }
                # Attribute files encode positives as 1; anything else (including
                # a missing entry) maps to False.
                for column in attributes.columns:
                    example[column.lower()] = bool(image_attrs is not None and image_attrs[column] == 1)
                yield file_path, example
|