"""TGIF: A New Dataset and Benchmark on Animated GIF Description""" |
|
|
|
|
|
import os |
|
import csv |
|
import datasets |
|
|
|
|
|
_CITATION = """
@InProceedings{tgif-cvpr2016,
author = {Li, Yuncheng and Song, Yale and Cao, Liangliang and Tetreault, Joel and Goldberg, Larry and Jaimes, Alejandro and Luo, Jiebo},
title = "{TGIF: A New Dataset and Benchmark on Animated GIF Description}",
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2016}
}
"""

_DESCRIPTION = """\
The Tumblr GIF (TGIF) dataset contains 100K animated GIFs and 120K sentences describing the visual content of the animated GIFs.
The animated GIFs were collected from Tumblr, from randomly selected posts published between May and June of 2015.
We provide the URLs of the animated GIFs in this release. The sentences were collected via crowdsourcing, with a carefully designed
annotation interface that ensures a high-quality dataset. We provide one sentence per animated GIF for the training and validation splits,
and three sentences per GIF for the test split. The dataset shall be used to evaluate animated GIF/video description techniques.
"""
_URL_BASE = "http://raingo.github.io/TGIF-Release/"

_DL_PATH = "data.tar.gz"
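
# Expected archive layout (inferred from ``_generate_examples`` below, not
# verified against the release): per-split URL lists ``train.txt``, ``dev.txt``
# and ``test.txt``, plus ``tgif-v1.0.tsv`` holding tab-separated
# ``<gif_url>\t<sentence>`` rows.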


class TGIFConfig(datasets.BuilderConfig):
    """BuilderConfig for TGIF."""

    def __init__(self, **kwargs):
        super(TGIFConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class TGIF(datasets.GeneratorBasedBuilder):
    """Builder for the TGIF animated GIF description dataset."""

    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        TGIFConfig(name="all", description="All the TGIF dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "caption": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_URL_BASE,
            citation=_CITATION,
        )
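
    # The same downloaded archive backs all three splits: each SplitGenerator
    # re-iterates it via ``iter_archive`` and filters by the split name.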
    def _split_generators(self, dl_manager):
        # ``_DL_PATH`` is relative, so it resolves against the location of this
        # script (the archive is expected to be hosted alongside it).
        archive_path = dl_manager.download(_DL_PATH)
        # Extract only in non-streaming mode; when streaming, the archive is
        # read on the fly by ``iter_archive``.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "test",
                },
            ),
        ]
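
    # Example generation makes two passes over the archive: pass 1 reads
    # <split>.txt to learn which GIF URLs belong to the requested split; pass 2
    # reads tgif-v1.0.tsv and keeps only the captions for those URLs.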
    def _generate_examples(self, files, local_extracted_archive, split):
        """Yields (key, example) pairs for one split."""
        # ``local_extracted_archive`` is unused here: files are read directly
        # from the archive iterator, which works in both streaming and local mode.
        captions = {}
        for path, f in files:
            if path.endswith(split + ".txt"):
                # ``iter_archive`` yields binary file objects, so decode each
                # line; ``strip`` also handles a final line without a trailing
                # newline, which slicing off the last character would corrupt.
                for line in f:
                    video_link = line.decode("utf-8").strip()
                    if video_link:
                        captions[video_link] = []
                break
        for path, f in files:
            if path.endswith("tgif-v1.0.tsv"):
                tsv_reader = csv.reader(
                    (line.decode("utf-8") for line in f), delimiter="\t", quotechar='"'
                )
                for video_link, text in tsv_reader:
                    # Keep only captions for GIFs that belong to this split.
                    if video_link in captions:
                        captions[video_link].append(text)
                break
        for idx, video_link in enumerate(captions):
            yield idx, {
                "video_id": video_link,
                # The key must match the "caption" feature declared in _info.
                "caption": captions[video_link],
            }
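

# Streaming sketch (hedged; this uses only the standard ``datasets`` API): in
# streaming mode the archive is never extracted and examples are read straight
# from the tar stream.
#
#   if __name__ == "__main__":
#       from datasets import load_dataset
#
#       ds = load_dataset(__file__, "all", split="validation", streaming=True)
#       print(next(iter(ds)))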