Datasets:
reformat + fix train split name
Browse files
TGIF.py
CHANGED
@@ -26,11 +26,14 @@ _URL_BASE = "http://raingo.github.io/TGIF-Release/"
|
|
26 |
|
27 |
_DL_URL = "https://huggingface.co/datasets/Leyo/TGIF/resolve/main/data.tar.gz"
|
28 |
|
|
|
29 |
class TGIFConfig(datasets.BuilderConfig):
|
30 |
"""BuilderConfig for TGIF."""
|
31 |
|
32 |
def __init__(self, **kwargs):
|
33 |
-
super(TGIFConfig, self).__init__(
|
|
|
|
|
34 |
|
35 |
class TGIF(datasets.GeneratorBasedBuilder):
|
36 |
|
@@ -56,32 +59,32 @@ class TGIF(datasets.GeneratorBasedBuilder):
|
|
56 |
def _split_generators(self, dl_manager):
|
57 |
archive_path = dl_manager.download(_DL_URL)
|
58 |
train_splits = [
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
dev_splits = [
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
test_splits = [
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
return train_splits + dev_splits + test_splits
|
86 |
|
87 |
def _generate_examples(self, files, split):
|
@@ -97,18 +100,16 @@ class TGIF(datasets.GeneratorBasedBuilder):
|
|
97 |
for path, f in files:
|
98 |
if path.endswith("tgif-v1.0.tsv"):
|
99 |
tsv_file = f.read().decode("utf-8").split("\n")[:-1]
|
100 |
-
tsv_reader = csv.reader(
|
101 |
-
|
|
|
102 |
try:
|
103 |
dict[video_link].append(text)
|
104 |
except Exception:
|
105 |
pass
|
106 |
-
|
107 |
for idx, video_link in enumerate(dict):
|
108 |
yield idx, {
|
109 |
"video_id": video_link,
|
110 |
"captions": dict[video_link],
|
111 |
}
|
112 |
-
|
113 |
-
|
114 |
-
|
|
|
26 |
|
27 |
_DL_URL = "https://huggingface.co/datasets/Leyo/TGIF/resolve/main/data.tar.gz"
|
28 |
|
29 |
+
|
30 |
class TGIFConfig(datasets.BuilderConfig):
    """BuilderConfig for the TGIF dataset.

    Pins the dataset version to 2.1.0 and forwards every remaining
    keyword argument unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # Zero-argument super() — equivalent to the legacy
        # super(TGIFConfig, self).__init__(...) spelling.
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)
|
37 |
|
38 |
class TGIF(datasets.GeneratorBasedBuilder):
|
39 |
|
|
|
59 |
def _split_generators(self, dl_manager):
    """Download the data archive and declare the train/dev/test splits.

    Args:
        dl_manager: ``datasets.DownloadManager`` used to fetch the archive.

    Returns:
        A list of three ``datasets.SplitGenerator`` objects (train,
        validation, test — in that order), each iterating over the same
        downloaded archive with its own split tag.
    """
    archive_path = dl_manager.download(_DL_URL)
    # All three splits read the same archive and differ only in the split
    # name/tag, so build them in one pass instead of three copy-pasted
    # list literals.
    split_specs = [
        (datasets.Split.TRAIN, "train"),
        (datasets.Split.VALIDATION, "dev"),
        (datasets.Split.TEST, "test"),
    ]
    return [
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={
                # iter_archive yields (path, file-object) pairs lazily;
                # each split needs its own fresh iterator.
                "files": dl_manager.iter_archive(archive_path),
                "split": split_tag,
            },
        )
        for split_name, split_tag in split_specs
    ]
|
89 |
|
90 |
def _generate_examples(self, files, split):
|
|
|
100 |
for path, f in files:
|
101 |
if path.endswith("tgif-v1.0.tsv"):
|
102 |
tsv_file = f.read().decode("utf-8").split("\n")[:-1]
|
103 |
+
tsv_reader = csv.reader(
|
104 |
+
tsv_file, delimiter="\t", quotechar='"')
|
105 |
+
for idx, (video_link, text) in enumerate(tsv_reader):
|
106 |
try:
|
107 |
dict[video_link].append(text)
|
108 |
except Exception:
|
109 |
pass
|
110 |
+
|
111 |
for idx, video_link in enumerate(dict):
|
112 |
yield idx, {
|
113 |
"video_id": video_link,
|
114 |
"captions": dict[video_link],
|
115 |
}
|
|
|
|
|
|