Leyo committed
Commit 05597b5 · 1 Parent(s): bbbb88e

create yttemporal loading script

Files changed (1): yttemporal180m.py (+112 −0)
yttemporal180m.py ADDED
@@ -0,0 +1,112 @@
+ import json
+ import datasets
+ import datetime
+
+ _CITATION = """
+ @inproceedings{zellersluhessel2021merlot,
+     title={MERLOT: Multimodal Neural Script Knowledge Models},
+     author={Zellers, Rowan and Lu, Ximing and Hessel, Jack and Yu, Youngjae and Park, Jae Sung and Cao, Jize and Farhadi, Ali and Choi, Yejin},
+     booktitle={Advances in Neural Information Processing Systems 34},
+     year={2021}
+ }
+ """
+
+ _DESCRIPTION = """\
+ YT-Temporal-180M, a large and diverse dataset of 6 million videos (spanning 180M extracted frames)
+ that cover a wide range of topics.
+ """
+
+ _URL_BASE = "https://rowanzellers.com/merlot/#data"
+
+ # The annotations are sharded into 100 gzipped JSONL files numbered 000..099.
+ url_numbers = [f"{i:03d}" for i in range(100)]
+ _DL_URLS = [
+     f"https://storage.googleapis.com/merlot/yttemporal180m/yttemporal180m_{num}of100.jsonl.gz"
+     for num in url_numbers
+ ]
+
+
+ def json_serializer(o):
+     """Fallback serializer so datetime objects in the metadata can be JSON-dumped."""
+     if isinstance(o, datetime.datetime):
+         return str(o)
+
+     raise TypeError(
+         f"Object of type {o.__class__.__name__} is not JSON serializable")
+
+
+ class yttemporal180mConfig(datasets.BuilderConfig):
+     """BuilderConfig for YT-Temporal-180M."""
+
+     def __init__(self, **kwargs):
+         super(yttemporal180mConfig, self).__init__(
+             version=datasets.Version("2.1.0", ""), **kwargs)
+
+
+ class yttemporal180m(datasets.GeneratorBasedBuilder):
+     """YT-Temporal-180M dataset builder."""
+
+     DEFAULT_CONFIG_NAME = "default"
+     BUILDER_CONFIGS = [
+         yttemporal180mConfig(
+             name="default", description="Default full yttemporal180m dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "video_id": datasets.Value("string"),
+                     "video_url": datasets.Value("string"),
+                     "asr": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "quality": datasets.Value("int8"),
+                     "meta": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL_BASE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         archive_paths = [dl_manager.download_and_extract(url) for url in _DL_URLS]
+
+         train_split = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "jsonl_files": archive_paths
+                 },
+             )
+         ]
+
+         return train_split
+
+     def _generate_examples(self, jsonl_files):
+         """Yields one example per video, read from the downloaded JSONL shards."""
+         idx = 0
+         for file in jsonl_files:
+             with open(file, encoding="utf-8") as jsonl_file:
+                 for json_str in jsonl_file:
+                     infos = json.loads(json_str)
+
+                     video_id = infos['info']['display_id']
+                     # The YouTube URL is built from the stored display id, dropping its
+                     # first two characters.
+                     url = "https://www.youtube.com/watch?v=" + video_id[2:]
+
+                     # Concatenate the denoised ASR segments into a single transcript string.
+                     asr = ""
+                     for example in infos['denoised']:
+                         asr += example["cleanasr"]
+
+                     # Everything else is kept as a JSON string in the "meta" field.
+                     metadata_dict = {
+                         "asr_info": infos["denoised"],
+                         "info": infos["info"],
+                         "subtitles": infos["subtitles"],
+                     }
+                     yield idx, {
+                         "video_id": video_id,
+                         "video_url": url,
+                         "asr": asr,
+                         "title": infos['info']['title'],
+                         "quality": infos['info']['quality'],
+                         "meta": json.dumps(
+                             metadata_dict,
+                             default=json_serializer,
+                             indent=2
+                         )
+                     }
+                     idx += 1
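
Once this script is in place, loading it follows the standard `datasets` flow. Below is a minimal usage sketch, not part of this commit: it assumes a `datasets` version that still supports script-based loading and a local copy of `yttemporal180m.py`, and the field accesses are purely illustrative (running it downloads and extracts all 100 annotation shards).

import json
from datasets import load_dataset

# Load via the local loading script; this runs _split_generators and
# _generate_examples above and materializes the "train" split.
ds = load_dataset("yttemporal180m.py", split="train")

example = ds[0]
print(example["video_id"], example["video_url"], example["title"])
print(example["asr"][:200])          # start of the concatenated ASR transcript
meta = json.loads(example["meta"])   # per-video metadata stored as a JSON string
print(meta.keys())                   # dict_keys(['asr_info', 'info', 'subtitles'])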