kapilchauhan committed
Commit 29c1346 · 1 Parent(s): 31dce0a

Upload dataset_infos.json with huggingface_hub

Browse files
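The commit message indicates the file was pushed with the huggingface_hub client. Below is a minimal sketch of the kind of call that produces such a commit; the repo id kapilchauhan/processed_bert_dataset_free_speech is an assumption inferred from the config key in the JSON further down, and the script is assumed to run from the directory containing the file.

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="dataset_infos.json",  # local file to push (assumed in cwd)
    path_in_repo="dataset_infos.json",     # destination path inside the repo
    repo_id="kapilchauhan/processed_bert_dataset_free_speech",  # assumed repo id
    repo_type="dataset",
    commit_message="Upload dataset_infos.json with huggingface_hub",
)
```

A call like this creates exactly one commit adding the file, which matches the single-file diff shown below.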
Files changed (1)
  1. dataset_infos.json +137 -0
dataset_infos.json ADDED
@@ -0,0 +1,137 @@
+ {"kapilchauhan--processed_bert_dataset_free_speech": {
+     "description": "This corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.",
+     "citation": "@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}",
+     "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
+     "license": "",
+     "features": {
+         "author": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "body": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "normalizedBody": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "subreddit": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "subreddit_id": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "id": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "summary": {
+             "feature": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "input_ids": {
+             "feature": {
+                 "dtype": "int32",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "token_type_ids": {
+             "feature": {
+                 "dtype": "int8",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "attention_mask": {
+             "feature": {
+                 "dtype": "int8",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "special_tokens_mask": {
+             "feature": {
+                 "dtype": "int8",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "task_templates": null,
+     "builder_name": null,
+     "config_name": null,
+     "version": null,
+     "splits": {
+         "train": {
+             "name": "train",
+             "num_bytes": 1570623293.0,
+             "num_examples": 78093,
+             "dataset_name": "processed_bert_dataset_free_speech"
+         }
+     },
+     "download_checksums": null,
+     "download_size": 253310712,
+     "post_processing_size": null,
+     "dataset_size": 1570623293.0,
+     "size_in_bytes": 1823934005.0
+ }}
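The metadata above records a single train split of 78,093 examples (about 1.57 GB on disk) whose columns carry both the original Webis-TLDR-17 text fields and BERT tokenizer outputs (input_ids, token_type_ids, attention_mask, special_tokens_mask). A minimal sketch of loading and inspecting the dataset with the datasets library follows; the repo id is an assumption inferred from the config key, and the dataset is assumed to be publicly readable.

```python
from datasets import load_dataset

# Assumed repo id, derived from the "kapilchauhan--processed_bert_dataset_free_speech"
# config key above; adjust if the dataset lives under a different name or is gated.
ds = load_dataset("kapilchauhan/processed_bert_dataset_free_speech", split="train")

print(ds.num_rows)                # 78093, per the "splits" metadata above
print(ds.features["input_ids"])   # Sequence of int32 values: the tokenized inputs
print(ds.features["summary"])     # note: text fields are also typed as Sequence
```

Note that every column, the text fields included, is declared as a Sequence rather than a plain string Value; that layout is typical when tokenization was applied via a batched map and the outputs were saved alongside list-valued copies of the source columns.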