Truong Lac committed
Commit eecd666
1 Parent(s): dc5bdbd

Preprocessed reddit dataset

dataset_dict.json ADDED
@@ -0,0 +1 @@
+ {"splits": ["train", "test", "valid"]}
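The top-level dataset_dict.json is what tells the `datasets` library that this repository holds a DatasetDict with three splits. A minimal loading sketch, assuming the repo has been cloned locally ("./reddit-preprocessed" is a hypothetical placeholder path):

```python
# Minimal sketch: load the saved DatasetDict from a local clone.
# "./reddit-preprocessed" is a hypothetical placeholder path.
from datasets import load_from_disk

ds = load_from_disk("./reddit-preprocessed")  # reads dataset_dict.json
print(ds)           # DatasetDict with "train", "test", "valid" splits
print(ds["valid"])  # each split is an Arrow-backed Dataset
```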
test/dataset.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f873bf39c1e3071d027e9ed9994ccc64cd44ad745a45e301eae8b8ccae1982bc
+ size 53269856
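Each dataset.arrow file is stored via Git LFS, so the repository only version-controls this small pointer; the oid line is the SHA-256 of the actual payload. A sketch of checking a fetched shard against its pointer, assuming `git lfs pull` has already materialized the file:

```python
# Sketch: verify a fetched LFS object against the pointer's oid.
# Assumes the real file was materialized with `git lfs pull`.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

expected = "f873bf39c1e3071d027e9ed9994ccc64cd44ad745a45e301eae8b8ccae1982bc"
assert sha256_of("test/dataset.arrow") == expected, "LFS payload mismatch"
```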
test/dataset_info.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "builder_name": "reddit",
+   "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n",
+   "config_name": "default",
+   "dataset_size": 18936213573,
+   "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n",
+   "download_checksums": {
+     "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1": {
+       "num_bytes": 3141854161,
+       "checksum": "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"
+     }
+   },
+   "download_size": 3141854161,
+   "features": {
+     "content": {
+       "dtype": "string",
+       "id": null,
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "id": null,
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
+   "license": "",
+   "post_processed": null,
+   "post_processing_size": null,
+   "size_in_bytes": 22078067734,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 18936213573,
+       "num_examples": 3848330,
+       "dataset_name": "reddit"
+     }
+   },
+   "supervised_keys": null,
+   "task_templates": null,
+   "version": {
+     "version_str": "1.0.0",
+     "description": null,
+     "major": 1,
+     "minor": 0,
+     "patch": 0
+   }
+ }
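dataset_info.json pins the schema to two string features, content and summary, and records provenance for the original Webis-TLDR-17 download from Zenodo. Note that the splits block still describes only the source corpus's single train split of 3,848,330 examples; the actual test shard is the smaller Arrow file above. A sketch of inspecting the schema, assuming `ds` from the earlier load_from_disk() example:

```python
# Sketch: inspect the recorded schema; assumes `ds` from the
# load_from_disk() example above.
print(ds["test"].features)
# {'content': Value(dtype='string', id=None),
#  'summary': Value(dtype='string', id=None)}

row = ds["test"][0]          # one post/TL;DR pair
print(row["content"][:100])  # the document side
print(row["summary"])        # the author-written summary
```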
test/state.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_data_files": [
+     {
+       "filename": "dataset.arrow"
+     }
+   ],
+   "_fingerprint": "07c62ea796c293b6",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_indexes": {},
+   "_output_all_columns": false,
+   "_split": "train"
+ }
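state.json binds the split to its Arrow shard and records a content fingerprint; note that `_split` reads "train" in all three directories, presumably because each shard was carved out of the original train split before saving. A single shard can also be opened directly without the DatasetDict wrapper; a sketch with a placeholder path:

```python
# Sketch: open one Arrow shard directly (placeholder path).
from datasets import Dataset

test_ds = Dataset.from_file("test/dataset.arrow")
print(test_ds.num_rows, test_ds.column_names)
```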
train/dataset.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:994f812da4d64433e35ed025780299accc0a8d1940b84a2c0072e5a61ecb072c
+ size 247748200
train/dataset_info.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "builder_name": "reddit",
+   "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n",
+   "config_name": "default",
+   "dataset_size": 18936213573,
+   "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n",
+   "download_checksums": {
+     "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1": {
+       "num_bytes": 3141854161,
+       "checksum": "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"
+     }
+   },
+   "download_size": 3141854161,
+   "features": {
+     "content": {
+       "dtype": "string",
+       "id": null,
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "id": null,
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
+   "license": "",
+   "post_processed": null,
+   "post_processing_size": null,
+   "size_in_bytes": 22078067734,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 18936213573,
+       "num_examples": 3848330,
+       "dataset_name": "reddit"
+     }
+   },
+   "supervised_keys": null,
+   "task_templates": null,
+   "version": {
+     "version_str": "1.0.0",
+     "description": null,
+     "major": 1,
+     "minor": 0,
+     "patch": 0
+   }
+ }
train/state.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_data_files": [
+     {
+       "filename": "dataset.arrow"
+     }
+   ],
+   "_fingerprint": "a0cac1e696966c5d",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_indexes": {},
+   "_output_all_columns": false,
+   "_split": "train"
+ }
valid/dataset.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba03a4334366693158b0e26da0e6e23a88edd4e021302a853a4211c702599937
+ size 53055432
valid/dataset_info.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "builder_name": "reddit",
+   "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n",
+   "config_name": "default",
+   "dataset_size": 18936213573,
+   "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n",
+   "download_checksums": {
+     "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1": {
+       "num_bytes": 3141854161,
+       "checksum": "c1a0f8c4374c7314d3c9ec50dd505303c536062d87037d4dca7035b89b36938a"
+     }
+   },
+   "download_size": 3141854161,
+   "features": {
+     "content": {
+       "dtype": "string",
+       "id": null,
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "id": null,
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
+   "license": "",
+   "post_processed": null,
+   "post_processing_size": null,
+   "size_in_bytes": 22078067734,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 18936213573,
+       "num_examples": 3848330,
+       "dataset_name": "reddit"
+     }
+   },
+   "supervised_keys": null,
+   "task_templates": null,
+   "version": {
+     "version_str": "1.0.0",
+     "description": null,
+     "major": 1,
+     "minor": 0,
+     "patch": 0
+   }
+ }
valid/state.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_data_files": [
+     {
+       "filename": "dataset.arrow"
+     }
+   ],
+   "_fingerprint": "ae859c4a6589884b",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_indexes": {},
+   "_output_all_columns": false,
+   "_split": "train"
+ }