davzoku committed on
Commit 76e5fcb
1 Parent(s): f910114

Delete legacy dataset_infos.json

Files changed (1)
  1. dataset_infos.json +0 -55
dataset_infos.json DELETED
@@ -1,55 +0,0 @@
- {
-   "default": {
-     "description": "AG is a collection of more than 1 million news articles. News articles have been\ngathered from more than 2000 news sources by ComeToMyHead in more than 1 year of\nactivity. ComeToMyHead is an academic news search engine which has been running\nsince July, 2004. The dataset is provided by the academic comunity for research\npurposes in data mining (clustering, classification, etc), information retrieval\n(ranking, search, etc), xml, data compression, data streaming, and any other\nnon-commercial activity. For more information, please refer to the link\nhttp://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html .\n\nThe AG's news topic classification dataset is constructed by Xiang Zhang\n(xiang.zhang@nyu.edu) from the dataset above. It is used as a text\nclassification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann\nLeCun. Character-level Convolutional Networks for Text Classification. Advances\nin Neural Information Processing Systems 28 (NIPS 2015).\n",
-     "citation": "@inproceedings{Zhang2015CharacterlevelCN,\n title={Character-level Convolutional Networks for Text Classification},\n author={Xiang Zhang and Junbo Jake Zhao and Yann LeCun},\n booktitle={NIPS},\n year={2015}\n}\n",
-     "homepage": "http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html",
-     "license": "",
-     "features": {
-       "text": {
-         "dtype": "string",
-         "_type": "Value"
-       },
-       "label": {
-         "names": [
-           "World",
-           "Sports",
-           "Business",
-           "Sci/Tech"
-         ],
-         "_type": "ClassLabel"
-       }
-     },
-     "task_templates": [
-       {
-         "task": "text-classification",
-         "label_column": "label"
-       }
-     ],
-     "builder_name": "parquet",
-     "dataset_name": "ag_news",
-     "config_name": "default",
-     "version": {
-       "version_str": "0.0.0",
-       "major": 0,
-       "minor": 0,
-       "patch": 0
-     },
-     "splits": {
-       "train": {
-         "name": "train",
-         "num_bytes": 29817303,
-         "num_examples": 120000,
-         "dataset_name": null
-       },
-       "test": {
-         "name": "test",
-         "num_bytes": 1879474,
-         "num_examples": 7600,
-         "dataset_name": null
-       }
-     },
-     "download_size": 19820267,
-     "dataset_size": 31696777,
-     "size_in_bytes": 51517044
-   }
- }
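
With the legacy file removed, the same metadata it carried (features, class names, splits, sizes) can still be read from the Hub at load time. The following is a minimal sketch in Python, assuming the `datasets` library is installed and that the deleted file's `dataset_name` ("ag_news") is also the id this repo resolves under; substitute the actual repo id if it differs.

# Minimal sketch: read the metadata that dataset_infos.json used to carry.
# Assumes the `datasets` library is installed; the dataset id "ag_news" is
# taken from the deleted file's "dataset_name" field and may need to be
# replaced with this repo's actual Hub id.
from datasets import load_dataset_builder

builder = load_dataset_builder("ag_news")
info = builder.info  # DatasetInfo: description, citation, features, splits, ...

print(info.features)  # text: Value("string"), label: ClassLabel(World, Sports, Business, Sci/Tech)
if info.splits:  # split metadata may be absent until the builder is prepared
    for name, split in info.splits.items():
        print(name, split.num_examples, split.num_bytes)
print(info.download_size, info.dataset_size)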