albertvillanova (HF staff) committed on
Commit
748c4f3
1 Parent(s): 09de238

Convert dataset to Parquet (#6)


- Convert dataset to Parquet (72a7fea57cef23cb6ae1ef605f4829f4df804c96)
- Delete loading script (e06be61c9de485faf7dd4996255851f6c95b554b)
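
With the loading script gone, the Hub serves this dataset straight from the Parquet shard added below. A minimal sketch of the downstream effect, assuming the datasets library is installed and the repository id is "onestop_english" (the id is an assumption, not stated in this commit):

from datasets import load_dataset

# After this commit, load_dataset() reads data/train-*.parquet directly
# instead of executing onestop_english.py (repo id is an assumption).
ds = load_dataset("onestop_english", split="train")
print(ds)  # expected: Dataset({features: ['text', 'label'], num_rows: 567})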

README.md CHANGED
@@ -34,10 +34,15 @@ dataset_info:
           '2': adv
   splits:
   - name: train
-    num_bytes: 2278043
+    num_bytes: 2278039
     num_examples: 567
-  download_size: 1228804
-  dataset_size: 2278043
+  download_size: 1398139
+  dataset_size: 2278039
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for OneStopEnglish corpus
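
The new configs block is what the Hub's Parquet-backed loader reads to map splits to data files. A sketch of the equivalent explicit call, assuming a local checkout of the repository (paths relative to the repo root):

from datasets import load_dataset

# Equivalent to what the README's configs entry declares: the "train"
# split comes from the Parquet shards matching data/train-*.
ds = load_dataset(
    "parquet",
    data_files={"train": "data/train-00000-of-00001.parquet"},
    split="train",
)
print(ds.num_rows)  # 567, per the dataset_info above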
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f31161ff98617fc970147ac6bcd0a55a37b60fc1625bd78fcfdcbc500f40d6a8
+size 1398139
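
What is committed here is a Git LFS pointer, not the Parquet bytes themselves: the oid and size fields identify the real object (1398139 bytes, matching download_size in the README). Once the shard is fetched, for example via a git-lfs clone, it can be inspected directly; a sketch assuming pyarrow is installed and the file sits at its in-repo path:

import pyarrow.parquet as pq

# Hypothetical local path: the LFS pointer above resolves to this file
# in a checkout with git-lfs enabled.
table = pq.read_table("data/train-00000-of-00001.parquet")
print(table.num_rows)  # 567
print(table.schema)    # text (string) plus label stored as class indices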
onestop_english.py DELETED
@@ -1,135 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""OneStopEnglish Corpus: Dataset of texts classified into reading levels/text complexities."""
-
-
-import os
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@inproceedings{vajjala-lucic-2018-onestopenglish,
-  title = {OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification},
-  author = {Sowmya Vajjala and Ivana Lučić},
-  booktitle = {Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications},
-  year = {2018}
-}
-"""
-
-_DESCRIPTION = """\
-This dataset is a compilation of the OneStopEnglish corpus of texts written at three reading levels into one file.
-Text documents are classified into three reading levels - ele, int, adv (Elementary, Intermediate and Advanced).
-The dataset demonstrates its usefulness through two applications - automatic readability assessment and automatic text simplification.
-The corpus consists of 189 texts, each in three versions/reading levels (567 in total).
-"""
-
-_HOMEPAGE = "https://github.com/nishkalavallabhi/OneStopEnglishCorpus"
-
-_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International License"
-
-_URL = "https://github.com/purvimisal/OneStopCorpus-Compiled/raw/main/Texts-SeparatedByReadingLevel.zip"
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class OnestopEnglish(datasets.GeneratorBasedBuilder):
-    """OneStopEnglish Corpus: Dataset of texts classified into reading levels."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["ele", "int", "adv"])}
-            ),
-            supervised_keys=[""],
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _vocab_text_gen(self, train_file):
-        for _, ex in self._generate_examples(train_file):
-            yield ex["text"]
-
-    def _split_generators(self, dl_manager):
-        """Downloads the OneStopEnglish corpus."""
-        extracted_folder_path = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"split_key": "train", "data_dir": extracted_folder_path},
-            )
-        ]
-
-    def _get_examples_from_split(self, split_key, data_dir):
-        """Reads the downloaded and extracted files and combines the individual text files into one dataset."""
-
-        data_dir = os.path.join(data_dir, "Texts-SeparatedByReadingLevel")
-
-        ele_samples = []
-        dir_path = os.path.join(data_dir, "Ele-Txt")
-        files = os.listdir(dir_path)
-        for f in sorted(files):
-            try:
-                with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
-                    text = myfile.read().strip()
-                    ele_samples.append(text)
-            except Exception as e:
-                logger.info("Error with: %s %s", os.path.join(dir_path, f), e)
-
-        int_samples = []
-        dir_path = os.path.join(data_dir, "Int-Txt")
-        files = os.listdir(dir_path)
-        for f in sorted(files):
-            try:
-                with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
-                    text = myfile.read().strip()
-                    int_samples.append(text)
-            except Exception as e:
-                logger.info("Error with: %s %s", os.path.join(dir_path, f), e)
-
-        adv_samples = []
-        dir_path = os.path.join(data_dir, "Adv-Txt")
-        files = os.listdir(dir_path)
-        for f in sorted(files):
-            try:
-                with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
-                    text = myfile.read().strip()
-                    adv_samples.append(text)
-            except Exception as e:
-                logger.info("Error with: %s %s", os.path.join(dir_path, f), e)
-
-        train_samples = ele_samples + int_samples + adv_samples
-        train_labels = (["ele"] * len(ele_samples)) + (["int"] * len(int_samples)) + (["adv"] * len(adv_samples))
-
-        if split_key == "train":
-            return (train_samples, train_labels)
-        else:
-            raise ValueError(f"Invalid split key {split_key}")
-
-    def _generate_examples(self, split_key, data_dir):
-        """Yields examples for a given split of the dataset."""
-        split_text, split_labels = self._get_examples_from_split(split_key, data_dir)
-        for id_, (text, label) in enumerate(zip(split_text, split_labels)):
-            feature_dict = {"text": text, "label": label}
-            yield id_, feature_dict
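
For reference, the deleted script's whole job - walking the three level-specific folders, reading each UTF-8-sig text file, and attaching the ele/int/adv label - now lives in the Parquet shard. A condensed sketch of how equivalent rows could be materialized (paths follow the script above; the actual conversion was performed by the Hub's tooling, so this is illustrative only):

import os

import pyarrow as pa
import pyarrow.parquet as pq

LEVELS = ["ele", "int", "adv"]  # ClassLabel order from the deleted script
DIRS = {"ele": "Ele-Txt", "int": "Int-Txt", "adv": "Adv-Txt"}

texts, labels = [], []
for idx, level in enumerate(LEVELS):
    dir_path = os.path.join("Texts-SeparatedByReadingLevel", DIRS[level])
    for fname in sorted(os.listdir(dir_path)):
        # Same read logic as the script: utf-8-sig strips the BOM.
        with open(os.path.join(dir_path, fname), encoding="utf-8-sig") as fh:
            texts.append(fh.read().strip())
        labels.append(idx)  # ClassLabel values are stored as integer indices

pq.write_table(pa.table({"text": texts, "label": labels}),
               "data/train-00000-of-00001.parquet")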