Matej Klemen commited on
Commit
75a8519
1 Parent(s): a0bae4d

Add first version of the dataset script

Browse files
Files changed (2) hide show
  1. README.md +58 -0
  2. akces_gec.py +141 -0
README.md CHANGED
@@ -1,3 +1,61 @@
1
  ---
2
  license: cc-by-nc-sa-4.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
1
  ---
2
  license: cc-by-nc-sa-4.0
3
+ dataset_info:
4
+ - config_name: ann0
5
+ features:
6
+ - name: src_tokens
7
+ sequence: string
8
+ - name: tgt_tokens
9
+ sequence: string
10
+ - name: corrections
11
+ list:
12
+ - name: idx_src
13
+ sequence: int32
14
+ - name: idx_tgt
15
+ sequence: int32
16
+ - name: corr_types
17
+ sequence: string
18
+ splits:
19
+ - name: train
20
+ num_bytes: 11199287
21
+ num_examples: 42210
22
+ - name: validation
23
+ num_bytes: 713686
24
+ num_examples: 2485
25
+ - name: test
26
+ num_bytes: 741411
27
+ num_examples: 2676
28
+ download_size: 3534547
29
+ dataset_size: 12654384
30
+ - config_name: ann1
31
+ features:
32
+ - name: src_tokens
33
+ sequence: string
34
+ - name: tgt_tokens
35
+ sequence: string
36
+ - name: corrections
37
+ list:
38
+ - name: idx_src
39
+ sequence: int32
40
+ - name: idx_tgt
41
+ sequence: int32
42
+ - name: corr_types
43
+ sequence: string
44
+ splits:
45
+ - name: train
46
+ num_bytes: 8124054
47
+ num_examples: 42210
48
+ - name: validation
49
+ num_bytes: 618583
50
+ num_examples: 2485
51
+ - name: test
52
+ num_bytes: 655536
53
+ num_examples: 2676
54
+ download_size: 3534547
55
+ dataset_size: 9398173
56
  ---
57
+
58
+ There are two configs: `ann0` (default) and `ann1`. These correspond to the annotator ID whose annotations will be loaded.
59
+
60
+ **Important:** Annotations from annotator 1 only exist for the dev set, so the training and test sets will contain no annotations under the `ann1` config.
61
+ It is up to the user to decide how to combine the annotations from the two annotators.
akces_gec.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from copy import deepcopy
3
+
4
+ import datasets
5
+
6
+
7
+ _CITATION = """\
8
+ @article{naplava2019wnut,
9
+ title={Grammatical Error Correction in Low-Resource Scenarios},
10
+ author={N{\'a}plava, Jakub and Straka, Milan},
11
+ journal={arXiv preprint arXiv:1910.00353},
12
+ year={2019}
13
+ }
14
+ """
15
+
16
+ _DESCRIPTION = """\
17
+ AKCES-GEC is a grammar error correction corpus for Czech generated from a subset of AKCES resources.
18
+ """
19
+
20
+ _HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3057"
21
+
22
+ _LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
23
+
24
+ _URLS = {
25
+ "akces_gec": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3057/AKCES-GEC.zip"
26
+ }
27
+
28
+
29
class AkcesGEC(datasets.GeneratorBasedBuilder):
    """AKCES-GEC dataset for grammatical error correction. """

    VERSION = datasets.Version("1.0.0")

    # One config per annotator: an example keeps only that annotator's edits.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ann0", version=VERSION, description="Use annotations from annotator#0"),
        datasets.BuilderConfig(name="ann1", version=VERSION, description="Use annotations from annotator#1")
    ]

    DEFAULT_CONFIG_NAME = "ann0"

    def _info(self):
        """Declare the example schema: source/target tokens plus a list of corrections."""
        features = datasets.Features(
            {
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                # Each correction maps source-token indices to target-token
                # indices; `idx_tgt` is left empty for pure deletions.
                "corrections": [{
                    "idx_src": datasets.Sequence(datasets.Value("int32")),
                    "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                    "corr_types": datasets.Sequence(datasets.Value("string"))
                }]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then map each split to its .m2 file."""
        urls = _URLS["akces_gec"]
        data_dir = dl_manager.download_and_extract(urls)

        # Config name ("ann0"/"ann1") selects whose edits are applied.
        consider_annotator = 0 if self.config.name == "ann0" else 1
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "train", "train.all.m2"), "annotator": consider_annotator},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": os.path.join(data_dir, "dev", "dev.all.m2"), "annotator": consider_annotator},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": os.path.join(data_dir, "test", "test.all.m2"), "annotator": consider_annotator},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file_path, annotator=0):
        """Parse an M2 file and yield (idx, example) pairs.

        Only edits attributed to `annotator` are applied to the target
        sentence and recorded in `corrections`.

        Bug fix vs. the original: the original only emitted an example when a
        blank separator line was seen, so a file without a trailing blank line
        silently lost its last example. The pending example is now flushed
        after the read loop.
        """
        skip_edits = {"noop", "UNK", "Um"}
        with open(file_path, "r", encoding="utf-8") as f:
            idx_ex = 0
            src_sent, tgt_sent, corrections, offset = None, None, [], 0
            for _line in f:
                line = _line.strip()

                if len(line) > 0:
                    prefix, remainder = line[0], line[2:]
                    if prefix == "S":
                        # Source line: target starts as a copy that edits mutate.
                        src_sent = remainder.split(" ")
                        tgt_sent = deepcopy(src_sent)

                    elif prefix == "A":
                        # Annotation line: "start end|||types|||replacement|||...|||annotator_id"
                        annotation_data = remainder.split("|||")
                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
                        edit_types, edit_text = annotation_data[1], annotation_data[2]
                        edit_types = edit_types.split(",")
                        # Skip no-op / unusable edit types entirely.
                        if len(set(edit_types) & skip_edits) > 0:
                            continue

                        # Keep only the requested annotator's edits (checked before
                        # building the correction dict — the original built it first
                        # and then discarded it, which was dead work).
                        annotator_id = int(annotation_data[-1])
                        if annotator_id != annotator:
                            continue

                        formatted_correction = {
                            "idx_src": list(range(idx_start, idx_end)),
                            "idx_tgt": [],  # stays empty for deletions
                            "corr_types": edit_types
                        }

                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
                        if removal:
                            # Delete the span; `offset` tracks how much earlier edits
                            # have shifted target indices relative to the source.
                            for idx_to_remove in range(idx_start, idx_end):
                                del tgt_sent[offset + idx_to_remove]
                                offset -= 1

                        else:  # replacement/insertion
                            edit_tokens = edit_text.split(" ")
                            len_diff = len(edit_tokens) - (idx_end - idx_start)

                            formatted_correction["idx_tgt"] = list(
                                range(offset + idx_start, offset + idx_end + len_diff))
                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
                            offset += len_diff

                        corrections.append(formatted_correction)

                else:  # empty line, indicating end of example
                    if src_sent is None and tgt_sent is None:  # multiple empty lines
                        continue

                    yield idx_ex, {
                        "src_tokens": src_sent,
                        "tgt_tokens": tgt_sent,
                        "corrections": corrections
                    }
                    src_sent, tgt_sent, corrections, offset = None, None, [], 0
                    idx_ex += 1

            # Flush the final example when the file does not end with a blank line.
            if src_sent is not None:
                yield idx_ex, {
                    "src_tokens": src_sent,
                    "tgt_tokens": tgt_sent,
                    "corrections": corrections
                }