import csv
import json
import os
from copy import deepcopy

import datasets


_CITATION = """\
@InProceedings{boyd2018wnut,
  author = {Adriane Boyd},
  title = {Using Wikipedia Edits in Low Resource Grammatical Error Correction},
  booktitle = {Proceedings of the 4th Workshop on Noisy User-generated Text},
  publisher = {Association for Computational Linguistics},
  year = {2018},
  url = {http://aclweb.org/anthology/W18-6111}
}
"""

_DESCRIPTION = """\
Falko-MERLIN is a grammatical error correction corpus consisting of essays and exams.
"""

_HOMEPAGE = "https://github.com/adrianeboyd/boyd-wnut2018"

_LICENSE = "Creative Commons Attribution Share Alike 4.0 International"

_URLS = {
    "falko_merlin_wikipedia": "http://www.sfs.uni-tuebingen.de/~adriane/download/wnut2018/data.tar.gz"
}


class FalkoMERLIN(datasets.GeneratorBasedBuilder):
    """Falko-MERLIN grammatical error correction corpus, distributed in M2 format."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "corrections": [
                    {
                        "idx_src": datasets.Sequence(datasets.Value("int32")),
                        "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                        "corr_type": datasets.Value("string"),
                    }
                ],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["falko_merlin_wikipedia"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-train.m2")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-dev.m2")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-test.m2")},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file_path):
        skip_edits = {"noop", "UNK", "Um"}

        # M2 format: an "S" line holds the tokenized source sentence, each following "A" line
        # holds one edit as "A <start> <end>|||<type>|||<correction>|||...|||<annotator_id>",
        # and a blank line terminates the example.
        with open(file_path, "r", encoding="utf-8") as f:
            idx_ex = 0
            src_sent, tgt_sent, corrections, offset = None, None, [], 0
            for _line in f:
                line = _line.strip()
                if len(line) > 0:
                    prefix, remainder = line[0], line[2:]
                    if prefix == "S":
                        src_sent = remainder.split(" ")
                        # the target sentence is built by applying the edits to a copy of the source
                        tgt_sent = deepcopy(src_sent)
                    elif prefix == "A":
                        annotation_data = remainder.split("|||")
                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
                        edit_type, edit_text = annotation_data[1], annotation_data[2]
                        if edit_type in skip_edits:
                            continue

                        formatted_correction = {
                            "idx_src": list(range(idx_start, idx_end)),
                            "idx_tgt": [],
                            "corr_type": edit_type,
                        }
                        annotator_id = int(annotation_data[-1])
                        assert annotator_id == 0

                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
                        if removal:
                            # deletion: drop the affected tokens and shift later indices left
                            for idx_to_remove in range(idx_start, idx_end):
                                del tgt_sent[offset + idx_to_remove]
                                offset -= 1
                        else:  # replacement/insertion
                            edit_tokens = edit_text.split(" ")
                            len_diff = len(edit_tokens) - (idx_end - idx_start)
                            formatted_correction["idx_tgt"] = list(
                                range(offset + idx_start, offset + idx_end + len_diff)
                            )
                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
                            offset += len_diff

                        corrections.append(formatted_correction)
                else:  # empty line, indicating end of example
                    yield idx_ex, {
                        "src_tokens": src_sent,
                        "tgt_tokens": tgt_sent,
                        "corrections": corrections,
                    }
                    src_sent, tgt_sent, corrections, offset = None, None, [], 0
                    idx_ex += 1

            # yield the final example in case the file does not end with a blank line
            if src_sent is not None:
                yield idx_ex, {
                    "src_tokens": src_sent,
                    "tgt_tokens": tgt_sent,
                    "corrections": corrections,
                }
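
# Usage sketch (not part of the original script; the local filename "falko_merlin.py"
# is an assumption, and recent versions of `datasets` may additionally require
# `trust_remote_code=True` to load script-based datasets):
if __name__ == "__main__":
    # load this script directly as a local dataset loading script
    dataset = datasets.load_dataset(__file__)
    example = dataset["train"][0]
    print(example["src_tokens"])
    print(example["tgt_tokens"])
    print(example["corrections"])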