Gaëtan Caillaut committed on
Commit
579e126
1 Parent(s): ae8acea

Initial Commit

Browse files
Files changed (5)
  1. .gitattributes +2 -0
  2. README.md +71 -0
  3. data/corpus.jsonl.gz +3 -0
  4. data/entities.jsonl.gz +3 -0
  5. frwiki_el.py +208 -0
.gitattributes CHANGED
@@ -35,3 +35,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ data/corpus.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+ data/entities.jsonl.gz filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ annotations_creators:
+ - crowdsourced
+ language_creators:
+ - machine-generated
+ languages:
+ - fr
+ - fr-FR
+ licenses:
+ - wtfpl
+ multilinguality:
+ - monolingual
+ pretty_name: French Wikipedia dataset for Entity Linking
+ size_categories:
+ - 1M<n<10M
+ source_datasets:
+ - original
+ task_categories:
+ - token-classification
+ task_ids: []
+ ---
+
+ # Dataset Card for frwiki_el
+
+ ## Dataset Description
+
+ - Repository: [frwiki_el](https://github.com/GaaH/frwiki_el)
+ - Point of Contact: [Gaëtan Caillaut](mailto:g.caillaut@brgm.fr)
+
+ ### Dataset Summary
+
+ This dataset contains articles from the French Wikipedia.
+ It is intended for training Entity Linking (EL) systems: the links between articles are used to annotate named entities.
+
+ The `frwiki` configuration contains the sentences of each Wikipedia page.
+
+ The `entities` configuration contains a description of each Wikipedia page.
+
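+ Both configurations can be loaded with `datasets.load_dataset`. A minimal sketch, assuming the dataset is hosted on the Hugging Face Hub (the id `gcaillaut/frwiki_el` below is hypothetical; substitute the actual repository id):
+
+ ```python
+ from datasets import load_dataset
+
+ # Hypothetical Hub id, for illustration only.
+ corpus = load_dataset("gcaillaut/frwiki_el", "frwiki", split="train")
+ entities = load_dataset("gcaillaut/frwiki_el", "entities", split="train")
+ ```
+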
+ ### Languages
+
+ - French
+
+ ## Dataset Structure
+
+ ### frwiki
+
+ ```
+ {
+     "name": "Title of the page",
+     "wikidata_id": "Identifier of the related Wikidata entity. Can be null.",
+     "wikipedia_id": "Identifier of the Wikipedia page",
+     "wikipedia_url": "URL to the Wikipedia page",
+     "wikidata_url": "URL to the Wikidata page. Can be null.",
+     "words": ["words", "in", "the", "sentence"],
+     "ner": ["ner", "labels", "of", "each", "word"],
+     "el": ["el", "labels", "of", "each", "word"]
+ }
+ ```
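+
+ The `words`, `ner` and `el` lists are aligned word by word: `ner` holds B/I/O tags and `el` holds the identifier of the linked entity (`null` outside mentions). An illustrative, made-up fragment:
+
+ ```
+ "words": ["Sa", "capitale", "est", "Paris"],
+ "ner":   ["O", "O", "O", "B"],
+ "el":    [null, null, null, "Q90"]
+ ```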
+
+ ### entities
+
+ ```
+ {
+     "name": "Title of the page",
+     "wikidata_id": "Identifier of the related Wikidata entity. Can be null.",
+     "wikipedia_id": "Identifier of the Wikipedia page",
+     "wikipedia_url": "URL to the Wikipedia page",
+     "wikidata_url": "URL to the Wikidata page. Can be null.",
+     "description": "Description of the entity"
+ }
+ ```
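+
+ Since both configurations share the `wikidata_id` field, entity descriptions can be joined onto the corpus. A small sketch, reusing the hypothetical Hub id from above:
+
+ ```python
+ from datasets import load_dataset
+
+ entities = load_dataset("gcaillaut/frwiki_el", "entities", split="train")
+
+ # Map each Wikidata id to the entity's textual description.
+ qid2desc = {
+     e["wikidata_id"]: e["description"]
+     for e in entities
+     if e["wikidata_id"] is not None
+ }
+ ```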
data/corpus.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71726bfc2d2af42133e3036f741ea55a1cbf8d1fe87e9dc7c82a15381b1489d6
+ size 3047426604
data/entities.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e6cc15e381b296240ea825e1d49d072d6ec61e0395ddf9786bfaf4f2a4dbfb6
+ size 238584221
frwiki_el.py ADDED
@@ -0,0 +1,208 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """French Wikipedia dataset for Entity Linking."""
+
+
+ import re
+ import gzip
+ import json
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = ""
+
+ _DESCRIPTION = """\
+ French Wikipedia dataset for Entity Linking
+ """
+
+ _HOMEPAGE = "https://github.com/GaaH/frwiki_el"
+
+ _LICENSE = "WTFPL"
+
+ # Data files live in this repository; the paths are resolved by the
+ # DownloadManager in _split_generators.
+ _URLs = {
+     "frwiki": "data/corpus.jsonl.gz",
+     "entities": "data/entities.jsonl.gz",
+ }
+
+ _NER_CLASS_LABELS = [
+     "B",
+     "I",
+     "O",
+ ]
+
+ # Entity types found in the source data (not referenced elsewhere in this script).
+ _ENTITY_TYPES = [
+     "DATE",
+     "PERSON",
+     "GEOLOC",
+     "ORG",
+     "OTHER",
+ ]
+
+
+ def item_to_el_features(item, title2qid):
+     """Converts a raw corpus item into aligned words / NER / EL features.
+
+     Entity mentions are marked in the raw text as ``[E=Title]mention[/E]``.
+     This helper is not called by the loader below.
+     """
+     res = {
+         # The dataset features expect a "name" field.
+         "name": item["name"].replace("_", " "),
+         "wikidata_id": item["wikidata_id"],
+         "wikipedia_id": item["wikipedia_id"],
+         "wikidata_url": item["wikidata_url"],
+         "wikipedia_url": item["wikipedia_url"],
+     }
+     text_dict = {
+         "words": [],
+         "ner": [],
+         "el": [],
+     }
+     entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"
+
+     # Start index of the text following the previous mention.
+     i = 0
+     text = item["text"]
+     for m in re.finditer(entity_pattern, text):
+         mention_title = m.group(1)
+         mention = m.group(2)
+
+         # Resolve the linked page title to its Wikidata QID.
+         mention_qid = title2qid.get(mention_title.replace("_", " "), "unknown")
+
+         mention_words = mention.split()
+
+         # Text between the previous mention and this one is outside any entity.
+         j = m.start(0)
+         prev_text = text[i:j].split()
+         len_prev_text = len(prev_text)
+         text_dict["words"].extend(prev_text)
+         text_dict["ner"].extend(["O"] * len_prev_text)
+         text_dict["el"].extend([None] * len_prev_text)
+
+         # The mention itself: B on its first word, I on the rest,
+         # and the QID on every word.
+         text_dict["words"].extend(mention_words)
+         len_mention_tail = len(mention_words) - 1
+         text_dict["ner"].extend(["B"] + ["I"] * len_mention_tail)
+         text_dict["el"].extend([mention_qid] * len(mention_words))
+
+         i = m.end(0)
+
+     # Remaining text after the last mention.
+     tail = text[i:].split()
+     len_tail = len(tail)
+     text_dict["words"].extend(tail)
+     text_dict["ner"].extend(["O"] * len_tail)
+     text_dict["el"].extend([None] * len_tail)
+     res.update(text_dict)
+     return res
+
+
+ class FrwikiElDataset(datasets.GeneratorBasedBuilder):
+     """French Wikipedia dataset for Entity Linking."""
+
+     VERSION = datasets.Version("0.1.0")
+
+     # Two configurations are available: "frwiki" (the annotated corpus) and
+     # "entities" (entity descriptions). Select one with, e.g.:
+     # data = datasets.load_dataset('frwiki_el', 'entities')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="frwiki", version=VERSION,
+                                description="The frwiki dataset for Entity Linking"),
+         datasets.BuilderConfig(name="entities", version=VERSION,
+                                description="Entities and their descriptions"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "frwiki"
+
+     def _info(self):
+         # The two configurations expose different columns, so the features
+         # are defined per configuration.
+         if self.config.name == "frwiki":
+             features = datasets.Features({
+                 "name": datasets.Value("string"),
+                 "wikidata_id": datasets.Value("string"),
+                 "wikipedia_id": datasets.Value("string"),
+                 "wikipedia_url": datasets.Value("string"),
+                 "wikidata_url": datasets.Value("string"),
+                 "words": [datasets.Value("string")],
+                 "ner": [datasets.ClassLabel(names=_NER_CLASS_LABELS)],
+                 "el": [datasets.Value("string")],
+             })
+         elif self.config.name == "entities":
+             features = datasets.Features({
+                 "name": datasets.Value("string"),
+                 "wikidata_id": datasets.Value("string"),
+                 "wikipedia_id": datasets.Value("string"),
+                 "wikipedia_url": datasets.Value("string"),
+                 "wikidata_url": datasets.Value("string"),
+                 "description": datasets.Value("string"),
+             })
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # The configuration selected by the user is in self.config.name.
+         # dl_manager resolves the relative data path to a local (cached)
+         # file; it stays gzipped, and _generate_examples opens it with gzip.
+         filepath = dl_manager.download(_URLs[self.config.name])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples.
+                 gen_kwargs={
+                     "path": filepath,
+                 }
+             )
+         ]
+
+     def _generate_examples(self, path):
+         """Yields examples as (key, example) tuples."""
+         # Identity parser: keeps numeric JSON values as the raw strings they
+         # were parsed from, matching the string-typed features in _info.
+         def _identity(x):
+             return x
+
+         with gzip.open(path, "rt", encoding="UTF-8") as crps_file:
+             for idx, line in enumerate(crps_file):
+                 item = json.loads(line, parse_int=_identity, parse_float=_identity, parse_constant=_identity)
+                 yield idx, item