Gaëtan Caillaut committed
Commit 0198e1b
1 Parent(s): 82b42eb

added 'entities' dataset
Files changed (2):
1. .gitattributes +1 -1
2. frwiki_good_pages_el.py +77 -31
.gitattributes CHANGED
@@ -25,4 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-data/good-pages/scrapped/final-dataset.csv filter=lfs diff=lfs merge=lfs -text
+data.tar.gz filter=lfs diff=lfs merge=lfs -text
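Note: the newly LFS-tracked data.tar.gz is the same archive that _URLs points at in the script below. A minimal sketch of how a _split_generators implementation (not part of this diff) would typically resolve it with the standard datasets download manager; the split name is an assumption:

import datasets

def _split_generators(self, dl_manager):
    # download_and_extract resolves the archive declared in _URLs for the
    # active config and returns the path of the extracted directory.
    data_dir = dl_manager.download_and_extract(_URLs[self.config.name])
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"data_dir": data_dir},
        )
    ]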
frwiki_good_pages_el.py CHANGED
@@ -57,19 +57,25 @@ _HOMEPAGE = "https://github.com/GaaH/frwiki_good_pages_el"
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace dataset library don't host the datasets but only point to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLs = {
     "frwiki": "data.tar.gz",
+    "entities": "data.tar.gz",
 }
 
-_CLASS_LABELS = [
+_NER_CLASS_LABELS = [
     "B",
     "I",
     "O",
 ]
 
+_ENTITY_TYPES = [
+    "DATE",
+    "PERSON",
+    "GEOLOC",
+    "ORG",
+    "OTHER",
+]
+
 
 def text_to_el_features(doc_qid, doc_title, text, title2qid, title2wikipedia, title2wikidata):
     res = {
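The new _ENTITY_TYPES list feeds a datasets.ClassLabel in the "entities" features below, so type strings are stored as integer ids. A quick illustration using the values introduced in this hunk:

import datasets

entity_types = ["DATE", "PERSON", "GEOLOC", "ORG", "OTHER"]
label = datasets.ClassLabel(names=entity_types)
print(label.str2int("PERSON"))  # -> 1, the index of "PERSON" in the list
print(label.int2str(4))         # -> "OTHER"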
@@ -176,6 +182,8 @@ class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="frwiki", version=VERSION,
                                description="The frwiki dataset for Entity Linking"),
+        datasets.BuilderConfig(name="entities", version=VERSION,
+                               description="Entities and their descriptions"),
     ]
 
     # It's not mandatory to have a default configuration. Just use one if it make sense.
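With the second BuilderConfig registered, the new config can be selected by name. A usage sketch, assuming the script is loaded from its local path and exposes a train split:

from datasets import load_dataset

# "entities" selects the BuilderConfig added in this hunk.
entities = load_dataset("frwiki_good_pages_el.py", "entities", split="train")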
@@ -189,10 +197,21 @@ class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
                 "words": [datasets.Value("string")],
                 "wikipedia": [datasets.Value("string")],
                 "wikidata": [datasets.Value("string")],
-                "labels": [datasets.ClassLabel(names=_CLASS_LABELS)],
+                "labels": [datasets.ClassLabel(names=_NER_CLASS_LABELS)],
                 "titles": [datasets.Value("string")],
                 "qids": [datasets.Value("string")],
             })
+        elif self.config.name == "entities":
+            features = datasets.Features({
+                "qid": datasets.Value("string"),
+                "title": datasets.Value("string"),
+                "url": datasets.Value("string"),
+                "label": datasets.Value("string"),
+                "aliases": [datasets.Value("string")],
+                "type": datasets.ClassLabel(names=_ENTITY_TYPES),
+                "wikipedia": datasets.Value("string"),
+                "wikidata": datasets.Value("string"),
+            })
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
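For reference, a record shaped like the new "entities" features; all field values here are hypothetical, for illustration only:

example = {
    "qid": "Q90",                  # hypothetical Wikidata id
    "title": "Paris",
    "url": "https://fr.wikipedia.org/wiki/Paris",
    "label": "Paris",
    "aliases": ["Ville lumière"],
    "type": "GEOLOC",              # one of _ENTITY_TYPES, encoded as a ClassLabel
    "wikipedia": "Capitale de la France...",
    "wikidata": "capitale de la France",
}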
@@ -241,31 +260,58 @@ class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
-        with open(Path(data_dir, "list-good-pages.txt"), "rt", encoding="UTF-8") as f:
-            good_pages_list = set(f.read().split("\n")).difference("")
-
         entities_path = Path(data_dir, "entities.jsonl.gz")
         corpus_path = Path(data_dir, "corpus.jsonl.gz")
-        title2wikipedia = {}
-        title2wikidata = {}
-        title2qid = {}
-        with gzip.open(entities_path, "rt", encoding="UTF-8") as ent_file:
-            for line in ent_file:
-                item = json.loads(line, parse_int=lambda x: x,
-                                  parse_float=lambda x: x, parse_constant=lambda x: x)
-                title = item["title"]
-                title2wikipedia[title] = item["wikipedia_description"]
-                title2wikidata[title] = item["wikidata_description"]
-                title2qid[title] = item["qid"]
-
-        with gzip.open(corpus_path, "rt", encoding="UTF-8") as crps_file:
-            for id, line in enumerate(crps_file):
-                item = json.loads(line, parse_int=lambda x: x,
-                                  parse_float=lambda x: x, parse_constant=lambda x: x)
-                qid = item["qid"]
-                title = item["title"]
-                text = item["text"]
-
-                features = text_to_el_features(
-                    qid, title, text, title2qid, title2wikipedia, title2wikidata)
-                yield id, features
+
+        def _identiy(x):
+            return x
+
+        if self.config.name == "frwiki":
+            title2wikipedia = {}
+            title2wikidata = {}
+            title2qid = {}
+            with gzip.open(entities_path, "rt", encoding="UTF-8") as ent_file:
+                for line in ent_file:
+                    item = json.loads(
+                        line, parse_int=_identiy, parse_float=_identiy, parse_constant=_identiy)
+                    title = item["title"]
+                    title2wikipedia[title] = item["wikipedia_description"]
+                    title2wikidata[title] = item["wikidata_description"]
+                    title2qid[title] = item["qid"]
+
+            with gzip.open(corpus_path, "rt", encoding="UTF-8") as crps_file:
+                for id, line in enumerate(crps_file):
+                    item = json.loads(line, parse_int=lambda x: x,
+                                      parse_float=lambda x: x, parse_constant=lambda x: x)
+                    qid = item["qid"]
+                    title = item["title"]
+                    text = item["text"]
+
+                    features = text_to_el_features(
+                        qid, title, text, title2qid, title2wikipedia, title2wikidata)
+                    yield id, features
+        elif self.config.name == "entities":
+            entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"
+            with gzip.open(entities_path, "rt", encoding="UTF-8") as ent_file:
+                for id, line in enumerate(ent_file):
+                    item = json.loads(
+                        line, parse_int=_identiy, parse_float=_identiy, parse_constant=_identiy)
+                    try:
+                        qid = item["qid"]
+                        item["wikipedia"] = re.sub(
+                            entity_pattern,
+                            r"\2",
+                            item.pop("wikipedia_description")
+                        )
+                        item["wikidata"] = item.pop("wikidata_description")
+                        if qid is None or qid == "" or item["type"] not in _ENTITY_TYPES:
+                            item["qid"] = ""
+                            item["wikidata"] = ""
+                            item["label"] = ""
+                            item["aliases"] = []
+                            item["type"] = "OTHER"
+                        yield id, item
+                    except:
+                        import sys
+                        print(item, file=sys.stderr)
+                        return
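The entity_pattern regex in the new "entities" branch strips inline entity markup of the form [E=<target>]<surface>[/E] from the Wikipedia description, keeping only the surface form (group 2). A self-contained check:

import re

entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"
text = "Capitale de la [E=France]France[/E] depuis des siècles."
# \2 keeps the surface text and drops the [E=...]...[/E] wrapper.
print(re.sub(entity_pattern, r"\2", text))
# -> Capitale de la France depuis des siècles.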
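Continuing the load_dataset sketch above (same assumptions): once loaded, the "type" column comes back as a ClassLabel integer, which can be decoded through the dataset's features.

# Decode the ClassLabel-encoded "type" back to its string name.
first = entities[0]
print(first["title"], entities.features["type"].int2str(first["type"]))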