Thibault Clérice committed
Commit 6a59d9d (0 parents)

2024.07.17 Release
.gitattributes ADDED
@@ -0,0 +1,2 @@
+ data.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+ env
+ .idea
+ *.json
+ *.arrow
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ task_categories:
+ - object-detection
+ version: 2024.07.17
+ license: cc-by-4.0
+ pretty_name: LADaS
+ size_categories:
+ - 1K<n<10K
+ ---
+ 
+ # LADaS: Layout Analysis Dataset with SegmOnto
+ 
+ ## Dataset Details
+ 
+ LADaS, created by the [ALMANaCH project-team](https://almanach.inria.fr/index-en.html) at Inria
+ and continued in partnership with other researchers, is a multi-document, diachronic layout
+ analysis dataset. It includes:
+ 
+ - Monographs from the Bibliothèque Nationale de France (17th century to today);
+ - PhD theses in various fields, not only STEM (20th-21st century);
+ - Sales catalogs for manuscripts and art pieces, in various fields (18th-20th century);
+ - Noisy digitizations, e.g. with fingers on the page (20th-21st century);
+ - Academic papers, mostly in the humanities and social sciences (19th-21st century);
+ - Magazines about technology and video games, from the 1920s to the 2010s;
+ - Miscellaneous documents found here and there.
+ 
+ The data are in YOLOv8 bbox format (`center_x center_y width height`, with values normalized
+ to the image width and height).
+ 
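+ For illustration, here is a minimal sketch (not part of the repository) of turning one
+ normalized YOLOv8 box back into pixel coordinates, assuming the usual convention that all
+ four values are fractions of the image width and height:
+ 
+ ```python
+ def yolo_to_pixels(bbox, img_w, img_h):
+     """Convert a normalized (cx, cy, w, h) box to pixel (x0, y0, x1, y1)."""
+     cx, cy, w, h = bbox
+     return ((cx - w / 2) * img_w, (cy - h / 2) * img_h,
+             (cx + w / 2) * img_w, (cy + h / 2) * img_h)
+ 
+ # A box centered on a 1000x800 page, half the page wide and a tenth of it tall:
+ print(yolo_to_pixels((0.5, 0.5, 0.5, 0.1), 1000, 800))  # (250.0, 360.0, 750.0, 440.0)
+ ```
+ 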
+ The documents are mostly in Latin script, and the language is mostly French, with some
+ representation of the main Western academic languages.
+ 
+ ### Annotations
+ 
+ Label annotation has been conducted using the [SegmOnto](https://segmonto.github.io/) vocabulary.
+ An annotation guide is available [here](https://github.com/DEFI-COLaF/LADaS/blob/25d4cf3f850cf79d18af572153cfbc73deff4160/AnnotationGuide.md).
+ 
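+ Each line of a label file pairs an integer class id with a box; the id maps to a SegmOnto
+ label through the `_CATEGORIES` list defined in `src/LADaS.py`. A small sketch (the label
+ line and the truncated list are for illustration only):
+ 
+ ```python
+ # First entries of the 41-label category list from src/LADaS.py.
+ _CATEGORIES = ["AdvertisementZone", "DigitizationArtefactZone", "DropCapitalZone", "FigureZone"]
+ 
+ line = "3 0.5 0.3 0.8 0.1"  # hypothetical YOLO label line: class_id cx cy w h
+ cls, *bbox = line.strip().split()
+ print(_CATEGORIES[int(cls)])   # FigureZone
+ print(list(map(float, bbox)))  # [0.5, 0.3, 0.8, 0.1]
+ ```
+ 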
+ ### Dataset Description
+ 
+ - **Curated by:** Thibault Clérice & Juliette Janès
+ - **Funded by:** Défi COLaF, Inria
+ - **License:** CC-BY 4.0
+ 
+ ## Uses
+ 
+ ### Direct Use
+ 
+ - Layout Analysis
+ 
+ ## Dataset Structure
+ 
+ - The dataset provides `train`, `validation` and `test` splits, loaded through `load_dataset` (see the loading sketch below)
+ - Each record carries the `image` itself with its `width` and `height`, plus `image_path`, `year`, `dating-certainty` and `set` metadata
+ - Layout annotations live in the `objects` column, a sequence of `bbox`/`category` pairs where `bbox` is a YOLO-style `center_x center_y width height` quadruple and `category` is a SegmOnto label
+ 
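+ A minimal loading sketch (the repository id below is a placeholder, not confirmed by this
+ commit; substitute the actual Hub id of this dataset):
+ 
+ ```python
+ from datasets import load_dataset
+ 
+ ds = load_dataset("DEFI-COLaF/LADaS", split="validation")  # placeholder repo id
+ record = ds[0]
+ print(record["image_path"], record["width"], record["height"])
+ # `objects` is a Sequence feature, i.e. a dict of parallel lists.
+ for category, bbox in zip(record["objects"]["category"], record["objects"]["bbox"]):
+     print(category, bbox)
+ ```
+ 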
+ ### Annotations
+ 
+ #### Annotation process
+ 
+ The annotation process is described in the [dataset paper](https://inria.hal.science/hal-04453952).
+ 
+ #### Who are the annotators?
+ 
+ ## Citation
+ 
+ **BibTeX:**
+ 
+ ```tex
+ @misc{Clerice_Layout_Analysis_Dataset,
+     author = {Clérice, Thibault and Janès, Juliette and Scheithauer, Hugo and Bénière, Sarah and Langlais, Pierre-Carl and Romary, Laurent and Sagot, Benoit and Bougrelle, Roxane},
+     title = {{Layout Analysis Dataset with SegmOnto (LADaS)}},
+     url = {https://github.com/DEFI-COLaF/LADaS}
+ }
+ ```
+ 
+ ## Dataset Card Contact
+ 
+ Thibault Clérice or Juliette Janès (first.last@inria.fr)
build.py ADDED
@@ -0,0 +1,50 @@
+ import os
+ from datasets import load_dataset
+ from datasets import config
+ from datasets.utils.py_utils import convert_file_size_to_int
+ from datasets.table import embed_table_storage
+ from tqdm import tqdm
+ 
+ 
+ def build_parquet(split):
+     # Source: https://discuss.huggingface.co/t/how-to-save-audio-dataset-with-parquet-format-on-disk/66179
+     dataset = load_dataset("./src/LADaS.py", split=split, trust_remote_code=True)
+     max_shard_size = "500MB"
+ 
+     # Derive the shard count from the estimated dataset size, so that
+     # each parquet file stays under max_shard_size.
+     dataset_nbytes = dataset._estimate_nbytes()
+     max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+     num_shards = int(dataset_nbytes / max_shard_size) + 1
+     num_shards = max(num_shards, 1)
+     shards = (dataset.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
+ 
+     def shards_with_embedded_external_files(shards):
+         # Embed the image bytes into the Arrow tables so the parquet
+         # files are self-contained instead of pointing at local paths.
+         for shard in shards:
+             original_format = shard.format
+             shard = shard.with_format("arrow")
+             shard = shard.map(
+                 embed_table_storage,
+                 batched=True,
+                 batch_size=1000,
+                 keep_in_memory=True,
+             )
+             shard = shard.with_format(**original_format)
+             yield shard
+ 
+     shards = shards_with_embedded_external_files(shards)
+ 
+     os.makedirs("data", exist_ok=True)
+ 
+     for index, shard in tqdm(
+         enumerate(shards),
+         desc="Save the dataset shards",
+         total=num_shards,
+     ):
+         shard_path = f"data/{split}-{index:05d}-of-{num_shards:05d}.parquet"
+         shard.to_parquet(shard_path)
+ 
+ 
+ if __name__ == "__main__":
+     build_parquet("train")
+     build_parquet("validation")
+     build_parquet("test")
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f8d4c8d639b746fef3265a8f17ade95ebbe91a71f80803343754b82401baff4
+ size 109132271
data/train-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50510535956adfcce298f3666a6fe04485a1b6bfc43753b8ca9254fb1086f771
+ size 520039514
data/train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15f15a34695f67ff0457efcf7339f1225e36b697c53537641b5785d0eaaa4037
+ size 587754651
data/train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7316055009e5773364ddbd7cf47b6dd4cbff4b7ff03620368fda3e6e3a5702a
+ size 154107108
data/train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e07ed6a4a4a37ef5107bbb92448497f8fe1a5f634b5c7f53e6a3dea0232ab4e3
+ size 152476613
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da1a0c88505d6de40c42452a82c9e9f1e9bdf67631b207c04e8ed7b445d3d4ad
+ size 258990056
src/LADaS.py ADDED
@@ -0,0 +1,141 @@
+ import csv
+ import glob
+ import os
+ 
+ import datasets
+ from PIL import Image
+ 
+ 
+ _VERSION = "2024-07-17"
+ _URL = f"https://github.com/DEFI-COLaF/LADaS/archive/refs/tags/{_VERSION}.tar.gz"
+ _HOMEPAGE = "https://github.com/DEFI-COLaF/LADaS"
+ _LICENSE = "CC BY 4.0"
+ _CITATION = """\
+ @misc{Clerice_Layout_Analysis_Dataset,
+     author = {Clérice, Thibault and Janès, Juliette and Scheithauer, Hugo and Bénière, Sarah and Romary, Laurent and Sagot, Benoit and Bougrelle, Roxane},
+     title = {{Layout Analysis Dataset with SegmOnto (LADaS)}},
+     url = {https://github.com/DEFI-COLaF/LADaS}
+ }
+ """
+ 
+ # SegmOnto categories, indexed by the integer class id used in the
+ # YOLO label files (the order of this list must not change).
+ _CATEGORIES: list[str] = ["AdvertisementZone", "DigitizationArtefactZone", "DropCapitalZone", "FigureZone",
+                           "FigureZone-FigDesc", "FigureZone-Head", "GraphicZone", "GraphicZone-Decoration",
+                           "GraphicZone-FigDesc", "GraphicZone-Head", "GraphicZone-Maths", "GraphicZone-Part",
+                           "GraphicZone-TextualContent", "MainZone-Date", "MainZone-Entry", "MainZone-Entry-Continued",
+                           "MainZone-Form", "MainZone-Head", "MainZone-Lg", "MainZone-Lg-Continued", "MainZone-List",
+                           "MainZone-List-Continued", "MainZone-Other", "MainZone-P", "MainZone-P-Continued",
+                           "MainZone-Signature", "MainZone-Sp", "MainZone-Sp-Continued",
+                           "MarginTextZone-ManuscriptAddendum", "MarginTextZone-Notes", "MarginTextZone-Notes-Continued",
+                           "NumberingZone", "TitlePageZone", "TitlePageZone-Index", "QuireMarksZone", "RunningTitleZone",
+                           "StampZone", "StampZone-Sticker", "TableZone", "TableZone-Continued", "TableZone-Head"]
+ 
+ 
+ class LadasConfig(datasets.BuilderConfig):
+     """Builder Config for LADaS"""
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+ 
+ 
+ class LadasDataset(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version(_VERSION.replace("-", "."))
+     BUILDER_CONFIGS = [
+         LadasConfig(
+             name="full",
+             description="Full version of the dataset"
+         )
+     ]
+ 
+     def _info(self) -> datasets.DatasetInfo:
+         features = datasets.Features({
+             "image_path": datasets.Value("string"),
+             "year": datasets.Value("int32"),
+             "dating-certainty": datasets.Value("bool"),
+             "set": datasets.Value("string"),
+             "image": datasets.Image(),
+             "width": datasets.Value("int32"),
+             "height": datasets.Value("int32"),
+             "objects": datasets.Sequence(
+                 {
+                     "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                     "category": datasets.Value("string"),
+                 }
+             )
+         })
+         return datasets.DatasetInfo(
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE
+         )
+ 
+     def _split_generators(self, dl_manager):
+         urls_to_download = _URL
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "local_dir": downloaded_files,
+                     "split": "train"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "local_dir": downloaded_files,
+                     "split": "valid"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "local_dir": downloaded_files,
+                     "split": "test"
+                 },
+             ),
+         ]
+ 
+     def _generate_examples(self, local_dir: str, split: str):
+         idx = 0
+ 
+         # Index the per-subset metadata.csv rows by file name.
+         df = {}
+         for file in glob.glob(os.path.join(local_dir, "*", "metadata.csv")):
+             with open(file) as f:
+                 reader = csv.DictReader(f)
+                 for line in reader:
+                     df[line["file"]] = line
+ 
+         for file in glob.glob(os.path.join(local_dir, "*", "data", "*", split, "labels", "*.txt")):
+             # Each label file holds one YOLO annotation per line:
+             # `class_id center_x center_y width height`.
+             objects = []
+             with open(file) as f:
+                 for line in f:
+                     cls, *bbox = line.strip().split()
+                     objects.append({"category": _CATEGORIES[int(cls)], "bbox": list(map(float, bbox))})
+ 
+             # The image sits next to the labels directory: .../images/<name>.jpg.
+             # os.path.join(*parts) drops the leading separator, so restore it.
+             image_path = os.path.normpath(file).split(os.sep)
+             image_path = os.path.join(*image_path[:-2], "images", image_path[-1].replace(".txt", ".jpg"))
+             if file.startswith("/") and not image_path.startswith("/"):
+                 image_path = "/" + image_path
+ 
+             with open(image_path, "rb") as f:
+                 image_bytes = f.read()
+ 
+             with Image.open(image_path) as im:
+                 width, height = im.size
+ 
+             filename = os.path.basename(image_path)
+             line = df[filename]
+ 
+             yield idx, {
+                 "image_path": f"{line['subset']}/{filename}",
+                 "image": {"path": image_path, "bytes": image_bytes},
+                 "year": line["year"] or None,
+                 "dating-certainty": line["dating-certainty"],
+                 "set": line["subset"],
+                 "width": width,
+                 "height": height,
+                 "objects": objects,
+             }
+             idx += 1
+ 
+ 
+ if __name__ == "__main__":
+     LadasDataset().download_and_prepare()