# facial_keypoint_detection/facial_keypoint_detection.py
# Author: Vadzim Kashko (TrainingDataPro)
# Commit: 250cd79 — "refactor: remove useless"
import datasets
import pandas as pd
# BibTeX citation attached to the DatasetInfo and shown on the dataset card.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {facial_keypoint_detection},
author = {TrainingDataPro},
year = {2023}
}
"""
# Human-readable summary displayed on the Hugging Face Hub.
_DESCRIPTION = """\
The dataset is designed for computer vision and machine learning tasks
involving the identification and analysis of key points on a human face.
It consists of images of human faces, each accompanied by key point
annotations in XML format.
"""
# Dataset slug; reused to build the homepage and download URLs below.
_NAME = 'facial_keypoint_detection'
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
_LICENSE = "cc-by-nc-nd-4.0"
# Base URL of the data/ directory inside the dataset repository.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
class FacialKeypointDetection(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro facial keypoint detection dataset.

    Downloads image/mask tar archives plus a CSV annotation file and
    yields one example per image with its mask and key-point string.
    """

    def _info(self):
        """Return the dataset metadata (features, homepage, citation, license)."""
        features = datasets.Features({
            'image_id': datasets.Value('uint32'),
            'image': datasets.Image(),
            'mask': datasets.Image(),
            'key_points': datasets.Value('string')
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the archives and CSV, exposing a single TRAIN split."""
        image_archive = dl_manager.download(f"{_DATA}images.tar.gz")
        mask_archive = dl_manager.download(f"{_DATA}masks.tar.gz")
        annotation_file = dl_manager.download(f"{_DATA}{_NAME}.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(image_archive),
                    "masks": dl_manager.iter_archive(mask_archive),
                    'annotations': annotation_file,
                },
            ),
        ]

    def _generate_examples(self, images, masks, annotations):
        """Yield (index, example) pairs from the archives and annotation CSV.

        NOTE(review): examples are matched to CSV rows purely by position —
        this assumes the tar entry order equals the CSV row order; confirm
        against the published archives.
        """
        table = pd.read_csv(annotations, sep=',')
        idx = 0
        for (image_path, image_file), (mask_path, mask_file) in zip(images, masks):
            yield idx, {
                'image_id': table['image_id'].iloc[idx],
                "image": {
                    "path": image_path,
                    "bytes": image_file.read(),
                },
                "mask": {
                    "path": mask_path,
                    "bytes": mask_file.read(),
                },
                'key_points': table['key_points'].iloc[idx],
            }
            idx += 1