zhuchi76 committed on
Commit
dca63fe
1 Parent(s): 65ff9ab

Update script to hub

Browse files
Files changed (1) hide show
  1. Boat_dataset.py +34 -70
Boat_dataset.py CHANGED
@@ -27,7 +27,7 @@ import datasets
27
  _CITATION = """\
28
  @InProceedings{huggingface:dataset,
29
  title = {Boat dataset},
30
- author={huggingface, Inc.
31
  },
32
  year={2024}
33
  }
@@ -36,11 +36,11 @@ year={2024}
36
  # Add description of the dataset here
37
  # You can copy an official description
38
  _DESCRIPTION = """\
39
- This new dataset is designed to solve this great object detection task.
40
  """
41
 
42
  # Add a link to an official homepage for the dataset here
43
- _HOMEPAGE = ""
44
 
45
  # Add the licence for the dataset here if you can find it
46
  _LICENSE = ""
@@ -49,112 +49,76 @@ _LICENSE = ""
49
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
50
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
51
  _URLS = {
52
- "Boat_dataset": "ftp://arg.lab.nycu.edu.tw/arg-projectfile-download/detr/dataset/annotations.zip",
 
 
 
 
 
53
  }
54
 
55
-
56
-
57
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
58
  class BoatDataset(datasets.GeneratorBasedBuilder):
59
- """TODO: Short description of my dataset."""
60
 
61
  VERSION = datasets.Version("1.1.0")
62
 
63
- # This is an example of a dataset with multiple configurations.
64
- # If you don't want/need to define several sub-sets in your dataset,
65
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
66
-
67
- # If you need to make complex sub-parts in the datasets with configurable options
68
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
69
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
70
-
71
- # You will be able to load one or the other configurations in the following list with
72
- # data = datasets.load_dataset('my_dataset', 'first_domain')
73
- # data = datasets.load_dataset('my_dataset', 'second_domain')
74
- BUILDER_CONFIGS = [
75
- datasets.BuilderConfig(name="Boat_dataset", version=VERSION, description="Images of real and virtual boats."),
76
- ]
77
-
78
- DEFAULT_CONFIG_NAME = "Boat_dataset" # It's not mandatory to have a default configuration. Just use one if it make sense.
79
-
80
  def _info(self):
81
- # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
82
-
83
- objects = datasets.Features({
84
- 'id': datasets.Sequence(datasets.Value('int32')),
85
- 'area': datasets.Sequence(datasets.Value('float32')),
86
- 'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)), # [x, y, width, height]
87
- 'category': datasets.Sequence(datasets.Value('int32'))
88
- })
89
-
90
  features=datasets.Features({
91
  'image_id': datasets.Value('int32'),
92
- # 'image': datasets.Image(), # This is commented out because you can't directly store PIL images in the dataset.
93
- 'image_path': datasets.Value('string'), # Store the path to the image file instead.
94
  'width': datasets.Value('int32'),
95
  'height': datasets.Value('int32'),
96
- 'objects': objects,
 
 
 
 
 
97
  })
98
 
99
  return datasets.DatasetInfo(
100
- # This is the description that will appear on the datasets page.
101
  description=_DESCRIPTION,
102
- # This defines the different columns of the dataset and their types
103
- features=features, # Here we define them above because they are different between the two configurations
104
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
105
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
106
- # supervised_keys=("sentence", "label"),
107
- # Homepage of the dataset for documentation
108
  homepage=_HOMEPAGE,
109
- # License for the dataset if available
110
  license=_LICENSE,
111
- # Citation for the dataset
112
  citation=_CITATION,
113
  )
114
 
115
  def _split_generators(self, dl_manager):
116
- # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
117
- # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
118
 
119
- # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
120
- # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
121
- # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
122
- urls = _URLS[self.config.name]
123
- data_dir = dl_manager.download_and_extract(urls)
124
  return [
125
  datasets.SplitGenerator(
126
  name=datasets.Split.TRAIN,
127
- # These kwargs will be passed to _generate_examples
128
  gen_kwargs={
129
- "filepath": os.path.join(data_dir, "instances_train2023.jsonl"),
130
- "split": "train",
131
- },
 
132
  ),
133
  datasets.SplitGenerator(
134
  name=datasets.Split.VALIDATION,
135
- # These kwargs will be passed to _generate_examples
136
  gen_kwargs={
137
- "filepath": os.path.join(data_dir, "instances_val2023.jsonl"),
138
- "split": "val",
139
- },
 
140
  ),
141
  datasets.SplitGenerator(
142
  name=datasets.Split.TEST,
143
- # These kwargs will be passed to _generate_examples
144
  gen_kwargs={
145
- "filepath": os.path.join(data_dir, "instances_val2023r.jsonl"),
146
- "split": "val_real",
147
- },
148
- ),
 
149
  ]
150
 
151
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
152
- def _generate_examples(self, filepath, split):
153
- with open(filepath, encoding="utf-8") as f:
154
  for key, row in enumerate(f):
155
  try:
156
  data = json.loads(row.strip())
157
- # Proceed to use 'data' for generating examples
158
  yield key, {
159
  "image_id": data["image_id"],
160
  "image_path": data["image_path"],
 
27
  _CITATION = """\
28
  @InProceedings{huggingface:dataset,
29
  title = {Boat dataset},
30
+ author={Tzu-Chi Chen, Inc.
31
  },
32
  year={2024}
33
  }
 
36
# Add description of the dataset here
# You can copy an official description
# Shown on the dataset card on the Hub.
_DESCRIPTION = """\
This dataset is designed to solve object detection task.
"""

# Add a link to an official homepage for the dataset here
# (the Hub repository that also hosts the data files).
_HOMEPAGE = "https://huggingface.co/datasets/zhuchi76/Boat_dataset"

# Add the licence for the dataset here if you can find it
# NOTE(review): left empty — no license declared; confirm before redistribution.
_LICENSE = ""
 
49
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# NOTE: raw files stored in a Hub repo must be fetched through the
# `/resolve/<revision>/` route; the plain `<repo>/data/...` path serves the
# repo web page (HTML), not the file, so dl_manager would download garbage.
_URLS = {
    "images": f"{_HOMEPAGE}/resolve/main/data/images.tar.gz",
    "anno": {
        "train": f"{_HOMEPAGE}/resolve/main/data/instances_train2023.jsonl",
        "val": f"{_HOMEPAGE}/resolve/main/data/instances_val2023.jsonl",
        "test": f"{_HOMEPAGE}/resolve/main/data/instances_val2023r.jsonl",
    },
}
59
 
 
 
 
60
class BoatDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Boat dataset (object-detection annotations per image)."""

    VERSION = datasets.Version("1.1.0")
  def _info(self):
 
 
 
 
 
 
 
 
 
65
  features=datasets.Features({
66
  'image_id': datasets.Value('int32'),
67
+ 'image_path': datasets.Value('string'),
 
68
  'width': datasets.Value('int32'),
69
  'height': datasets.Value('int32'),
70
+ 'objects': datasets.Features({
71
+ 'id': datasets.Sequence(datasets.Value('int32')),
72
+ 'area': datasets.Sequence(datasets.Value('float32')),
73
+ 'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)), # [x, y, width, height]
74
+ 'category': datasets.Sequence(datasets.Value('int32'))
75
+ }),
76
  })
77
 
78
  return datasets.DatasetInfo(
 
79
  description=_DESCRIPTION,
80
+ features=features,
 
 
 
 
 
81
  homepage=_HOMEPAGE,
 
82
  license=_LICENSE,
 
83
  citation=_CITATION,
84
  )
85
 
86
  def _split_generators(self, dl_manager):
87
+ downloaded_files = dl_manager.download_and_extract(_URLS)
 
88
 
89
+ image_dir = dl_manager.extract(downloaded_files['images'])
 
 
 
 
90
  return [
91
  datasets.SplitGenerator(
92
  name=datasets.Split.TRAIN,
 
93
  gen_kwargs={
94
+ 'image_dir': image_dir,
95
+ 'annotations_file': downloaded_files['anno']['train'],
96
+ 'split': 'train'
97
+ }
98
  ),
99
  datasets.SplitGenerator(
100
  name=datasets.Split.VALIDATION,
 
101
  gen_kwargs={
102
+ 'image_dir': image_dir,
103
+ 'annotations_file': downloaded_files['anno']['val'],
104
+ 'split': 'val'
105
+ }
106
  ),
107
  datasets.SplitGenerator(
108
  name=datasets.Split.TEST,
 
109
  gen_kwargs={
110
+ 'image_dir': image_dir,
111
+ 'annotations_file': downloaded_files['anno']['test'],
112
+ 'split': 'val_real'
113
+ }
114
+ )
115
  ]
116
 
117
+ def _generate_examples(self, image_dir, annotations_file, split):
118
+ with open(annotations_file, encoding="utf-8") as f:
 
119
  for key, row in enumerate(f):
120
  try:
121
  data = json.loads(row.strip())
 
122
  yield key, {
123
  "image_id": data["image_id"],
124
  "image_path": data["image_path"],