jherng committed
Commit
50d238f
1 parent: 7a6945d

Add transforms and datasets script

Files changed (3)
  1. datasets-demo.ipynb +0 -0
  2. rsna_datasets.py +661 -0
  3. rsna_transforms.py +629 -0
datasets-demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
rsna_datasets.py ADDED
@@ -0,0 +1,661 @@
from typing import Literal, Optional, TypedDict

import datasets
import monai.transforms
import torch
import torchvision
from torch.utils.data import IterableDataset

import rsna_transforms


class VolumeTransformConfigs(TypedDict):
    """Configuration accepted by rsna_transforms.volume_transforms / mask_transforms."""

    crop_strategy: Literal["oversample", "center", "random", "none"]
    voxel_spacing: tuple[float, float, float]
    volume_size: Optional[tuple[int, int, int]]
    axcodes: str


class SliceTransformConfigs(TypedDict):
    """Configuration accepted by rsna_transforms.slice_transforms."""

    crop_strategy: Literal["ten", "five", "center", "random"]
    shorter_edge_length: int
    slice_size: tuple[int, int]


class Segmentation3DDataset(IterableDataset):
    def __init__(
        self,
        split: Literal["train", "test"],
        streaming: bool = True,
        volume_transforms: Optional[monai.transforms.Compose] = None,
        mask_transforms: Optional[monai.transforms.Compose] = None,
        transform_configs: VolumeTransformConfigs = {
            "crop_strategy": "oversample",
            "voxel_spacing": (3.0, 3.0, 3.0),
            "volume_size": (96, 96, 96),
            "axcodes": "RAS",
        },
        test_size: float = 0.1,
        random_state: int = 42,
    ):
        self.hf_dataset = datasets.load_dataset(
            "jherng/rsna-2023-abdominal-trauma-detection",
            "segmentation",
            split=split,
            streaming=streaming,
            # Use multiprocessing when not streaming to download faster.
            num_proc=4 if not streaming else None,
            test_size=test_size,
            random_state=random_state,
        )

        self.volume_transforms = volume_transforms or rsna_transforms.volume_transforms(
            crop_strategy=transform_configs["crop_strategy"],
            voxel_spacing=transform_configs["voxel_spacing"],
            volume_size=transform_configs["volume_size"],
            axcodes=transform_configs["axcodes"],
            streaming=streaming,
        )

        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
            crop_strategy=transform_configs["crop_strategy"],
            voxel_spacing=transform_configs["voxel_spacing"],
            volume_size=transform_configs["volume_size"],
            axcodes=transform_configs["axcodes"],
            streaming=streaming,
        )
        self.yield_extra_info = True  # For debugging purposes

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else -1

        if isinstance(self.hf_dataset, datasets.Dataset):
            # Map-style dataset: shard samples across DataLoader workers by striding.
            start_idx = worker_id if worker_id != -1 else 0
            step_size = worker_info.num_workers if worker_id != -1 else 1

            for i in range(start_idx, len(self.hf_dataset), step_size):
                data = self.hf_dataset[i]
                yield from self._process_one_sample(data, worker_id=worker_id)
        else:
            # Streaming dataset: iterate as-is; each worker sees the full stream.
            for data in self.hf_dataset:
                yield from self._process_one_sample(data, worker_id=worker_id)

    def _process_one_sample(self, data, worker_id):
        img_data = self.volume_transforms(
            {"img": data["img_path"], "metadata": data["metadata"]}
        )
        seg_data = self.mask_transforms({"seg": data["seg_path"]})

        img_data = [img_data] if not isinstance(img_data, (list, tuple)) else img_data
        seg_data = [seg_data] if not isinstance(seg_data, (list, tuple)) else seg_data

        for img, seg in zip(img_data, seg_data):
            to_yield = {
                "img": img["img"],
                "seg": seg["seg"],
            }
            if self.yield_extra_info:
                to_yield["worker_id"] = worker_id
                to_yield["series_id"] = data["metadata"]["series_id"]

            yield to_yield


class Classification3DDataset(IterableDataset):
    def __init__(
        self,
        split: Literal["train", "test"],
        streaming: bool = True,
        volume_transforms: Optional[monai.transforms.Compose] = None,
        transform_configs: VolumeTransformConfigs = {
            "crop_strategy": "oversample",
            "voxel_spacing": (3.0, 3.0, 3.0),
            "volume_size": (96, 96, 96),
            "axcodes": "RAS",
        },
        test_size: float = 0.1,
        random_state: int = 42,
    ):
        self.hf_dataset = datasets.load_dataset(
            "jherng/rsna-2023-abdominal-trauma-detection",
            "classification",
            split=split,
            streaming=streaming,
            # Use multiprocessing when not streaming to download faster.
            num_proc=4 if not streaming else None,
            test_size=test_size,
            random_state=random_state,
        )

        self.volume_transforms = volume_transforms or rsna_transforms.volume_transforms(
            crop_strategy=transform_configs["crop_strategy"],
            voxel_spacing=transform_configs["voxel_spacing"],
            volume_size=transform_configs["volume_size"],
            axcodes=transform_configs["axcodes"],
            streaming=streaming,
        )

        self.yield_extra_info = True

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else -1

        if isinstance(self.hf_dataset, datasets.Dataset):
            # Map-style dataset: shard samples across DataLoader workers by striding.
            start_idx = worker_id if worker_id != -1 else 0
            step_size = worker_info.num_workers if worker_id != -1 else 1

            for i in range(start_idx, len(self.hf_dataset), step_size):
                data = self.hf_dataset[i]
                yield from self._process_one_sample(data, worker_id=worker_id)
        else:
            # Streaming dataset: iterate as-is; each worker sees the full stream.
            for data in self.hf_dataset:
                yield from self._process_one_sample(data, worker_id=worker_id)

    def _process_one_sample(self, data, worker_id):
        img_data = self.volume_transforms(
            {"img": data["img_path"], "metadata": data["metadata"]}
        )
        img_data = [img_data] if not isinstance(img_data, (list, tuple)) else img_data

        for img in img_data:
            to_yield = {
                "img": img["img"],
                "bowel": data["bowel"],
                "extravasation": data["extravasation"],
                "kidney": data["kidney"],
                "liver": data["liver"],
                "spleen": data["spleen"],
                "any_injury": data["any_injury"],
            }

            if self.yield_extra_info:
                to_yield["worker_id"] = worker_id
                to_yield["series_id"] = data["metadata"]["series_id"]

            yield to_yield


class MaskedClassification3DDataset(IterableDataset):
    def __init__(
        self,
        split: Literal["train", "test"],
        streaming: bool = True,
        volume_transforms: Optional[monai.transforms.Compose] = None,
        mask_transforms: Optional[monai.transforms.Compose] = None,
        transform_configs: VolumeTransformConfigs = {
            "crop_strategy": "oversample",
            "voxel_spacing": (3.0, 3.0, 3.0),
            "volume_size": (96, 96, 96),
            "axcodes": "RAS",
        },
        test_size: float = 0.1,
        random_state: int = 42,
    ):
        self.hf_dataset = datasets.load_dataset(
            "jherng/rsna-2023-abdominal-trauma-detection",
            "classification-with-mask",
            split=split,
            streaming=streaming,
            # Use multiprocessing when not streaming to download faster.
            num_proc=4 if not streaming else None,
            test_size=test_size,
            random_state=random_state,
        )

        self.volume_transforms = volume_transforms or rsna_transforms.volume_transforms(
            crop_strategy=transform_configs["crop_strategy"],
            voxel_spacing=transform_configs["voxel_spacing"],
            volume_size=transform_configs["volume_size"],
            axcodes=transform_configs["axcodes"],
            streaming=streaming,
        )
        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
            crop_strategy=transform_configs["crop_strategy"],
            voxel_spacing=transform_configs["voxel_spacing"],
            volume_size=transform_configs["volume_size"],
            axcodes=transform_configs["axcodes"],
            streaming=streaming,
        )

        self.yield_extra_info = True

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else -1

        if isinstance(self.hf_dataset, datasets.Dataset):
            # Map-style dataset: shard samples across DataLoader workers by striding.
            start_idx = worker_id if worker_id != -1 else 0
            step_size = worker_info.num_workers if worker_id != -1 else 1

            for i in range(start_idx, len(self.hf_dataset), step_size):
                data = self.hf_dataset[i]
                yield from self._process_one_sample(data, worker_id=worker_id)
        else:
            # Streaming dataset: iterate as-is; each worker sees the full stream.
            for data in self.hf_dataset:
                yield from self._process_one_sample(data, worker_id=worker_id)

    def _process_one_sample(self, data, worker_id):
        img_data = self.volume_transforms(
            {"img": data["img_path"], "metadata": data["metadata"]}
        )
        seg_data = self.mask_transforms({"seg": data["seg_path"]})
        img_data = [img_data] if not isinstance(img_data, (list, tuple)) else img_data
        seg_data = [seg_data] if not isinstance(seg_data, (list, tuple)) else seg_data

        for img, seg in zip(img_data, seg_data):
            to_yield = {
                "img": img["img"],
                "seg": seg["seg"],
                "bowel": data["bowel"],
                "extravasation": data["extravasation"],
                "kidney": data["kidney"],
                "liver": data["liver"],
                "spleen": data["spleen"],
                "any_injury": data["any_injury"],
            }

            if self.yield_extra_info:
                to_yield["worker_id"] = worker_id
                to_yield["series_id"] = data["metadata"]["series_id"]

            yield to_yield


class Segmentation2DDataset(IterableDataset):
    def __init__(
        self,
        split: Literal["train", "test"],
        streaming: bool = True,
        volume_transforms: Optional[monai.transforms.Compose] = None,
        mask_transforms: Optional[monai.transforms.Compose] = None,
        slice_transforms: Optional[torchvision.transforms.Compose] = None,
        volume_transform_configs: VolumeTransformConfigs = {
            "crop_strategy": "none",
            "voxel_spacing": (3.0, 3.0, 3.0),
            "volume_size": None,
            "axcodes": "RAS",
        },
        slice_transform_configs: SliceTransformConfigs = {
            "crop_strategy": "center",
            "shorter_edge_length": 256,
            "slice_size": (224, 224),
        },
        test_size: float = 0.1,
        random_state: int = 42,
    ):
        self.hf_dataset = datasets.load_dataset(
            "jherng/rsna-2023-abdominal-trauma-detection",
            "segmentation",
            split=split,
            streaming=streaming,
            # Use multiprocessing when not streaming to download faster.
            num_proc=4 if not streaming else None,
            test_size=test_size,
            random_state=random_state,
        )

        self.volume_transforms = volume_transforms or rsna_transforms.volume_transforms(
            crop_strategy=volume_transform_configs["crop_strategy"],
            voxel_spacing=volume_transform_configs["voxel_spacing"],
            volume_size=volume_transform_configs["volume_size"],
            axcodes=volume_transform_configs["axcodes"],
            streaming=streaming,
        )
        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
            crop_strategy=volume_transform_configs["crop_strategy"],
            voxel_spacing=volume_transform_configs["voxel_spacing"],
            volume_size=volume_transform_configs["volume_size"],
            axcodes=volume_transform_configs["axcodes"],
            streaming=streaming,
        )
        self.slice_transforms = slice_transforms or rsna_transforms.slice_transforms(
            crop_strategy=slice_transform_configs["crop_strategy"],
            shorter_edge_length=slice_transform_configs["shorter_edge_length"],
            slice_size=slice_transform_configs["slice_size"],
        )
        self.yield_extra_info = True  # For debugging purposes

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else -1

        if isinstance(self.hf_dataset, datasets.Dataset):
            # Map-style dataset: shard samples across DataLoader workers by striding.
            start_idx = worker_id if worker_id != -1 else 0
            step_size = worker_info.num_workers if worker_id != -1 else 1

            for i in range(start_idx, len(self.hf_dataset), step_size):
                data = self.hf_dataset[i]
                yield from self._process_one_sample(data, worker_id=worker_id)
        else:
            # Streaming dataset: iterate as-is; each worker sees the full stream.
            for data in self.hf_dataset:
                yield from self._process_one_sample(data, worker_id=worker_id)

    def _process_one_sample(self, data, worker_id):
        vol_img_data = self.volume_transforms(
            {"img": data["img_path"], "metadata": data["metadata"]}
        )
        vol_seg_data = self.mask_transforms({"seg": data["seg_path"]})
        vol_img_data = (
            [vol_img_data]
            if not isinstance(vol_img_data, (list, tuple))
            else vol_img_data
        )
        vol_seg_data = (
            [vol_seg_data]
            if not isinstance(vol_seg_data, (list, tuple))
            else vol_seg_data
        )

        for vol_img, vol_seg in zip(vol_img_data, vol_seg_data):
            slice_len = vol_img["img"].size()[-1]
            for i in range(slice_len):
                slice_img_data = self.slice_transforms(vol_img["img"][..., i])
                slice_seg_data = self.slice_transforms(vol_seg["seg"][..., i])

                slice_img_data = (
                    [slice_img_data]
                    if not isinstance(slice_img_data, (list, tuple))
                    else slice_img_data
                )
                slice_seg_data = (
                    [slice_seg_data]
                    if not isinstance(slice_seg_data, (list, tuple))
                    else slice_seg_data
                )

                for slice_img, slice_seg in zip(slice_img_data, slice_seg_data):
                    to_yield = {
                        "img": slice_img,
                        "seg": slice_seg,
                    }
                    if self.yield_extra_info:
                        to_yield["worker_id"] = worker_id
                        to_yield["series_id"] = data["metadata"]["series_id"]

                    yield to_yield


class Classification2DDataset(IterableDataset):
    def __init__(
        self,
        split: Literal["train", "test"],
        streaming: bool = True,
        volume_transforms: Optional[monai.transforms.Compose] = None,
        slice_transforms: Optional[torchvision.transforms.Compose] = None,
        volume_transform_configs: VolumeTransformConfigs = {
            "crop_strategy": "none",
            "voxel_spacing": (3.0, 3.0, 3.0),
            "volume_size": None,
            "axcodes": "RAS",
        },
        slice_transform_configs: SliceTransformConfigs = {
            "crop_strategy": "center",
            "shorter_edge_length": 256,
            "slice_size": (224, 224),
        },
        test_size: float = 0.1,
        random_state: int = 42,
    ):
        self.hf_dataset = datasets.load_dataset(
            "jherng/rsna-2023-abdominal-trauma-detection",
            "classification",
            split=split,
            streaming=streaming,
            # Use multiprocessing when not streaming to download faster.
            num_proc=4 if not streaming else None,
            test_size=test_size,
            random_state=random_state,
        )

        self.volume_transforms = volume_transforms or rsna_transforms.volume_transforms(
            crop_strategy=volume_transform_configs["crop_strategy"],
            voxel_spacing=volume_transform_configs["voxel_spacing"],
            volume_size=volume_transform_configs["volume_size"],
            axcodes=volume_transform_configs["axcodes"],
            streaming=streaming,
        )
        self.slice_transforms = slice_transforms or rsna_transforms.slice_transforms(
            crop_strategy=slice_transform_configs["crop_strategy"],
            shorter_edge_length=slice_transform_configs["shorter_edge_length"],
            slice_size=slice_transform_configs["slice_size"],
        )
        self.yield_extra_info = True  # For debugging purposes

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else -1

        if isinstance(self.hf_dataset, datasets.Dataset):
            # Map-style dataset: shard samples across DataLoader workers by striding.
            start_idx = worker_id if worker_id != -1 else 0
            step_size = worker_info.num_workers if worker_id != -1 else 1

            for i in range(start_idx, len(self.hf_dataset), step_size):
                data = self.hf_dataset[i]
                yield from self._process_one_sample(data, worker_id=worker_id)
        else:
            # Streaming dataset: iterate as-is; each worker sees the full stream.
            for data in self.hf_dataset:
                yield from self._process_one_sample(data, worker_id=worker_id)

    def _process_one_sample(self, data, worker_id):
        vol_img_data = self.volume_transforms(
            {"img": data["img_path"], "metadata": data["metadata"]}
        )
        vol_img_data = (
            [vol_img_data]
            if not isinstance(vol_img_data, (list, tuple))
            else vol_img_data
        )

        for vol_img in vol_img_data:
            slice_len = vol_img["img"].size()[-1]
            for i in range(slice_len):
                slice_img_data = self.slice_transforms(vol_img["img"][..., i])

                slice_img_data = (
                    [slice_img_data]
                    if not isinstance(slice_img_data, (list, tuple))
                    else slice_img_data
                )

                for slice_img in slice_img_data:
                    to_yield = {
                        "img": slice_img,
                        "bowel": data["bowel"],
                        "extravasation": data["extravasation"],
                        "kidney": data["kidney"],
                        "liver": data["liver"],
                        "spleen": data["spleen"],
                        "any_injury": data["any_injury"],
                    }
                    if self.yield_extra_info:
                        to_yield["worker_id"] = worker_id
                        to_yield["series_id"] = data["metadata"]["series_id"]

                    yield to_yield


class MaskedClassification2DDataset(IterableDataset):
    def __init__(
        self,
        split: Literal["train", "test"],
        streaming: bool = True,
        volume_transforms: Optional[monai.transforms.Compose] = None,
        mask_transforms: Optional[monai.transforms.Compose] = None,
        slice_transforms: Optional[torchvision.transforms.Compose] = None,
        volume_transform_configs: VolumeTransformConfigs = {
            "crop_strategy": "none",
            "voxel_spacing": (3.0, 3.0, 3.0),
            "volume_size": None,
            "axcodes": "RAS",
        },
        slice_transform_configs: SliceTransformConfigs = {
            "crop_strategy": "center",
            "shorter_edge_length": 256,
            "slice_size": (224, 224),
        },
        test_size: float = 0.1,
        random_state: int = 42,
    ):
        self.hf_dataset = datasets.load_dataset(
            "jherng/rsna-2023-abdominal-trauma-detection",
            "classification-with-mask",
            split=split,
            streaming=streaming,
            # Use multiprocessing when not streaming to download faster.
            num_proc=4 if not streaming else None,
            test_size=test_size,
            random_state=random_state,
        )

        self.volume_transforms = volume_transforms or rsna_transforms.volume_transforms(
            crop_strategy=volume_transform_configs["crop_strategy"],
            voxel_spacing=volume_transform_configs["voxel_spacing"],
            volume_size=volume_transform_configs["volume_size"],
            axcodes=volume_transform_configs["axcodes"],
            streaming=streaming,
        )

        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
            crop_strategy=volume_transform_configs["crop_strategy"],
            voxel_spacing=volume_transform_configs["voxel_spacing"],
            volume_size=volume_transform_configs["volume_size"],
            axcodes=volume_transform_configs["axcodes"],
            streaming=streaming,
        )

        self.slice_transforms = slice_transforms or rsna_transforms.slice_transforms(
            crop_strategy=slice_transform_configs["crop_strategy"],
            shorter_edge_length=slice_transform_configs["shorter_edge_length"],
            slice_size=slice_transform_configs["slice_size"],
        )
        self.yield_extra_info = True  # For debugging purposes

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        worker_id = worker_info.id if worker_info else -1

        if isinstance(self.hf_dataset, datasets.Dataset):
            # Map-style dataset: shard samples across DataLoader workers by striding.
            start_idx = worker_id if worker_id != -1 else 0
            step_size = worker_info.num_workers if worker_id != -1 else 1

            for i in range(start_idx, len(self.hf_dataset), step_size):
                data = self.hf_dataset[i]
                yield from self._process_one_sample(data, worker_id=worker_id)
        else:
            # Streaming dataset: iterate as-is; each worker sees the full stream.
            for data in self.hf_dataset:
                yield from self._process_one_sample(data, worker_id=worker_id)

    def _process_one_sample(self, data, worker_id):
        vol_img_data = self.volume_transforms(
            {"img": data["img_path"], "metadata": data["metadata"]}
        )
        vol_seg_data = self.mask_transforms({"seg": data["seg_path"]})
        vol_img_data = (
            [vol_img_data]
            if not isinstance(vol_img_data, (list, tuple))
            else vol_img_data
        )
        vol_seg_data = (
            [vol_seg_data]
            if not isinstance(vol_seg_data, (list, tuple))
            else vol_seg_data
        )

        for vol_img, vol_seg in zip(vol_img_data, vol_seg_data):
            slice_len = vol_img["img"].size()[-1]
            for i in range(slice_len):
                slice_img_data = self.slice_transforms(vol_img["img"][..., i])
                slice_seg_data = self.slice_transforms(vol_seg["seg"][..., i])

                slice_img_data = (
                    [slice_img_data]
                    if not isinstance(slice_img_data, (list, tuple))
                    else slice_img_data
                )
                slice_seg_data = (
                    [slice_seg_data]
                    if not isinstance(slice_seg_data, (list, tuple))
                    else slice_seg_data
                )

                for slice_img, slice_seg in zip(slice_img_data, slice_seg_data):
                    to_yield = {
                        "img": slice_img,
                        "seg": slice_seg,
                        "bowel": data["bowel"],
                        "extravasation": data["extravasation"],
                        "kidney": data["kidney"],
                        "liver": data["liver"],
                        "spleen": data["spleen"],
                        "any_injury": data["any_injury"],
                    }
                    if self.yield_extra_info:
                        to_yield["worker_id"] = worker_id
                        to_yield["series_id"] = data["metadata"]["series_id"]

                    yield to_yield
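
A minimal usage sketch (illustrative, not part of the commit; the batch size and worker count are arbitrary). Because these classes are IterableDatasets, they plug directly into a PyTorch DataLoader:

import torch
from rsna_datasets import Segmentation3DDataset

# streaming=True reads NIfTI files from the Hugging Face Hub on the fly.
dataset = Segmentation3DDataset(split="train", streaming=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=0)
batch = next(iter(loader))  # dict with "img" and "seg" tensors, plus worker_id/series_id debug fields
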
rsna_transforms.py ADDED
@@ -0,0 +1,629 @@
from typing import Optional, Literal

from io import BytesIO

import numpy as np
import nibabel as nib
import torch
import torchvision
import monai
import monai.transforms
from indexed_gzip import IndexedGzipFile
from monai.data.image_reader import NibabelReader
from monai.transforms.io.array import switch_endianness
from monai.transforms.transform import MapTransform, Transform
from monai.data import MetaTensor
from monai.data.utils import correct_nifti_header_if_necessary
from monai.config import KeysCollection, DtypeLike
from monai.utils import (
    ImageMetaKey,
    convert_to_dst_type,
    ensure_tuple_rep,
    ensure_tuple,
)
from monai.utils.enums import PostFix
from huggingface_hub import HfFileSystem


class LoadNIfTIFromLocalCache(Transform):
    def __init__(
        self,
        dtype: DtypeLike | None = np.float32,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        prune_meta_pattern: str | None = None,
        prune_meta_sep: str = ".",
    ):
        self.dtype = dtype
        self.ensure_channel_first = ensure_channel_first
        self.simple_keys = simple_keys
        self.pattern = prune_meta_pattern
        self.sep = prune_meta_sep

        self.reader = NibabelReader()

    def __call__(self, path: str):
        with open(path, mode="rb") as f:
            img = nib.Nifti1Image.from_stream(
                IndexedGzipFile(fileobj=BytesIO(f.read()))
            )

        img = correct_nifti_header_if_necessary(img)
        img_array, meta_data = self.reader.get_data(img)
        img_array = convert_to_dst_type(img_array, dst=img_array, dtype=self.dtype)[0]
        if not isinstance(meta_data, dict):
            raise ValueError(f"`meta_data` must be a dict, got type {type(meta_data)}.")
        # Make sure all elements in the metadata are little-endian.
        meta_data = switch_endianness(meta_data, "<")

        meta_data[ImageMetaKey.FILENAME_OR_OBJ] = path
        img = MetaTensor.ensure_torch_and_prune_meta(
            img_array,
            meta_data,
            simple_keys=self.simple_keys,
            pattern=self.pattern,
            sep=self.sep,
        )
        if self.ensure_channel_first:
            img = monai.transforms.EnsureChannelFirst()(img)
        return img


class LoadNIfTIFromLocalCached(MapTransform):
    def __init__(
        self,
        keys: KeysCollection,
        allow_missing_keys: bool = False,
        dtype: DtypeLike | None = np.float32,
        meta_keys: KeysCollection | None = None,
        meta_key_postfix: str = PostFix.meta(),
        overwriting: bool = False,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        prune_meta_pattern: str | None = None,
        prune_meta_sep: str = ".",
    ):
        super().__init__(keys, allow_missing_keys)
        self._loader = LoadNIfTIFromLocalCache(
            dtype=dtype,
            ensure_channel_first=ensure_channel_first,
            simple_keys=simple_keys,
            prune_meta_pattern=prune_meta_pattern,
            prune_meta_sep=prune_meta_sep,
        )
        if not isinstance(meta_key_postfix, str):
            raise TypeError(
                f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}."
            )
        self.meta_keys = (
            ensure_tuple_rep(None, len(self.keys))
            if meta_keys is None
            else ensure_tuple(meta_keys)
        )
        if len(self.keys) != len(self.meta_keys):
            raise ValueError(
                f"meta_keys should have the same length as keys, got {len(self.keys)} and {len(self.meta_keys)}."
            )
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting

    def __call__(self, data):
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(
            d, self.meta_keys, self.meta_key_postfix
        ):
            d[key] = self._loader(d[key])
        return d


class LoadNIfTIFromHFHub(Transform):
    def __init__(
        self,
        dtype: DtypeLike | None = np.float32,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        prune_meta_pattern: str | None = None,
        prune_meta_sep: str = ".",
    ):
        self.dtype = dtype
        self.ensure_channel_first = ensure_channel_first
        self.simple_keys = simple_keys
        self.pattern = prune_meta_pattern
        self.sep = prune_meta_sep

        self.fs = HfFileSystem()
        self.reader = NibabelReader()

    def __call__(self, url: str):
        url = LoadNIfTIFromHFHub._convert_to_hffs_path(url)
        with self.fs.open(url, mode="rb") as f:
            img = nib.Nifti1Image.from_stream(
                IndexedGzipFile(fileobj=BytesIO(f.read()))
            )
        img = correct_nifti_header_if_necessary(img)
        img_array, meta_data = self.reader.get_data(img)
        img_array = convert_to_dst_type(img_array, dst=img_array, dtype=self.dtype)[0]
        if not isinstance(meta_data, dict):
            raise ValueError(f"`meta_data` must be a dict, got type {type(meta_data)}.")
        # Make sure all elements in the metadata are little-endian.
        meta_data = switch_endianness(meta_data, "<")

        meta_data[ImageMetaKey.FILENAME_OR_OBJ] = url
        img = MetaTensor.ensure_torch_and_prune_meta(
            img_array,
            meta_data,
            simple_keys=self.simple_keys,
            pattern=self.pattern,
            sep=self.sep,
        )
        if self.ensure_channel_first:
            img = monai.transforms.EnsureChannelFirst()(img)
        return img

    @staticmethod
    def _convert_to_hffs_path(url: str):
        if url.startswith("https://huggingface.co/datasets/"):
            parts = url.split("/")
            return f"hf://{'/'.join(parts[3:6])}/{'/'.join(parts[8:])}"
        return url

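# Illustrative note (not part of the original commit; <file> is a placeholder path):
# _convert_to_hffs_path maps, e.g.,
#   https://huggingface.co/datasets/jherng/rsna-2023-abdominal-trauma-detection/resolve/main/<file>
# to
#   hf://datasets/jherng/rsna-2023-abdominal-trauma-detection/<file>
# i.e., it drops the `resolve/<revision>` segment to form a path HfFileSystem understands.
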
class LoadNIfTIFromHFHubd(MapTransform):
    def __init__(
        self,
        keys: KeysCollection,
        allow_missing_keys: bool = False,
        dtype: DtypeLike | None = np.float32,
        meta_keys: KeysCollection | None = None,
        meta_key_postfix: str = PostFix.meta(),
        overwriting: bool = False,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        prune_meta_pattern: str | None = None,
        prune_meta_sep: str = ".",
    ):
        super().__init__(keys, allow_missing_keys)
        self._loader = LoadNIfTIFromHFHub(
            dtype=dtype,
            ensure_channel_first=ensure_channel_first,
            simple_keys=simple_keys,
            prune_meta_pattern=prune_meta_pattern,
            prune_meta_sep=prune_meta_sep,
        )
        if not isinstance(meta_key_postfix, str):
            raise TypeError(
                f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}."
            )
        self.meta_keys = (
            ensure_tuple_rep(None, len(self.keys))
            if meta_keys is None
            else ensure_tuple(meta_keys)
        )
        if len(self.keys) != len(self.meta_keys):
            raise ValueError(
                f"meta_keys should have the same length as keys, got {len(self.keys)} and {len(self.meta_keys)}."
            )
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting

    def __call__(self, data):
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(
            d, self.meta_keys, self.meta_key_postfix
        ):
            d[key] = self._loader(d[key])
        return d


class UnifyUnusualDICOM(Transform):
    """
    Correct the DICOM pixel_array if Pixel Representation == 1 and Bits Allocated != Bits Stored.

    Steps:
    1. Convert the data back to the original signed int16.
    2. Compute the number of bits to shift by (BitsShift = BitsAllocated - BitsStored).
    3. Left shift by BitsShift, then right shift by BitsShift.
    4. Convert the data back to the default dtype for MetaTensor (float32).

    By default, all DICOM files in the `rsna-2023-abdominal-trauma-detection` dataset are stored as
    - uint16 if Pixel Representation = 0
    - int16 if Pixel Representation = 1
    Reference: https://dicom.innolitics.com/ciods/rt-dose/image-pixel/00280103

    Warning:
    - Use this transform on the test set, where we expect DICOM series as input instead of NIfTI.
    - The MetaTensor passed in must carry the following DICOM metadata:
        - Pixel Representation
        - Bits Allocated
        - Bits Stored
    - To obtain a MetaTensor with this metadata, set the reader to PydicomReader with
      prune_metadata=False, i.e., monai.transforms.LoadImaged(..., reader=PydicomReader(prune_metadata=False)).
    """

    def __init__(self):
        self.DCM_ATTR2TAG = {
            "Bits Allocated": "00280100",  # http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0028,0100)
            "Bits Stored": "00280101",  # http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0028,0101)
            "Pixel Representation": "00280103",  # http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0028,0103)
        }

    def __call__(self, data):
        if not all(dcm_tag in data.meta for dcm_tag in self.DCM_ATTR2TAG.values()):
            raise KeyError(
                f"Attribute tags {self.DCM_ATTR2TAG} must exist in the DICOM metadata to use "
                f"`{self.__class__.__name__}`. Hint: set the reader to PydicomReader with "
                "prune_metadata=False, i.e., monai.transforms.LoadImaged(..., reader=PydicomReader(prune_metadata=False))."
            )
        pixel_representation = data.meta[self.DCM_ATTR2TAG["Pixel Representation"]][
            "Value"
        ][0]
        bits_allocated = data.meta[self.DCM_ATTR2TAG["Bits Allocated"]]["Value"][0]
        bits_stored = data.meta[self.DCM_ATTR2TAG["Bits Stored"]]["Value"][0]
        data = UnifyUnusualDICOM._standardize_dicom_pixels(
            data, pixel_representation, bits_allocated, bits_stored
        )
        return data

    @staticmethod
    def _standardize_dicom_pixels(
        data: torch.Tensor,
        pixel_representation: int,
        bits_allocated: int,
        bits_stored: int,
    ):
        bits_shift = bits_allocated - bits_stored

        if pixel_representation == 1 and bits_shift != 0:
            dtype_before = data.dtype
            dtype_shift = torch.int16
            data = data.to(dtype_shift)
            data = (data << bits_shift).to(dtype_shift) >> bits_shift
            data = data.to(dtype_before)
        return data

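# Worked example for _standardize_dicom_pixels (illustrative, not in the original commit):
# with Bits Allocated = 16, Bits Stored = 12, and Pixel Representation = 1, bits_shift = 4.
# A raw reading of 4095 (0x0FFF) is really the 12-bit two's-complement value -1:
#   int16(4095) << 4 wraps to -16, and -16 >> 4 (arithmetic shift) gives -1,
# so the left/right shift pair sign-extends 12-bit values into proper int16.
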
class UnifyUnusualDICOMd(MapTransform):
    def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False):
        super().__init__(keys, allow_missing_keys)
        self._unify_unusual_dicom = UnifyUnusualDICOM()

    def __call__(self, data):
        d = dict(data)
        for key in self.key_iterator(d):
            d[key] = self._unify_unusual_dicom(d[key])
        return d


class UnifyUnusualNIfTI(Transform):
    """
    Correct NIfTI pixel values if Pixel Representation == 1 and Bits Allocated != Bits Stored.

    Steps:
    1. Convert the data back to the original signed int16.
    2. Compute the number of bits to shift by (BitsShift = BitsAllocated - BitsStored).
    3. Left shift by BitsShift, then right shift by BitsShift.
    4. Convert the data back to the default dtype for MetaTensor (float32).

    By default, all DICOM files in the `rsna-2023-abdominal-trauma-detection` dataset are stored as
    - uint16 if Pixel Representation = 0
    - int16 if Pixel Representation = 1
    Reference: https://dicom.innolitics.com/ciods/rt-dose/image-pixel/00280103

    Warning:
    - This transform only works for DICOM series that have been converted to NIfTI format and
      for which a precomputed CSV file tracks the series with an unusual DICOM pixel
      representation (`potential_unusual_dicom_series_meta.csv`).
    - It is not applicable to data we have not preprocessed yet (e.g., the test set).
    - Use a different custom transform (e.g., `UnifyUnusualDICOM`) for the test set, since there
      we expect DICOM series as input instead of NIfTI.

    Why do we do this?
    - NIfTI files don't store the Pixel Representation, Bits Allocated, and Bits Stored metadata.
    - We convert to NIfTI in the first place to make data loading easier during the training phase.
    """

    def __init__(
        self,
        x_key: str = "img",
        metadata_key: str = "metadata",
        meta_pixel_representation_key: str = "pixel_representation",
        meta_bits_allocated_key: str = "bits_allocated",
        meta_bits_stored_key: str = "bits_stored",
    ):
        self.x_key = x_key
        self.metadata_key = metadata_key
        self.pixel_representation_key = meta_pixel_representation_key
        self.bits_allocated_key = meta_bits_allocated_key
        self.bits_stored_key = meta_bits_stored_key

    def __call__(self, data):
        if self.x_key not in data or self.metadata_key not in data:
            raise KeyError(
                f"Keys `{self.x_key}` and `{self.metadata_key}` required by transform "
                f"`{self.__class__.__name__}` were missing in the data."
            )

        if (
            self.pixel_representation_key not in data[self.metadata_key]
            or self.bits_allocated_key not in data[self.metadata_key]
            or self.bits_stored_key not in data[self.metadata_key]
        ):
            raise KeyError(
                f"Key `{self.pixel_representation_key}`, `{self.bits_allocated_key}`, or "
                f"`{self.bits_stored_key}` required by transform `{self.__class__.__name__}` "
                "was missing in the metadata."
            )

        data[self.x_key] = UnifyUnusualNIfTI._standardize_dicom_pixels(
            data[self.x_key],
            data[self.metadata_key][self.pixel_representation_key],
            data[self.metadata_key][self.bits_allocated_key],
            data[self.metadata_key][self.bits_stored_key],
        )

        return data

    @staticmethod
    def _standardize_dicom_pixels(
        data: torch.Tensor,
        pixel_representation: int,
        bits_allocated: int,
        bits_stored: int,
    ):
        bits_shift = bits_allocated - bits_stored

        if pixel_representation == 1 and bits_shift != 0:
            dtype_before = data.dtype
            dtype_shift = torch.int16
            data = data.to(dtype_shift)
            data = (data << bits_shift).to(dtype_shift) >> bits_shift
            data = data.to(dtype_before)
        return data

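# Minimal sketch of UnifyUnusualNIfTI on a toy tensor (illustrative, not in the original commit):
#   unify = UnifyUnusualNIfTI()
#   sample = {
#       "img": torch.tensor([[4095.0]]),
#       "metadata": {"pixel_representation": 1, "bits_allocated": 16, "bits_stored": 12},
#   }
#   unify(sample)["img"]  # tensor([[-1.]])
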
def volume_transforms(
    crop_strategy: Optional[
        Literal["oversample", "center", "random", "none"]
    ] = "oversample",
    voxel_spacing: tuple[float, float, float] = (3.0, 3.0, 3.0),
    volume_size: tuple[int, int, int] = (96, 96, 96),
    axcodes: str = "RAS",
    streaming: bool = False,
) -> monai.transforms.Compose:
    if crop_strategy == "oversample":
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["img"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["img"]),
                monai.transforms.EnsureTyped(
                    keys=["img"], data_type="tensor", dtype=torch.float32
                ),
                UnifyUnusualNIfTI(
                    x_key="img",
                    metadata_key="metadata",
                    meta_pixel_representation_key="pixel_representation",
                    meta_bits_allocated_key="bits_allocated",
                    meta_bits_stored_key="bits_stored",
                ),
                monai.transforms.EnsureChannelFirstd(keys=["img"]),
                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
                ),
                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
                monai.transforms.SpatialPadd(keys=["img"], spatial_size=volume_size),
                monai.transforms.RandSpatialCropSamplesd(
                    keys=["img"],
                    roi_size=volume_size,
                    num_samples=3,
                    random_center=True,
                    random_size=False,
                ),
            ]
        )

    elif crop_strategy == "center":
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["img"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["img"]),
                monai.transforms.EnsureTyped(
                    keys=["img"], data_type="tensor", dtype=torch.float32
                ),
                UnifyUnusualNIfTI(
                    x_key="img",
                    metadata_key="metadata",
                    meta_pixel_representation_key="pixel_representation",
                    meta_bits_allocated_key="bits_allocated",
                    meta_bits_stored_key="bits_stored",
                ),
                monai.transforms.EnsureChannelFirstd(keys=["img"]),
                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
                ),
                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
                monai.transforms.SpatialPadd(keys=["img"], spatial_size=volume_size),
                monai.transforms.CenterSpatialCropd(keys=["img"], roi_size=volume_size),
            ]
        )

    elif crop_strategy == "random":
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["img"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["img"]),
                monai.transforms.EnsureTyped(
                    keys=["img"], data_type="tensor", dtype=torch.float32
                ),
                UnifyUnusualNIfTI(
                    x_key="img",
                    metadata_key="metadata",
                    meta_pixel_representation_key="pixel_representation",
                    meta_bits_allocated_key="bits_allocated",
                    meta_bits_stored_key="bits_stored",
                ),
                monai.transforms.EnsureChannelFirstd(keys=["img"]),
                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
                ),
                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
                monai.transforms.SpatialPadd(keys=["img"], spatial_size=volume_size),
                monai.transforms.RandSpatialCropd(
                    keys=["img"],
                    roi_size=volume_size,
                    random_center=True,
                    random_size=False,
                ),
            ]
        )

    elif crop_strategy == "none" or crop_strategy is None:
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["img"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["img"]),
                monai.transforms.EnsureTyped(
                    keys=["img"], data_type="tensor", dtype=torch.float32
                ),
                UnifyUnusualNIfTI(
                    x_key="img",
                    metadata_key="metadata",
                    meta_pixel_representation_key="pixel_representation",
                    meta_bits_allocated_key="bits_allocated",
                    meta_bits_stored_key="bits_stored",
                ),
                monai.transforms.EnsureChannelFirstd(keys=["img"]),
                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
                ),
                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
            ]
        )

    else:
        raise ValueError(
            f"crop_strategy must be one of ['oversample', 'center', 'random', 'none'], got {crop_strategy}."
        )


def mask_transforms(
    crop_strategy: Optional[Literal["oversample", "center", "none"]] = "oversample",
    voxel_spacing: tuple[float, float, float] = (3.0, 3.0, 3.0),
    volume_size: tuple[int, int, int] = (96, 96, 96),
    axcodes: str = "RAS",
    streaming: bool = False,
) -> monai.transforms.Compose:
    if crop_strategy == "oversample":
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["seg"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["seg"]),
                monai.transforms.EnsureTyped(
                    keys=["seg"], data_type="tensor", dtype=torch.float32
                ),
                monai.transforms.EnsureChannelFirstd(keys=["seg"]),
                monai.transforms.Orientationd(keys=["seg"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["seg"], pixdim=voxel_spacing, mode=["nearest"]
                ),
                monai.transforms.SpatialPadd(keys=["seg"], spatial_size=volume_size),
                monai.transforms.RandSpatialCropSamplesd(
                    keys=["seg"],
                    roi_size=volume_size,
                    num_samples=3,
                    random_center=True,
                    random_size=False,
                ),
            ]
        )

    elif crop_strategy == "center":
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["seg"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["seg"]),
                monai.transforms.EnsureTyped(
                    keys=["seg"], data_type="tensor", dtype=torch.float32
                ),
                monai.transforms.EnsureChannelFirstd(keys=["seg"]),
                monai.transforms.Orientationd(keys=["seg"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["seg"], pixdim=voxel_spacing, mode=["nearest"]
                ),
                monai.transforms.SpatialPadd(keys=["seg"], spatial_size=volume_size),
                monai.transforms.CenterSpatialCropd(keys=["seg"], roi_size=volume_size),
            ]
        )

    elif crop_strategy == "none" or crop_strategy is None:
        return monai.transforms.Compose(
            [
                LoadNIfTIFromHFHubd(keys=["seg"])
                if streaming
                else LoadNIfTIFromLocalCached(keys=["seg"]),
                monai.transforms.EnsureTyped(
                    keys=["seg"], data_type="tensor", dtype=torch.float32
                ),
                monai.transforms.EnsureChannelFirstd(keys=["seg"]),
                monai.transforms.Orientationd(keys=["seg"], axcodes=axcodes),
                monai.transforms.Spacingd(
                    keys=["seg"], pixdim=voxel_spacing, mode=["nearest"]
                ),
            ]
        )
    else:
        raise ValueError(
            f"crop_strategy must be one of ['oversample', 'center', 'none'], got {crop_strategy}."
        )


def slice_transforms(
    crop_strategy: Literal["ten", "five", "center", "random"] = "ten",
    shorter_edge_length: int = 256,
    slice_size: tuple[int, int] = (224, 224),
) -> torchvision.transforms.Compose:
    if crop_strategy == "ten":
        return torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize(size=shorter_edge_length, antialias=True),
                torchvision.transforms.TenCrop(size=slice_size),
            ]
        )

    elif crop_strategy == "five":
        return torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize(size=shorter_edge_length, antialias=True),
                torchvision.transforms.FiveCrop(size=slice_size),
            ]
        )

    elif crop_strategy == "center":
        return torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize(size=shorter_edge_length, antialias=True),
                torchvision.transforms.CenterCrop(size=slice_size),
            ]
        )
    elif crop_strategy == "random":
        return torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize(size=shorter_edge_length, antialias=True),
                torchvision.transforms.RandomCrop(size=slice_size),
            ]
        )

    else:
        raise ValueError(
            f"crop_strategy must be one of ['ten', 'five', 'center', 'random'], got {crop_strategy}."
        )