Luffy503 commited on
Commit
d5ba135
·
verified ·
1 Parent(s): 6ca1968

Upload 6 files

Browse files
preprocess_3D_to_2D/README.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *Step 1: Normalize 3D images*
2
+
3
+ For CT, use the intensity window [-175, 250] for abdomen or [-1000, 500] for chest:
4
+
5
+ ```
6
+ python normalize_CT.py --test_data_path $YOUR_3D_IMAGE_PATH --save_path $YOUR_PATH_SAVE_NORMALIZE_DATA --a_min -175 --a_max 250
7
+ ```
8
+
9
+ MRI:
10
+
11
+ ```
12
+ python normalize_MRI.py --test_data_path $YOUR_3D_IMAGE_PATH --save_path $YOUR_PATH_SAVE_NORMALIZE_DATA
13
+ ```
14
+
15
+ *Step 2: Pre-process slices*
16
+
17
+ You need to modify the label keys (`label_dict`) in preprocess_2D_slices.py to match your own dataset.
18
+
19
+ ```
20
+ python preprocess_2D_slices.py
21
+ ```
preprocess_3D_to_2D/create_annotations.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image # (pip install Pillow)
2
+ import numpy as np # (pip install numpy)
3
+ from skimage import measure # (pip install scikit-image)
4
+ #from shapely.geometry import Polygon, MultiPolygon # (pip install Shapely)
5
+ import os
6
+ import json
7
+
8
def create_sub_masks(mask_image, width, height):
    """Split a color-coded mask image into one binary mask per RGB color.

    Returns a dict mapping ``str(rgb_tuple)`` to a mode-"1" PIL image of
    size (width+2, height+2): each sub-mask carries one pixel of padding on
    every side so downstream contour finding does not break on objects that
    bleed to the image border.
    """
    sub_masks = {}
    for col in range(width):
        for row in range(height):
            # Drop any alpha channel; sub-masks are keyed by the RGB triple.
            rgb_key = str(mask_image.getpixel((col, row))[:3])

            if rgb_key not in sub_masks:
                # Lazily allocate a fresh padded 1-bit canvas for this color.
                sub_masks[rgb_key] = Image.new("1", (width + 2, height + 2))

            # The +1 offsets account for the one-pixel padding border.
            sub_masks[rgb_key].putpixel((col + 1, row + 1), 1)

    return sub_masks
30
+
31
+ # def create_sub_mask_annotation(sub_mask):
32
+ # # Find contours (boundary lines) around each sub-mask
33
+ # # Note: there could be multiple contours if the object
34
+ # # is partially occluded. (E.g. an elephant behind a tree)
35
+ # contours = measure.find_contours(np.array(sub_mask), 0.5, positive_orientation="low")
36
+
37
+ # polygons = []
38
+ # segmentations = []
39
+ # for contour in contours:
40
+ # # Flip from (row, col) representation to (x, y)
41
+ # # and subtract the padding pixel
42
+ # for i in range(len(contour)):
43
+ # row, col = contour[i]
44
+ # contour[i] = (col - 1, row - 1)
45
+
46
+ # # Make a polygon and simplify it
47
+ # poly = Polygon(contour)
48
+ # if poly.length > 100:
49
+ # poly = poly.simplify(0.5, preserve_topology=True)
50
+
51
+ # if(poly.is_empty):
52
+ # # Go to next iteration, dont save empty values in list
53
+ # continue
54
+
55
+ # polygons.append(poly)
56
+
57
+ # segmentation = np.array(poly.exterior.coords).ravel().tolist()
58
+ # segmentations.append(segmentation)
59
+
60
+ # return polygons, segmentations
61
+
62
def create_category_annotation(category_dict):
    """Build the COCO "categories" section from a ``{name: id}`` mapping.

    Each entry becomes ``{"supercategory": name, "id": id, "name": name}``;
    the supercategory simply mirrors the name.
    """
    return [
        {"supercategory": name, "id": cat_id, "name": name}
        for name, cat_id in category_dict.items()
    ]
74
+
75
def create_image_annotation(file_name, width, height, image_id):
    """Build a single COCO "images" entry for one 2D slice."""
    return {
        "file_name": file_name,
        "height": height,
        "width": width,
        "id": image_id,
    }
84
+
85
def create_annotation_format(polygon, segmentation, image_id, category_id, annotation_id):
    """Build a single COCO "annotations" entry.

    *polygon* must expose shapely-style ``bounds`` (min_x, min_y, max_x,
    max_y) and ``area``; bounds are converted to the COCO
    (x, y, width, height) bbox convention.
    """
    x0, y0, x1, y1 = polygon.bounds
    return {
        "segmentation": segmentation,
        "area": polygon.area,
        "iscrowd": 0,
        "image_id": image_id,
        "bbox": (x0, y0, x1 - x0, y1 - y0),
        "category_id": category_id,
        "id": annotation_id,
    }
103
+
104
def get_coco_json_format():
    """Return a fresh skeleton dict in the standard COCO layout.

    The three list sections start with a single empty-dict placeholder;
    callers are expected to overwrite them wholesale.
    """
    return {
        "info": {},
        "licenses": [],
        "images": [{}],
        "categories": [{}],
        "annotations": [{}],
    }
preprocess_3D_to_2D/create_customer_datasets.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ from tqdm import tqdm
3
+ import pandas as pd
4
+
5
+ from create_annotations import *
6
+
7
+
8
# provide the path to the dataset. There should be train, train_mask, test, test_mask under this folder

# Edge length (pixels) of every processed 2D slice; must match the resize
# target used in preprocess_2D_slices.py.
image_size = 1024


### Load Biomed Label Base
# provide path to predefined label base
# label_base.json maps a numeric class id (string key) to a record with a
# canonical "name" and an optional list of "child" synonyms — TODO confirm schema.
with open('label_base.json', 'r') as f:
    label_base = json.load(f)


# get parent class for the names:
# map every canonical name AND each of its child synonyms back to the
# integer id of the parent class.
parent_class = {}
for i in label_base:
    subnames = [label_base[i]['name']] + label_base[i].get('child', [])
    for label in subnames:
        parent_class[label] = int(i)

# Label ids of the dataset (canonical name -> integer category id)
category_ids = {label_base[i]['name']: int(i) for i in label_base if 'name' in label_base[i]}
28
+
29
+ # Get "images" and "annotations" info
30
def images_annotations_info(maskpath, keyword):
    """Scan every mask PNG under *maskpath* and build COCO sections.

    Mask files are expected to be named ``<image stem>_<target>.png`` where
    ``<target>`` encodes spaces as ``+`` (e.g. ``left+kidney``), and the
    image stem is expected to end in ``..._<modality>_<site>`` — TODO confirm
    this naming convention against preprocess_2D_slices.py output.

    Args:
        maskpath: directory of per-class binary mask PNGs (e.g. "train_mask/").
        keyword: split name ("train" or "test"), stored on each annotation.

    Returns:
        (images, annotations, annotation_id): the COCO "images" list, the
        "annotations" list (with referring-expression fields added), and
        the number of annotations created.
    """

    # The image folder is the sibling of the mask folder ("train_mask" -> "train").
    imagepath = maskpath.replace('_mask', '')
    # This id will be automatically increased as we go
    annotation_id = 0

    # Running counters for referring-expression sentence / reference ids.
    sent_id = 0
    ref_id = 0

    annotations = []
    images = []
    image_to_id = {}
    n_total = len(glob.glob(maskpath + "*.png"))
    n_errors = 0

    def extra_annotation(ann, file_name, target):
        # Enrich a base COCO annotation with referring-expression metadata:
        # source file name, split, prompt sentences, sentence/reference ids.
        nonlocal sent_id, ref_id
        ann['file_name'] = file_name
        ann['split'] = keyword

        ### modality: second-to-last "_"-separated token of the stem
        mod = file_name.split('.')[0].split('_')[-2]
        ### site: last token of the stem
        site = file_name.split('.')[0].split('_')[-1]

        task = {'target': target, 'modality': mod, 'site': site}
        # MRI sequences (T1/T2/FLAIR/ADC) are normalized to modality "MRI",
        # keeping the raw token (or its tail) as the sequence name.
        # NOTE(review): `task` is built but never attached to the annotation
        # or returned — presumably leftover; confirm intent.
        if 'T1' in mod or 'T2' in mod or 'FLAIR' in mod or 'ADC' in mod:
            task['modality'] = 'MRI'
            if 'MRI' not in mod:
                task['sequence'] = mod
            else:
                task['sequence'] = mod[4:]

        prompts = [f'{target} in {site} {mod}']

        ann['sentences'] = []
        for p in prompts:
            ann['sentences'].append({'raw': p, 'sent': p, 'sent_id': sent_id})
            sent_id += 1
        ann['sent_ids'] = [s['sent_id'] for s in ann['sentences']]

        ann['ann_id'] = ann['id']
        ann['ref_id'] = ref_id
        ref_id += 1

        return ann

    for mask_image in tqdm(glob.glob(maskpath + "*.png")):
        # Recover the source image file name (also .png) by dropping the
        # trailing "_<target>" token from the mask file name.
        filename_parsed = os.path.basename(mask_image).split("_")
        # Last token minus extension is the class name; '+' encodes spaces.
        target_name = filename_parsed[-1].split(".")[0].replace("+", " ")

        original_file_name = "_".join(filename_parsed[:-1]) + ".png"

        # Skip masks whose source image is missing.
        # NOTE(review): os.listdir() is re-run for every mask file, making
        # this check O(n^2) over the dataset — candidate for hoisting into a set.
        if original_file_name not in os.listdir(imagepath):
            print("Original file not found: {}".format(original_file_name))
            n_errors += 1
            print(n_errors)
            continue

        if original_file_name not in image_to_id:
            image_to_id[original_file_name] = len(image_to_id)

            # "images" info — appended only once per distinct source image
            image_id = image_to_id[original_file_name]
            image = create_image_annotation(original_file_name, image_size, image_size, image_id)
            images.append(image)

        annotation = {
            "mask_file": os.path.basename(mask_image),
            "iscrowd": 0,
            "image_id": image_to_id[original_file_name],
            "category_id": parent_class[target_name],
            "id": annotation_id,
        }

        annotation = extra_annotation(annotation, original_file_name, target_name)

        annotations.append(annotation)
        annotation_id += 1

    #print(f"Number of errors in conversion: {n_errors}/{n_total}")
    return images, annotations, annotation_id
115
+
116
+
117
def create(targetpath):
    """Write COCO-style ``train.json`` and ``test.json`` under *targetpath*.

    For each split the masks in ``{split}_mask/`` are converted into COCO
    "images"/"annotations" sections; images that ended up with no
    annotation are dropped before the JSON is written.

    Args:
        targetpath: dataset root containing train/, train_mask/, test/,
            test_mask/ directories.
    """
    # Get the standard COCO JSON format
    coco_format = get_coco_json_format()

    for keyword in ['train', 'test']:
        mask_path = os.path.join(targetpath, "{}_mask/".format(keyword))

        # Create category section
        coco_format["categories"] = create_category_annotation(category_ids)

        # Create images and annotations sections
        coco_format["images"], coco_format["annotations"], annotation_cnt = images_annotations_info(mask_path, keyword)

        # Drop images that received no annotation.
        # BUGFIX: the original removed entries from coco_format['images']
        # while iterating over the same list, which silently skips the
        # element that follows each removal; rebuilding the list filters
        # every image correctly.
        images_with_ann = set()
        for ann in coco_format['annotations']:
            images_with_ann.add(ann['file_name'])
        coco_format['images'] = [im for im in coco_format['images']
                                 if im["file_name"] in images_with_ann]

        with open(os.path.join(targetpath, "{}.json".format(keyword)), "w") as outfile:
            json.dump(coco_format, outfile)

        print("Created %d annotations for %d images in folder: %s" % (annotation_cnt, len(coco_format['images']), mask_path))
preprocess_3D_to_2D/normalize_CT.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 - 2022 MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ import argparse
13
+ import os
14
+ from functools import partial
15
+ import nibabel as nib
16
+ import numpy as np
17
+ import torch
18
+ import torch.nn.functional as F
19
+ from torch.cuda.amp import GradScaler, autocast
20
+ import SimpleITK as sitk
21
+ from monai.inferers import sliding_window_inference
22
+ # from monai.data import decollate_batch
23
+ from monai.losses import DiceCELoss
24
+ from monai.metrics import DiceMetric
25
+ from monai.networks.nets import SwinUNETR
26
+ from monai.transforms import *
27
+ from monai.utils.enums import MetricReduction
28
+ from monai.handlers import StatsHandler, from_engine
29
+ import matplotlib.pyplot as plt
30
+ from PIL import Image
31
+ from monai import data, transforms
32
+ from monai.data import *
33
+ import resource
34
+
35
# Raise the soft open-file limit: DataLoader worker processes can hold many
# file descriptors open at once while streaming NIfTI volumes.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
print('Setting resource limit:', str(resource.getrlimit(resource.RLIMIT_NOFILE)))

# Rendezvous address for torch.distributed — only relevant with --distributed.
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '28890'

parser = argparse.ArgumentParser(description="process 3d to 2d")
parser.add_argument(
    "--test_data_path", default="/data/imagesTr/", type=str,
    help="The path to 3d image")

parser.add_argument(
    "--save_path", default="/data/YOUR_DATASET_NAME/process_image/", type=str,
    help="The path to save 2d image")

# Default cubic ROI edge length fed to the --roi_x/y/z defaults below.
roi = 96
parser.add_argument("--use_normal_dataset", default=True, help="use monai Dataset class")
parser.add_argument("--feature_size", default=48, type=int, help="feature size")
parser.add_argument("--batch_size", default=1, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=1, type=int, help="number of sliding window batch size")
parser.add_argument("--infer_overlap", default=0.75, type=float, help="sliding window inference overlap")
parser.add_argument("--in_channels", default=1, type=int, help="number of input channels")
parser.add_argument("--out_channels", default=7, type=int, help="number of output channels")
# CT intensity window: [-175, 250] for abdomen, [-1000, 500] for chest (see README).
parser.add_argument("--a_min", default=-175.0, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=250.0, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=1.5, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=roi, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=roi, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=roi, type=int, help="roi size in z direction")
parser.add_argument("--dropout_rate", default=0.0, type=float, help="dropout rate")
parser.add_argument("--distributed", action="store_true", help="start distributed training")
parser.add_argument("--workers", default=4, type=int, help="number of workers")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--use_checkpoint", default=True, help="use gradient checkpointing to save memory")
parser.add_argument("--rank", default=0, type=int, help="node rank for distributed training")
75
+
76
+
77
def check_dir(dir):
    """Create directory *dir* (including parents) if it does not exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation — the original check-then-create was
    vulnerable to a TOCTOU race when multiple workers start together.
    """
    os.makedirs(dir, exist_ok=True)
80
+
81
+
82
def get_test_loader(args):
    """
    Build the CT normalization transform pipeline and a DataLoader over
    every 3D volume in ``args.test_data_path`` that is not already present
    in ``args.save_path`` (so interrupted runs can resume).

    Args:
        args: parsed command-line arguments; uses test_data_path, save_path,
            the spacing/intensity/ROI hyper-parameters and workers.

    Returns:
        Tuple ``(test_loader, test_transforms)``; the transforms are
        returned too so main() can invert them before saving.
    """
    test_transforms = transforms.Compose([
        LoadImaged(keys=["image"]),
        EnsureChannelFirstd(keys=["image"]),
        Orientationd(keys=["image"], axcodes="RAS"),
        Spacingd(keys=["image"], pixdim=(args.space_x, args.space_y, args.space_z),
                 mode=("bilinear")),
        # Window CT intensities to [a_min, a_max] and rescale to
        # [b_min, b_max].  BUGFIX: b_min/b_max were hard-coded to 0.0/1.0,
        # silently ignoring the --b_min/--b_max flags; the defaults are
        # identical, so behavior only changes when the flags are passed.
        ScaleIntensityRanged(
            keys=["image"],
            a_min=args.a_min,
            a_max=args.a_max,
            b_min=args.b_min,
            b_max=args.b_max,
            clip=True,
        ),
        CropForegroundd(keys=["image"], source_key="image"),
        SpatialPadd(keys=["image"], spatial_size=(args.roi_x, args.roi_y, args.roi_z),
                    mode='constant'),
    ])

    check_dir(args.save_path)
    # Set membership is O(1); the original tested each file against a list,
    # making the resume check O(n^2) over large datasets.
    already_done = set(os.listdir(args.save_path))

    data_dicts_test = []
    for name in os.listdir(args.test_data_path):
        if name in already_done:
            continue  # already normalized in a previous run
        print(name)
        data_dicts_test.append({'image': os.path.join(args.test_data_path, name),
                                'name': name})

    print('test len {}'.format(len(data_dicts_test)))

    test_ds = Dataset(data=data_dicts_test, transform=test_transforms)
    test_loader = DataLoader(
        test_ds, batch_size=1, shuffle=False, num_workers=args.workers, sampler=None, pin_memory=True
    )
    return test_loader, test_transforms
140
+
141
+
142
def main():
    """Normalize every 3D CT volume and save it back as NIfTI.

    Each volume is loaded through the test transforms (resample, intensity
    windowing, crop, pad), then those spatial transforms are inverted so
    the saved image is back in its original geometry with only the
    intensity normalization applied, and written to ``args.save_path``.
    """
    args = parser.parse_args()

    test_loader, test_transforms = get_test_loader(args)

    # Invert the spatial preprocessing, then save; SaveImaged writes
    # "<stem>_trans.nii.gz" into save_path (renamed below).
    post_ori_transforms = Compose([EnsureTyped(keys=["image"]),
                                   Invertd(keys=["image"],
                                           transform=test_transforms,
                                           orig_keys="image",
                                           meta_keys="image_meta_dict",
                                           orig_meta_keys="image_meta_dict",
                                           meta_key_postfix="meta_dict",
                                           nearest_interp=True,
                                           to_tensor=True),
                                   SaveImaged(keys="image", meta_keys="img_meta_dict",
                                              output_dir=args.save_path,
                                              separate_folder=False, folder_layout=None,
                                              resample=False),
                                   ])

    num = 0  # NOTE(review): unused counter, kept as-is
    with torch.no_grad():
        for idx, batch_data in enumerate(test_loader):
            img = batch_data["image"]  # NOTE(review): unused; processing happens via decollate below

            name = batch_data['name'][0]
            with autocast(enabled=True):

                # Apply the invert+save pipeline per (single-item) batch element.
                for i in decollate_batch(batch_data):
                    post_ori_transforms(i)

            # Strip SaveImaged's "_trans" postfix; the [:-7] slice assumes
            # the input file name ends in ".nii.gz" — TODO confirm inputs.
            os.rename(os.path.join(args.save_path, name.split('/')[-1][:-7] + '_trans.nii.gz'),
                      os.path.join(args.save_path, name.split('/')[-1][:-7] + '.nii.gz'))


if __name__ == "__main__":
    main()
preprocess_3D_to_2D/normalize_MRI.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 - 2022 MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ import argparse
13
+ import os
14
+ from functools import partial
15
+ import nibabel as nib
16
+ import numpy as np
17
+ import torch
18
+ import torch.nn.functional as F
19
+ from torch.cuda.amp import GradScaler, autocast
20
+ import SimpleITK as sitk
21
+ from monai.inferers import sliding_window_inference
22
+ # from monai.data import decollate_batch
23
+ from monai.losses import DiceCELoss
24
+ from monai.metrics import DiceMetric
25
+ from monai.networks.nets import SwinUNETR
26
+ from monai.transforms import *
27
+ from monai.utils.enums import MetricReduction
28
+ from monai.handlers import StatsHandler, from_engine
29
+ import matplotlib.pyplot as plt
30
+ from PIL import Image
31
+ from monai import data, transforms
32
+ from monai.data import *
33
+ import resource
34
+
35
# Raise the soft open-file limit: DataLoader worker processes can hold many
# file descriptors open at once while streaming NIfTI volumes.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
print('Setting resource limit:', str(resource.getrlimit(resource.RLIMIT_NOFILE)))

# Rendezvous address for torch.distributed — only relevant with --distributed.
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '28890'

parser = argparse.ArgumentParser(description="process 3d to 2d")
parser.add_argument(
    "--test_data_path", default="/data/imagesTr/", type=str,
    help="The path to 3d image")

parser.add_argument(
    "--save_path", default="/data/YOUR_DATASET_NAME/process_image/", type=str,
    help="The path to save 2d image")

# Default cubic ROI edge length fed to the --roi_x/y/z defaults below.
roi = 96
parser.add_argument("--use_normal_dataset", default=True, help="use monai Dataset class")
parser.add_argument("--feature_size", default=48, type=int, help="feature size")
parser.add_argument("--batch_size", default=1, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=1, type=int, help="number of sliding window batch size")
parser.add_argument("--infer_overlap", default=0.75, type=float, help="sliding window inference overlap")
parser.add_argument("--in_channels", default=1, type=int, help="number of input channels")
parser.add_argument("--out_channels", default=7, type=int, help="number of output channels")
# NOTE: a_min/a_max/b_min/b_max are retained from the CT script for CLI
# compatibility; the MRI pipeline below uses NormalizeIntensityd instead.
parser.add_argument("--a_min", default=-175.0, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=250.0, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=1.5, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=roi, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=roi, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=roi, type=int, help="roi size in z direction")
parser.add_argument("--dropout_rate", default=0.0, type=float, help="dropout rate")
parser.add_argument("--distributed", action="store_true", help="start distributed training")
parser.add_argument("--workers", default=4, type=int, help="number of workers")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--use_checkpoint", default=True, help="use gradient checkpointing to save memory")
parser.add_argument("--rank", default=0, type=int, help="node rank for distributed training")
75
+
76
+
77
def check_dir(dir):
    """Create directory *dir* (including parents) if it does not exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation — the original check-then-create was
    vulnerable to a TOCTOU race when multiple workers start together.
    """
    os.makedirs(dir, exist_ok=True)
80
+
81
+
82
def get_test_loader(args):
    """
    Build the MRI normalization transform pipeline and a DataLoader over
    every 3D volume in ``args.test_data_path`` that is not already present
    in ``args.save_path`` (so interrupted runs can resume).

    Args:
        args: parsed command-line arguments; uses test_data_path, save_path,
            the spacing/ROI hyper-parameters and workers.

    Returns:
        Tuple ``(test_loader, test_transforms)``; the transforms are
        returned too so main() can invert them before saving.
    """
    test_transforms = transforms.Compose([
        LoadImaged(keys=["image"]),
        EnsureChannelFirstd(keys=["image"]),
        Orientationd(keys=["image"], axcodes="RAS"),
        Spacingd(keys=["image"], pixdim=(args.space_x, args.space_y, args.space_z),
                 mode=("bilinear")),
        # MRI has no fixed intensity scale, so z-score normalize the
        # non-zero voxels instead of windowing (contrast with normalize_CT.py).
        NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
        CropForegroundd(keys=["image"], source_key="image"),
        SpatialPadd(keys=["image"], spatial_size=(args.roi_x, args.roi_y, args.roi_z),
                    mode='constant'),
    ])

    check_dir(args.save_path)
    # Set membership is O(1); the original tested each file against a list,
    # making the resume check O(n^2) over large datasets.
    already_done = set(os.listdir(args.save_path))

    data_dicts_test = []
    for name in os.listdir(args.test_data_path):
        if name in already_done:
            continue  # already normalized in a previous run
        print(name)
        data_dicts_test.append({'image': os.path.join(args.test_data_path, name),
                                'name': name})

    print('test len {}'.format(len(data_dicts_test)))

    test_ds = Dataset(data=data_dicts_test, transform=test_transforms)
    test_loader = DataLoader(
        test_ds, batch_size=1, shuffle=False, num_workers=args.workers, sampler=None, pin_memory=True
    )
    return test_loader, test_transforms
133
+
134
+
135
def main():
    """Normalize every 3D MRI volume and save it back as NIfTI.

    Each volume is loaded through the test transforms (resample, z-score
    normalization, crop, pad), then those spatial transforms are inverted
    so the saved image is back in its original geometry with only the
    intensity normalization applied, and written to ``args.save_path``.
    """
    args = parser.parse_args()

    test_loader, test_transforms = get_test_loader(args)

    # Invert the spatial preprocessing, then save; SaveImaged writes
    # "<stem>_trans.nii.gz" into save_path (renamed below).
    post_ori_transforms = Compose([EnsureTyped(keys=["image"]),
                                   Invertd(keys=["image"],
                                           transform=test_transforms,
                                           orig_keys="image",
                                           meta_keys="image_meta_dict",
                                           orig_meta_keys="image_meta_dict",
                                           meta_key_postfix="meta_dict",
                                           nearest_interp=True,
                                           to_tensor=True),
                                   SaveImaged(keys="image", meta_keys="img_meta_dict",
                                              output_dir=args.save_path,
                                              separate_folder=False, folder_layout=None,
                                              resample=False),
                                   ])

    num = 0  # NOTE(review): unused counter, kept as-is
    with torch.no_grad():
        for idx, batch_data in enumerate(test_loader):
            img = batch_data["image"]  # NOTE(review): unused; processing happens via decollate below

            name = batch_data['name'][0]
            with autocast(enabled=True):

                # Apply the invert+save pipeline per (single-item) batch element.
                for i in decollate_batch(batch_data):
                    post_ori_transforms(i)

            # Strip SaveImaged's "_trans" postfix; the [:-7] slice assumes
            # the input file name ends in ".nii.gz" — TODO confirm inputs.
            os.rename(os.path.join(args.save_path, name.split('/')[-1][:-7] + '_trans.nii.gz'),
                      os.path.join(args.save_path, name.split('/')[-1][:-7] + '.nii.gz'))


if __name__ == "__main__":
    main()
preprocess_3D_to_2D/preprocess_2D_slices.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import numpy as np
4
+ import SimpleITK as sitk
5
+ from PIL import Image
6
+
7
# Name used as the prefix of every generated 2D slice file.
dataset_name = 'YOUR_DATASET_NAME'
root = '/data/YOUR_DATASET_NAME/'

# Normalized 3D volumes (output of normalize_CT.py / normalize_MRI.py) and
# the directory the per-slice 2D images are written to.
path_3d = root + 'process_image'
path_2d = root + 'process_image_2d'

# 3D label volumes and the directory for per-class 2D mask slices.
path_3d_label = root + 'labelsTr'
path_2d_label = root + 'process_label_2d'

# Slice file suffix encoding modality and body site; for mask slices the
# class name is appended before ".png" (see exe_each_label).
suffix = '_CT_abdomen.png'

# Final dataset root: train/, train_mask/, test/, test_mask/ are created here.
target_path = '/data/'

# define your label dict, for example (class name -> voxel value in labelsTr;
# '+' stands in for spaces in file names)
label_dict = {'liver': 1, 'esophagus': 10, 'stomach': 11, 'duodenum': 12, 'left+kidney': 13, 'right+kidney': 2,
              'spleen': 3, 'pancreas': 4, 'aorta': 5, 'inferior+vena+cava': 6,
              'right+adrenal+gland': 7, 'left+adrenal+gland': 8, 'gallbladder': 9}
24
+
25
+
26
def read(img, transpose=False):
    """Load a medical image from disk with SimpleITK.

    Returns ``(array, direction, origin, spacing)`` where ``array`` holds
    the voxel data as a numpy array.  With *transpose* true, the
    depth-first (z, y, x) array is rearranged to (y, x, z) so slices can
    be indexed along the last axis.
    """
    itk_image = sitk.ReadImage(img)
    direction = itk_image.GetDirection()
    origin = itk_image.GetOrigin()
    spacing = itk_image.GetSpacing()

    voxels = sitk.GetArrayFromImage(itk_image)
    if transpose:
        voxels = voxels.transpose(1, 2, 0)

    return voxels, direction, origin, spacing
37
+
38
+
39
def check_dir(dir):
    """Create directory *dir* (including parents) if it does not exist.

    Uses ``exist_ok=True`` so the multiprocessing workers in this script
    cannot race between the existence check and the creation (the original
    check-then-create was a TOCTOU race).
    """
    os.makedirs(dir, exist_ok=True)
42
+
43
+
44
def find_index(path, target):
    """Return the position of *target* within the sorted listing of *path*.

    NOTE(review): on a miss this returns an error *string* instead of
    raising, so callers that embed the result via ``str(...)`` would
    silently get the message in a file name.  Behavior kept as-is for
    compatibility.
    """
    entries = sorted(os.listdir(path))

    try:
        return entries.index(target)
    except ValueError:
        return f"Element {target} not found in the list."
53
+
54
+
55
def exe_each_image(i):
    """Convert one normalized 3D volume into per-slice 2D PNG images.

    The volume named *i* (relative to ``path_3d``) is scaled from [0, 1]
    to 8-bit, then every slice along the last axis is written to
    ``path_2d`` as ``<dataset>-<volume index>-<slice index><suffix>``.
    """
    volume = read(os.path.join(path_3d, i), True)[0]
    volume = (volume * 255).astype(np.uint8)
    print(volume.shape)

    n_slices = volume.shape[2]
    stem = dataset_name + '-' + str(find_index(path_3d, i)) + '-'

    for slice_idx in range(n_slices):
        slice_img = Image.fromarray(volume[:, :, slice_idx])
        slice_img.save(os.path.join(path_2d, stem + str(slice_idx) + suffix))
72
+
73
+
74
def trans_3d_to_2d_image():
    """Slice every 3D volume under ``path_3d`` into 2D PNGs (5 workers)."""
    check_dir(path_2d)

    volumes = sorted(os.listdir(path_3d))

    import multiprocessing
    with multiprocessing.Pool(5) as pool:
        # chunksize=1: one volume per task so a slow volume can't stall a batch
        pool.map(exe_each_image, volumes, 1)
83
+
84
+
85
def exe_each_label(i):
    """Convert one 3D label volume into per-class, per-slice binary masks.

    For every entry of ``label_dict`` a binary volume is extracted (255
    where the voxel equals the class value, 0 elsewhere) and each slice
    that contains foreground is saved to ``path_2d_label`` as
    ``<dataset>-<volume index>-<slice index><suffix stem>_<class>.png``.
    Empty slices are skipped so only informative masks are written.
    """
    old_lab = read(os.path.join(path_3d_label, i), True)[0].astype(np.uint8)

    # Hoisted out of the per-class loop: find_index() re-lists and re-sorts
    # the whole directory, so the original call per label key cost
    # O(len(label_dict) * n log n) directory scans per volume.
    idx = find_index(path_3d_label, i)
    name = dataset_name + '-' + str(idx) + '-'

    c = old_lab.shape[2]

    for label_key, label_value in label_dict.items():
        # Equivalent to the original copy / zero-positives / set-class-to-1
        # sequence: a direct equality test yields the same binary volume.
        lab = (old_lab == label_value).astype(np.uint8) * 255
        print(lab.shape)

        suffix_class = suffix[:-4] + '_' + label_key + '.png'

        for j in range(c):
            la = lab[:, :, j]

            # Skip slices with no foreground for this class.
            if la.sum() > 0:
                la_name = name + str(j) + suffix_class
                Image.fromarray(la).save(os.path.join(path_2d_label, la_name))
114
+
115
+
116
def trans_3d_to_2d_label():
    """Slice every 3D label volume under ``path_3d_label`` into 2D masks."""
    check_dir(path_2d_label)

    label_volumes = sorted(os.listdir(path_3d_label))

    import multiprocessing
    with multiprocessing.Pool(5) as pool:
        # chunksize=1: one volume per task so a slow volume can't stall a batch
        pool.map(exe_each_label, label_volumes, 1)
125
+
126
+
127
def shuffle_to_get_test():
    """Carve a test split out of the staged training data, in place.

    Volume ids are parsed from the slice file names
    (``<dataset>-<volume id>-...``); a random 20% of the distinct volume
    ids are selected and every image/mask slice belonging to them is moved
    from train(/train_mask) to test(/test_mask), so the split is by volume
    rather than by slice.
    """

    train_path = os.path.join(target_path+dataset_name, 'train')
    train_mask_path = os.path.join(target_path+dataset_name, 'train_mask')

    test_path = os.path.join(target_path+dataset_name, 'test')
    test_mask_path = os.path.join(target_path+dataset_name, 'test_mask')

    check_dir(test_path), check_dir(test_mask_path)

    ls = os.listdir(train_path)
    ls.sort()

    prefix = dataset_name + '-'

    # Extract the volume id token from each slice file name.
    new_ls = []
    for i in ls:
        name = i[len(prefix):].split('-')[0]
        new_ls.append(name)

    # Deduplicate the volume ids, then shuffle so the split is random.
    new_ls.sort()
    new_ls = list(np.unique(new_ls))
    print(new_ls)
    import random
    random.shuffle(new_ls)

    # First 20% of the shuffled volume ids become the test set.
    to_test_ls = []

    for j in new_ls[:len(new_ls)//5]:
        to_test_ls.append(j)

    print(len(to_test_ls))

    # move every slice (image and mask) of the selected volumes
    train_img_ls = os.listdir(train_path)
    train_mask_ls = os.listdir(train_mask_path)

    for train_img_name in train_img_ls:
        if str(train_img_name[len(prefix):].split('-')[0]) in to_test_ls:
            print('move img:', train_img_name)
            shutil.move(os.path.join(train_path, train_img_name), os.path.join(test_path, train_img_name))

    for train_mask_name in train_mask_ls:
        if train_mask_name[len(prefix):].split('-')[0] in to_test_ls:
            print('move mask:', train_mask_name)
            shutil.move(os.path.join(train_mask_path, train_mask_name), os.path.join(test_mask_path, train_mask_name))
173
+
174
+
175
def exe_resize(name):
    """Resize the image file at *name* to 1024x1024, overwriting it.

    NOTE(review): no resampling filter is specified, so PIL's default
    (bicubic) is used — presumably this introduces intermediate gray
    levels when applied to binary mask PNGs; confirm whether masks must
    stay strictly binary downstream.
    """
    source = Image.open(name)
    resized = source.resize((1024, 1024))
    resized.save(name)
180
+
181
+
182
def resize_images_in_directory(input_dir):
    """Resize every file in *input_dir* to 1024x1024 using 10 processes."""
    paths = [os.path.join(input_dir, entry) for entry in os.listdir(input_dir)]

    import multiprocessing
    with multiprocessing.Pool(10) as pool:
        pool.map(exe_resize, paths, 1)
190
+
191
+
192
if __name__ == "__main__":

    # 1) Slice the normalized 3D volumes and their label volumes into 2D PNGs.
    trans_3d_to_2d_image()
    trans_3d_to_2d_label()

    # 2) Resize every slice (images and masks) to 1024x1024 in place.
    resize_images_in_directory(path_2d)
    resize_images_in_directory(path_2d_label)

    # 3) Stage everything as the training split.
    #    NOTE: copytree raises if the target directory already exists.
    shutil.copytree(path_2d, os.path.join(target_path+dataset_name, 'train'))
    shutil.copytree(path_2d_label, os.path.join(target_path + dataset_name, 'train_mask'))

    # 4) Move ~20% of the volumes (by volume id) into the test split.
    shuffle_to_get_test()

    # 5) Build the COCO-style train.json / test.json annotation files.
    from create_customer_datasets import create
    create(target_path+dataset_name)