from sklearn.model_selection import KFold
import pickle
import os
import glob
import random

import numpy as np
import torch
from tqdm import tqdm
from torch.utils.data import Dataset
from scipy.ndimage import distance_transform_edt as distance
import scipy.ndimage as ndimage
from skimage import segmentation as skimage_seg

from light_training.dataloading.utils import unpack_dataset


def get_edge_points(img):
    """
    Get the edge (boundary) points of a binary segmentation mask.
    Works for both 2D and 3D inputs.
    """
    dim = len(img.shape)
    if dim == 2:
        strt = ndimage.generate_binary_structure(2, 1)
    else:
        strt = ndimage.generate_binary_structure(3, 1)
    ero = ndimage.binary_erosion(img, strt)
    # the boundary is the original mask minus its erosion
    edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8)
    return edge
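
# Example (a minimal sketch with a toy 2D mask; not part of the training pipeline):
#
# >>> mask = np.zeros((7, 7), dtype=np.uint8)
# >>> mask[2:5, 2:5] = 1
# >>> edge = get_edge_points(mask)
# >>> int(edge.sum())      # only the 8 border pixels of the 3x3 square remain
# 8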


def edge_3d(image_3d):
    # apply edge extraction independently to every [i, j] sub-array
    # (e.g. per batch and per channel)
    return_edge = np.zeros_like(image_3d)
    for i in range(image_3d.shape[0]):
        for j in range(image_3d.shape[1]):
            return_edge[i, j] = get_edge_points(image_3d[i, j])
    return return_edge


def compute_sdf(img_gt, out_shape):
    """
    Compute the signed distance map (SDM) of a binary mask.

    input: segmentation, shape = (batch_size, c, x, y, z)
    output: the signed distance map, normalized to [-1, 1]

        sdf(x) =  0                           if x is on the segmentation boundary
                 -inf_{y in boundary} |x-y|   if x is inside the segmentation
                 +inf_{y in boundary} |x-y|   if x is outside the segmentation
    """
    img_gt = img_gt.astype(np.uint8)
    normalized_sdf = np.zeros(out_shape)

    for b in range(out_shape[0]):
        for c in range(out_shape[1]):
            posmask = img_gt[b, c].astype(np.bool_)
            if posmask.any():
                negmask = ~posmask
                posdis = distance(posmask)
                negdis = distance(negmask)
                boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
                # min-max normalize both distance maps, then combine:
                # negative inside the object, positive outside, zero on the boundary
                sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) \
                    - (posdis - np.min(posdis)) / (np.max(posdis) - np.min(posdis))
                sdf[boundary == 1] = 0
                normalized_sdf[b][c] = sdf
                assert np.min(sdf) == -1.0, \
                    f"sdf min is {np.min(sdf)}: posdis [{np.min(posdis)}, {np.max(posdis)}], negdis [{np.min(negdis)}, {np.max(negdis)}]"
                assert np.max(sdf) == 1.0, \
                    f"sdf max is {np.max(sdf)}: posdis [{np.min(posdis)}, {np.max(posdis)}], negdis [{np.min(negdis)}, {np.max(negdis)}]"

    return normalized_sdf
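
# Example (a minimal sketch with a toy volume; shapes are illustrative only):
#
# >>> gt = np.zeros((1, 1, 8, 8, 8), dtype=np.uint8)
# >>> gt[0, 0, 2:6, 2:6, 2:6] = 1
# >>> sdm = compute_sdf(gt, gt.shape)
# >>> sdm.shape, float(sdm.min()), float(sdm.max())
# ((1, 1, 8, 8, 8), -1.0, 1.0)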


def convert_labels(labels):
    # map integer labels to three overlapping binary regions
    # (BraTS-style: labels {1, 3}, labels {1, 2, 3}, label {3})
    labels = labels[None, None]
    result = [(labels == 1) | (labels == 3),
              (labels == 1) | (labels == 3) | (labels == 2),
              labels == 3]
    return torch.cat(result, dim=1).float()
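
# Example (a minimal sketch on a toy label map):
#
# >>> lab = torch.tensor([[0, 1], [2, 3]])
# >>> convert_labels(lab).shape        # one channel per region
# torch.Size([1, 3, 2, 2])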


class MedicalDataset(Dataset):
    def __init__(self, datalist, test=False) -> None:
        super().__init__()

        self.datalist = datalist
        self.test = test

        # cache the per-case property files (pickled metadata)
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            info = self.load_pkl(p)
            self.data_cached.append(info)

        print("unpacking data ....")

        # unpack each folder once so that memory-mapped .npy files exist
        folders = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folders:
                folders.append(f)
        for f in folders:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        properties_path = f"{data_path[:-4]}.pkl"
        with open(properties_path, "rb") as df:
            info = pickle.load(df)
        return info

    def read_data(self, data_path):
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        # memory-map the unpacked arrays instead of loading them fully
        image_data = np.load(image_path, "r")

        seg_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r")
        return image_data, seg_data

    def __getitem__(self, i):
        image, seg = self.read_data(self.datalist[i])

        properties = self.data_cached[i]
        case_name = properties["name"]

        if seg is not None:
            # load the precomputed signed distance map and stack it onto the segmentation
            sdm = np.load(os.path.join("./data/fullres/train_sdm/", f"{case_name}_seg_sdm.npy"), "r")
            sdm = sdm[0]
            seg = np.concatenate([seg, sdm], axis=0)

        if seg is None:
            return {
                "data": image,
                "properties": properties
            }
        else:
            return {
                "data": image,
                "seg": seg,
                "properties": properties
            }

    def __len__(self):
        return len(self.datalist)


def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Split the list of paths into n_splits train/val folds."""
    X = np.arange(len(data_paths))
    kfold = KFold(n_splits=n_splits, shuffle=shuffle)
    return_res = []
    for train_idx, val_idx in kfold.split(X):
        fold_train = [data_paths[i] for i in train_idx]
        fold_val = [data_paths[j] for j in val_idx]
        return_res.append({"train_data": fold_train, "val_data": fold_val})

    return return_res
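
# Example (a minimal sketch; file names are hypothetical):
#
# >>> paths = [f"case_{i}.npz" for i in range(10)]
# >>> folds = get_kfold_data(paths, n_splits=5)
# >>> len(folds), len(folds[0]["train_data"]), len(folds[0]["val_data"])
# (5, 8, 2)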


def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Build train/val datasets for one fold of a 5-fold split, plus an optional test set."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = fold_data["train_data"]
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_paths = glob.glob(f"{test_dir}/*.npz")
        test_ds = MedicalDataset(test_paths, test=True)
    else:
        test_ds = None

    loader = [train_ds, val_ds, test_ds]

    return loader
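
# Example (a minimal sketch; the directory is an assumed preprocessed-data path
# and presumes the matching .pkl / _seg.npy / SDM files exist):
#
# >>> train_ds, val_ds, test_ds = get_kfold_loader("./data/fullres/train", fold=0)
# >>> sample = train_ds[0]
# >>> sorted(sample.keys())
# ['data', 'properties', 'seg']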


def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on every case; the validation split of the given fold is reused for
    monitoring (and therefore overlaps the training data)."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_paths = glob.glob(f"{test_dir}/*.npz")
        test_ds = MedicalDataset(test_paths, test=True)
    else:
        test_ds = None

    loader = [train_ds, val_ds, test_ds]

    return loader


def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build datasets from separate train/val (and optionally test) directories."""
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    if test_dir is not None:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)
    else:
        test_ds = None

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader


def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2):
    """Randomly split a single directory into train/val/test subsets by the given rates."""
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)

    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    # take the test cases right after the validation cases so the three splits never overlap
    test_datalist = all_paths[train_number + val_number: train_number + val_number + test_number]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader
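
# Example (a minimal sketch; the directory is an assumed preprocessed-data path):
#
# >>> train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(
# ...     "./data/fullres/train", train_rate=0.7, val_rate=0.1, test_rate=0.2)
# >>> len(train_ds) >= len(test_ds) >= len(val_ds)
# True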


def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Same as get_all_training_loader, but `data_dir` is a list of directories whose
    cases are pooled together before splitting."""
    all_paths = []
    for p in data_dir:
        paths = glob.glob(f"{p}/*.npz")
        for pp in paths:
            all_paths.append(pp)

    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_paths = glob.glob(f"{test_dir}/*.npz")
        test_ds = MedicalDataset(test_paths, test=True)
    else:
        test_ds = None

    loader = [train_ds, val_ds, test_ds]

    return loader