2 nifti stream and patch transforms (#28)
* initial unit tests for 2d/3d unet
* adding license info
* Adding definitions for reading Nifti files in streams and stream transforms for selecting patches (windowing)
* Remove tests
* Renaming fix
* Update arrayutils.py
* Removed blank lines in comments
* Added Dataset based Nifti reader, grid patch sampler, and transforms
* Added example segmentation notebook
* Cleanup deletion
* Update cardiac_segmentation.ipynb

Co-authored-by: Wenqi Li <wenqi.li@ucl.ac.uk>
Showing 24 changed files with 844 additions and 1,732 deletions.
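As context for the patch-selection ("windowing") transforms this commit introduces, the sketch below illustrates the regular-grid patch idea in plain NumPy. It is a minimal illustration only: the helper name iter_grid_patches is invented here, and the committed GridPatchDataset may handle channels, padding, and iteration order differently.

import numpy as np

def iter_grid_patches(volume, patch_size):
    # Yield non-overlapping patches tiling `volume` in regular grid order.
    # Trailing voxels that do not fill a complete patch are skipped in this
    # simplified sketch (an assumption, not necessarily MONAI's behaviour).
    (d, h, w), (pd, ph, pw) = volume.shape, patch_size
    for z in range(0, d - pd + 1, pd):
        for y in range(0, h - ph + 1, ph):
            for x in range(0, w - pw + 1, pw):
                yield volume[z:z + pd, y:y + ph, x:x + pw]

# A 256^3 volume tiled into 64^3 patches yields 4 * 4 * 4 = 64 patches.
vol = np.zeros((256, 256, 256), dtype=np.float32)
patches = list(iter_grid_patches(vol, (64, 64, 64)))
print(len(patches), patches[0].shape)  # 64 (64, 64, 64)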
@@ -0,0 +1,233 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Nifti Read Example\n",
    "\n",
    "The purpose of this notebook is to illustrate reading Nifti files and iterating over patches of the volumes loaded from them."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MONAI version: 0.0.1\n",
      "Python version: 3.7.3 (default, Mar 27 2019, 22:11:17) [GCC 7.3.0]\n",
      "Numpy version: 1.16.4\n",
      "Pytorch version: 1.3.1\n",
      "Ignite version: 0.2.1\n"
     ]
    }
   ],
   "source": [
    "%matplotlib inline\n",
    "\n",
    "import os\n",
    "import sys\n",
    "from glob import glob\n",
    "import tempfile\n",
    "\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import nibabel as nib\n",
    "\n",
    "\n",
    "import torch\n",
    "from torch.utils.data import DataLoader\n",
    "import torchvision.transforms as transforms\n",
    "\n",
    "sys.path.append('..') # assumes this is where MONAI is\n",
    "\n",
    "from monai import application, data, networks, utils\n",
    "from monai.data.readers import NiftiDataset\n",
    "from monai.data.transforms import AddChannel, Transpose, Rescale, ToTensor, UniformRandomPatch, GridPatchDataset\n",
    "\n",
    "application.config.print_config()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Define a function for creating test images and segmentations:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_test_image_3d(height, width, depth, numObjs=12, radMax=30, noiseMax=0.0, numSegClasses=5):\n",
    "    '''Return a noisy 3D image and segmentation.'''\n",
    "    image = np.zeros((width, height, depth))\n",
    "\n",
    "    for i in range(numObjs):\n",
    "        x = np.random.randint(radMax, width - radMax)\n",
    "        y = np.random.randint(radMax, height - radMax)\n",
    "        z = np.random.randint(radMax, depth - radMax)\n",
    "        rad = np.random.randint(5, radMax)\n",
    "        spy, spx, spz = np.ogrid[-x:width - x, -y:height - y, -z:depth - z]\n",
    "        circle = (spx * spx + spy * spy + spz * spz) <= rad * rad\n",
    "\n",
    "        if numSegClasses > 1:\n",
    "            image[circle] = np.ceil(np.random.random() * numSegClasses)\n",
    "        else:\n",
    "            image[circle] = np.random.random() * 0.5 + 0.5\n",
    "\n",
    "    labels = np.ceil(image).astype(np.int32)\n",
    "\n",
    "    norm = np.random.uniform(0, numSegClasses * noiseMax, size=image.shape)\n",
    "    noisyimage = utils.arrayutils.rescale_array(np.maximum(image, norm))\n",
    "\n",
    "    return noisyimage, labels"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create a number of test Nifti files:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "tempdir = tempfile.mkdtemp()\n",
    "\n",
    "for i in range(5):\n",
    "    im, seg = create_test_image_3d(256, 256, 256)\n",
    "\n",
    "    n = nib.Nifti1Image(im, np.eye(4))\n",
    "    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))\n",
    "\n",
    "    n = nib.Nifti1Image(seg, np.eye(4))\n",
    "    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create a data loader which yields uniform random patches from loaded Nifti files:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([5, 1, 64, 64, 64]) torch.Size([5, 1, 64, 64, 64])\n"
     ]
    }
   ],
   "source": [
    "images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))\n",
    "segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))\n",
    "\n",
    "imtrans = transforms.Compose([\n",
    "    Rescale(),\n",
    "    AddChannel(),\n",
    "    UniformRandomPatch((64, 64, 64)),\n",
    "    ToTensor()\n",
    "])\n",
    "\n",
    "segtrans = transforms.Compose([\n",
    "    AddChannel(),\n",
    "    UniformRandomPatch((64, 64, 64)),\n",
    "    ToTensor()\n",
    "])\n",
    "\n",
    "ds = NiftiDataset(images, segs, imtrans, segtrans)\n",
    "\n",
    "loader = DataLoader(ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())\n",
    "im, seg = utils.mathutils.first(loader)\n",
    "print(im.shape, seg.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Alternatively create a data loader which yields patches in regular grid order from loaded images:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([10, 1, 64, 64, 64]) torch.Size([10, 1, 64, 64, 64])\n"
     ]
    }
   ],
   "source": [
    "imtrans = transforms.Compose([\n",
    "    Rescale(),\n",
    "    AddChannel(),\n",
    "    ToTensor()\n",
    "])\n",
    "\n",
    "segtrans = transforms.Compose([\n",
    "    AddChannel(),\n",
    "    ToTensor()\n",
    "])\n",
    "\n",
    "ds = NiftiDataset(images, segs, imtrans, segtrans)\n",
    "ds = GridPatchDataset(ds, (64, 64, 64))\n",
    "\n",
    "loader = DataLoader(ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())\n",
    "im, seg = utils.mathutils.first(loader)\n",
    "print(im.shape, seg.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "!rm -rf {tempdir}"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
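The UniformRandomPatch transform used in the notebook above can likewise be understood as sampling one patch whose corner is drawn uniformly from the range of valid positions. The sketch below is an assumption-based illustration only: the helper name random_patch_3d is hypothetical, and the real transform's handling of the channel dimension may differ.

import numpy as np

def random_patch_3d(volume, patch_size, rng=np.random):
    # Pick a patch corner uniformly at random so the whole patch fits inside
    # `volume`, then slice it out. Illustrative sketch, not MONAI's code.
    starts = [rng.randint(0, dim - p + 1) for dim, p in zip(volume.shape, patch_size)]
    slices = tuple(slice(s, s + p) for s, p in zip(starts, patch_size))
    return volume[slices]

vol = np.random.rand(128, 128, 128).astype(np.float32)
print(random_patch_3d(vol, (64, 64, 64)).shape)  # (64, 64, 64)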