diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml
index 7673b4b4ef..10ac5aae52 100644
--- a/.github/workflows/format.yml
+++ b/.github/workflows/format.yml
@@ -20,6 +20,7 @@ jobs:
           "command": "black",
           "permission": "none",
           "issue_type": "pull-request",
+          "allow_edits": true,
           "repository": "project-monai/monai-code-formatter"
         }
       ]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4ba63fa310..917c87e55f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -45,6 +45,7 @@ Ideally, the new branch should be based on the latest `master` branch.
 ### Coding style
 Coding style is checked by flake8, using [a flake8 configuration](./setup.cfg) similar to [PyTorch's](https://github.com/pytorch/pytorch/blob/master/.flake8).
+Python code can be formatted locally before submitting a pull request (e.g. using [`psf/Black`](https://github.com/psf/black)), or during the pull request review via MONAI's automatic code formatting workflow.
 
 License information: all source code files should start with this paragraph:
 ```
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 2e08e2033f..10bc3ceafa 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,50 +14,63 @@ import sys
 import subprocess
 
-sys.path.insert(0, os.path.abspath('..'))
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
+sys.path.insert(0, os.path.abspath(".."))
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
 print(sys.path)
 
 import monai # noqa: E402
 
 # -- Project information -----------------------------------------------------
 
-project = 'MONAI'
-copyright = '2020, MONAI Contributors'
-author = 'MONAI Contributors'
+project = "MONAI"
+copyright = "2020, MONAI Contributors"
+author = "MONAI Contributors"
 
 # The full version, including alpha/beta/rc tags
-short_version = monai.__version__.split('+')[0]
+short_version = monai.__version__.split("+")[0]
 release = short_version
 version = short_version
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
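# Aside: the CONTRIBUTING.md change above notes that Python code can be
# formatted locally before a pull request. A minimal sketch of doing so with
# Black, assuming `pip install black`; the target path and line length here
# are illustrative assumptions, not MONAI's actual configuration.
import subprocess

def format_tree(path=".", line_length=120):
    # Run Black over `path`; check=True raises if the formatter fails to run.
    subprocess.run(["black", f"--line-length={line_length}", path], check=True)

if __name__ == "__main__":
    format_tree()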
-exclude_patterns = ['transforms', 'networks', 'metrics', 'engine', 'data', - 'application', 'config', 'handlers', 'losses', 'visualize', 'utils'] +exclude_patterns = [ + "transforms", + "networks", + "metrics", + "engine", + "data", + "application", + "config", + "handlers", + "losses", + "visualize", + "utils", +] def generate_apidocs(*args): """Generate API docs automatically by trawling the available modules""" - module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'monai')) - output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'apidocs')) - apidoc_command_path = 'sphinx-apidoc' - if hasattr(sys, 'real_prefix'): # called from a virtualenv - apidoc_command_path = os.path.join(sys.prefix, 'bin', 'sphinx-apidoc') + module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "monai")) + output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "apidocs")) + apidoc_command_path = "sphinx-apidoc" + if hasattr(sys, "real_prefix"): # called from a virtualenv + apidoc_command_path = os.path.join(sys.prefix, "bin", "sphinx-apidoc") apidoc_command_path = os.path.abspath(apidoc_command_path) - print('output_path {}'.format(output_path)) - print('module_path {}'.format(module_path)) + print("output_path {}".format(output_path)) + print("module_path {}".format(module_path)) subprocess.check_call( - [apidoc_command_path, '-f', '-e'] + - ['-o', output_path] + - [module_path] + - [os.path.join(module_path, p) for p in exclude_patterns]) + [apidoc_command_path, "-f", "-e"] + + ["-o", output_path] + + [module_path] + + [os.path.join(module_path, p) for p in exclude_patterns] + ) def setup(app): # Hook to allow for automatic generation of API docs # before doc deployment begins. - app.connect('builder-inited', generate_apidocs) + app.connect("builder-inited", generate_apidocs) + # -- General configuration --------------------------------------------------- @@ -65,56 +78,56 @@ def setup(app): # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. source_suffix = { - '.rst': 'restructuredtext', - '.txt': 'restructuredtext', - '.md': 'markdown', + ".rst": "restructuredtext", + ".txt": "restructuredtext", + ".md": "markdown", } extensions = [ - 'recommonmark', - 'sphinx.ext.intersphinx', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinx.ext.autosectionlabel' + "recommonmark", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx.ext.autodoc", + "sphinx.ext.viewcode", + "sphinx.ext.autosectionlabel", ] -autoclass_content = 'both' +autoclass_content = "both" add_module_names = False autosectionlabel_prefix_document = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_theme_options = { - 'collapse_navigation': True, - 'display_version': True, - 'sticky_navigation': True, # Set to False to disable the sticky nav while scrolling. 
- 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text - 'style_nav_header_background': '#FBFBFB', + "collapse_navigation": True, + "display_version": True, + "sticky_navigation": True, # Set to False to disable the sticky nav while scrolling. + "logo_only": True, # if we have a html_logo below, this shows /only/ the logo with no title text + "style_nav_header_background": "#FBFBFB", } html_context = { - 'display_github': True, - 'github_user': 'Project-MONAI', - 'github_repo': 'MONAI', - 'github_version': 'master', - 'conf_py_path': '/docs/', + "display_github": True, + "github_user": "Project-MONAI", + "github_repo": "MONAI", + "github_version": "master", + "conf_py_path": "/docs/", } html_scaled_image_link = False html_show_sourcelink = True -html_favicon = '../images/favicon.ico' -html_logo = '../images/MONAI-logo-color.png' +html_favicon = "../images/favicon.ico" +html_logo = "../images/MONAI-logo-color.png" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['../_static'] -html_css_files = ['custom.css'] +html_static_path = ["../_static"] +html_css_files = ["custom.css"] diff --git a/examples/classification_3d/densenet_evaluation_array.py b/examples/classification_3d/densenet_evaluation_array.py index 1716133a09..52cb04ff75 100644 --- a/examples/classification_3d/densenet_evaluation_array.py +++ b/examples/classification_3d/densenet_evaluation_array.py @@ -19,35 +19,29 @@ from monai.data import NiftiDataset, CSVSaver from monai.transforms import Compose, AddChannel, ScaleIntensity, Resize, ToTensor + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) + labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) # Define transforms for image - val_transforms = Compose([ - ScaleIntensity(), - 
AddChannel(), - Resize((96, 96, 96)), - ToTensor() - ]) + val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()]) # Define nifti dataset val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False) @@ -55,19 +49,15 @@ def main(): val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) # Create DenseNet121 - device = torch.device('cuda:0') - model = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ).to(device) + device = torch.device("cuda:0") + model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,).to(device) - model.load_state_dict(torch.load('best_metric_model.pth')) + model.load_state_dict(torch.load("best_metric_model.pth")) model.eval() with torch.no_grad(): - num_correct = 0. + num_correct = 0.0 metric_count = 0 - saver = CSVSaver(output_dir='./output') + saver = CSVSaver(output_dir="./output") for val_data in val_loader: val_images, val_labels = val_data[0].to(device), val_data[1].to(device) val_outputs = model(val_images).argmax(dim=1) @@ -76,8 +66,9 @@ def main(): num_correct += value.sum().item() saver.save_batch(val_outputs, val_data[2]) metric = num_correct / metric_count - print('evaluation metric:', metric) + print("evaluation metric:", metric) saver.finalize() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d/densenet_evaluation_dict.py b/examples/classification_3d/densenet_evaluation_dict.py index 45bbd46b06..712856409b 100644 --- a/examples/classification_3d/densenet_evaluation_dict.py +++ b/examples/classification_3d/densenet_evaluation_dict.py @@ -19,66 +19,64 @@ from monai.transforms import Compose, LoadNiftid, AddChanneld, ScaleIntensityd, Resized, ToTensord from monai.data import CSVSaver + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) - val_files = 
[{'img': img, 'label': label} for img, label in zip(images, labels)] + labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) + val_files = [{"img": img, "label": label} for img, label in zip(images, labels)] # Define transforms for image - val_transforms = Compose([ - LoadNiftid(keys=['img']), - AddChanneld(keys=['img']), - ScaleIntensityd(keys=['img']), - Resized(keys=['img'], spatial_size=(96, 96, 96)), - ToTensord(keys=['img']) - ]) + val_transforms = Compose( + [ + LoadNiftid(keys=["img"]), + AddChanneld(keys=["img"]), + ScaleIntensityd(keys=["img"]), + Resized(keys=["img"], spatial_size=(96, 96, 96)), + ToTensord(keys=["img"]), + ] + ) # create a validation data loader val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) # Create DenseNet121 - device = torch.device('cuda:0') - model = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ).to(device) + device = torch.device("cuda:0") + model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,).to(device) - model.load_state_dict(torch.load('best_metric_model.pth')) + model.load_state_dict(torch.load("best_metric_model.pth")) model.eval() with torch.no_grad(): - num_correct = 0. + num_correct = 0.0 metric_count = 0 - saver = CSVSaver(output_dir='./output') + saver = CSVSaver(output_dir="./output") for val_data in val_loader: - val_images, val_labels = val_data['img'].to(device), val_data['label'].to(device) + val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device) val_outputs = model(val_images).argmax(dim=1) value = torch.eq(val_outputs, val_labels) metric_count += len(value) num_correct += value.sum().item() - saver.save_batch(val_outputs, {'filename_or_obj': val_data['img.filename_or_obj']}) + saver.save_batch(val_outputs, {"filename_or_obj": val_data["img.filename_or_obj"]}) metric = num_correct / metric_count - print('evaluation metric:', metric) + print("evaluation metric:", metric) saver.finalize() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d/densenet_training_array.py b/examples/classification_3d/densenet_training_array.py index 0fbbda6caa..71743cfe85 100644 --- a/examples/classification_3d/densenet_training_array.py +++ b/examples/classification_3d/densenet_training_array.py @@ -20,52 +20,40 @@ from monai.data import NiftiDataset from monai.transforms import Compose, AddChannel, ScaleIntensity, Resize, RandRotate90, ToTensor + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - 
'/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) + labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) # Define transforms - train_transforms = Compose([ - ScaleIntensity(), - AddChannel(), - Resize((96, 96, 96)), - RandRotate90(), - ToTensor() - ]) - val_transforms = Compose([ - ScaleIntensity(), - AddChannel(), - Resize((96, 96, 96)), - ToTensor() - ]) + train_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), RandRotate90(), ToTensor()]) + val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()]) # Define nifti dataset, data loader check_ds = NiftiDataset(image_files=images, labels=labels, transform=train_transforms) @@ -82,12 +70,8 @@ def main(): val_loader = DataLoader(val_ds, batch_size=2, num_workers=2, pin_memory=torch.cuda.is_available()) # Create DenseNet121, CrossEntropyLoss and Adam optimizer - device = torch.device('cuda:0') - model = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ).to(device) + device = torch.device("cuda:0") + model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,).to(device) loss_function = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), 1e-5) @@ -99,8 +83,8 @@ def main(): metric_values = list() writer = SummaryWriter() for epoch in range(5): - print('-' * 10) - print('epoch {}/{}'.format(epoch + 1, 5)) + print("-" * 10) + print("epoch {}/{}".format(epoch + 1, 5)) model.train() 
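# Aside: the examples above build pipelines such as
# Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()]).
# A toy stand-in (not MONAI's implementation) showing the chaining contract
# such a pipeline relies on:
from typing import Callable, Sequence

class TinyCompose:
    def __init__(self, transforms: Sequence[Callable]):
        self.transforms = list(transforms)

    def __call__(self, data):
        # Each transform consumes the previous transform's output.
        for transform in self.transforms:
            data = transform(data)
        return data

# usage: double, then add one -- the same piping idea as the image transforms
pipeline = TinyCompose([lambda x: x * 2, lambda x: x + 1])
assert pipeline(3) == 7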
epoch_loss = 0 step = 0 @@ -114,16 +98,16 @@ def main(): optimizer.step() epoch_loss += loss.item() epoch_len = len(train_ds) // train_loader.batch_size - print('{}/{}, train_loss: {:.4f}'.format(step, epoch_len, loss.item())) - writer.add_scalar('train_loss', loss.item(), epoch_len * epoch + step) + print("{}/{}, train_loss: {:.4f}".format(step, epoch_len, loss.item())) + writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step) epoch_loss /= step epoch_loss_values.append(epoch_loss) - print('epoch {} average loss: {:.4f}'.format(epoch + 1, epoch_loss)) + print("epoch {} average loss: {:.4f}".format(epoch + 1, epoch_loss)) if (epoch + 1) % val_interval == 0: model.eval() with torch.no_grad(): - num_correct = 0. + num_correct = 0.0 metric_count = 0 for val_data in val_loader: val_images, val_labels = val_data[0].to(device), val_data[1].to(device) @@ -136,13 +120,17 @@ def main(): if metric > best_metric: best_metric = metric best_metric_epoch = epoch + 1 - torch.save(model.state_dict(), 'best_metric_model.pth') - print('saved new best metric model') - print('current epoch: {} current accuracy: {:.4f} best accuracy: {:.4f} at epoch {}'.format( - epoch + 1, metric, best_metric, best_metric_epoch)) - writer.add_scalar('val_accuracy', metric, epoch + 1) - print('train completed, best_metric: {:.4f} at epoch: {}'.format(best_metric, best_metric_epoch)) + torch.save(model.state_dict(), "best_metric_model.pth") + print("saved new best metric model") + print( + "current epoch: {} current accuracy: {:.4f} best accuracy: {:.4f} at epoch {}".format( + epoch + 1, metric, best_metric, best_metric_epoch + ) + ) + writer.add_scalar("val_accuracy", metric, epoch + 1) + print("train completed, best_metric: {:.4f} at epoch: {}".format(best_metric, best_metric_epoch)) writer.close() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d/densenet_training_dict.py b/examples/classification_3d/densenet_training_dict.py index ee8c944adb..f80dc3752f 100644 --- a/examples/classification_3d/densenet_training_dict.py +++ b/examples/classification_3d/densenet_training_dict.py @@ -20,62 +20,65 @@ from monai.transforms import Compose, LoadNiftid, AddChanneld, ScaleIntensityd, Resized, RandRotate90d, ToTensord from monai.metrics import compute_roc_auc + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - 
'/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) - train_files = [{'img': img, 'label': label} for img, label in zip(images[:10], labels[:10])] - val_files = [{'img': img, 'label': label} for img, label in zip(images[-10:], labels[-10:])] + labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) + train_files = [{"img": img, "label": label} for img, label in zip(images[:10], labels[:10])] + val_files = [{"img": img, "label": label} for img, label in zip(images[-10:], labels[-10:])] # Define transforms for image - train_transforms = Compose([ - LoadNiftid(keys=['img']), - AddChanneld(keys=['img']), - ScaleIntensityd(keys=['img']), - Resized(keys=['img'], spatial_size=(96, 96, 96)), - RandRotate90d(keys=['img'], prob=0.8, spatial_axes=[0, 2]), - ToTensord(keys=['img']) - ]) - val_transforms = Compose([ - LoadNiftid(keys=['img']), - AddChanneld(keys=['img']), - ScaleIntensityd(keys=['img']), - Resized(keys=['img'], spatial_size=(96, 96, 96)), - ToTensord(keys=['img']) - ]) + train_transforms = Compose( + [ + LoadNiftid(keys=["img"]), + AddChanneld(keys=["img"]), + ScaleIntensityd(keys=["img"]), + Resized(keys=["img"], spatial_size=(96, 96, 96)), + RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]), + ToTensord(keys=["img"]), + ] + ) + val_transforms = Compose( + [ + LoadNiftid(keys=["img"]), + AddChanneld(keys=["img"]), + ScaleIntensityd(keys=["img"]), + Resized(keys=["img"], spatial_size=(96, 96, 96)), + ToTensord(keys=["img"]), + ] + ) # Define dataset, data loader check_ds = monai.data.Dataset(data=train_files, transform=train_transforms) check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) check_data = monai.utils.misc.first(check_loader) - print(check_data['img'].shape, check_data['label']) + 
print(check_data["img"].shape, check_data["label"]) # create a training data loader train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) @@ -86,12 +89,8 @@ def main(): val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) # Create DenseNet121, CrossEntropyLoss and Adam optimizer - device = torch.device('cuda:0') - model = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ).to(device) + device = torch.device("cuda:0") + model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,).to(device) loss_function = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), 1e-5) @@ -101,14 +100,14 @@ def main(): best_metric_epoch = -1 writer = SummaryWriter() for epoch in range(5): - print('-' * 10) - print('epoch {}/{}'.format(epoch + 1, 5)) + print("-" * 10) + print("epoch {}/{}".format(epoch + 1, 5)) model.train() epoch_loss = 0 step = 0 for batch_data in train_loader: step += 1 - inputs, labels = batch_data['img'].to(device), batch_data['label'].to(device) + inputs, labels = batch_data["img"].to(device), batch_data["label"].to(device) optimizer.zero_grad() outputs = model(inputs) loss = loss_function(outputs, labels) @@ -116,10 +115,10 @@ def main(): optimizer.step() epoch_loss += loss.item() epoch_len = len(train_ds) // train_loader.batch_size - print('{}/{}, train_loss: {:.4f}'.format(step, epoch_len, loss.item())) - writer.add_scalar('train_loss', loss.item(), epoch_len * epoch + step) + print("{}/{}, train_loss: {:.4f}".format(step, epoch_len, loss.item())) + writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step) epoch_loss /= step - print('epoch {} average loss: {:.4f}'.format(epoch + 1, epoch_loss)) + print("epoch {} average loss: {:.4f}".format(epoch + 1, epoch_loss)) if (epoch + 1) % val_interval == 0: model.eval() @@ -127,7 +126,7 @@ def main(): y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: - val_images, val_labels = val_data['img'].to(device), val_data['label'].to(device) + val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device) y_pred = torch.cat([y_pred, model(val_images)], dim=0) y = torch.cat([y, val_labels], dim=0) @@ -137,13 +136,17 @@ def main(): if acc_metric > best_metric: best_metric = acc_metric best_metric_epoch = epoch + 1 - torch.save(model.state_dict(), 'best_metric_model.pth') - print('saved new best metric model') - print('current epoch: {} current accuracy: {:.4f} current AUC: {:.4f} best accuracy: {:.4f} at epoch {}'.format( - epoch + 1, acc_metric, auc_metric, best_metric, best_metric_epoch)) - writer.add_scalar('val_accuracy', acc_metric, epoch + 1) - print('train completed, best_metric: {:.4f} at epoch: {}'.format(best_metric, best_metric_epoch)) + torch.save(model.state_dict(), "best_metric_model.pth") + print("saved new best metric model") + print( + "current epoch: {} current accuracy: {:.4f} current AUC: {:.4f} best accuracy: {:.4f} at epoch {}".format( + epoch + 1, acc_metric, auc_metric, best_metric, best_metric_epoch + ) + ) + writer.add_scalar("val_accuracy", acc_metric, epoch + 1) + print("train completed, best_metric: {:.4f} at epoch: {}".format(best_metric, best_metric_epoch)) writer.close() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d_ignite/densenet_evaluation_array.py 
b/examples/classification_3d_ignite/densenet_evaluation_array.py index 37b6425aa4..1319e4ed50 100644 --- a/examples/classification_3d_ignite/densenet_evaluation_array.py +++ b/examples/classification_3d_ignite/densenet_evaluation_array.py @@ -22,77 +22,69 @@ from monai.transforms import Compose, AddChannel, ScaleIntensity, Resize, ToTensor from monai.handlers import StatsHandler, ClassificationSaver, CheckpointLoader + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) + labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) # define transforms for image - val_transforms = Compose([ - ScaleIntensity(), - AddChannel(), - Resize((96, 96, 96)), - ToTensor() - ]) + val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()]) # define nifti dataset val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False) # create DenseNet121 - net = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ) - device = torch.device('cuda:0') + net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,) + device = torch.device("cuda:0") - metric_name = 'Accuracy' + metric_name = "Accuracy" # add evaluation metric to the evaluator engine val_metrics = {metric_name: Accuracy()} - def prepare_batch(batch, device=None, non_blocking=False): return _prepare_batch((batch[0], batch[1]), device, non_blocking) - # ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration, # user can add output_transform to return other values evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch) # add stats event handler to print validation stats via evaluator val_stats_handler = StatsHandler( - name='evaluator', - output_transform=lambda x: None # no need to print loss value, so disable per iteration output + name="evaluator", + output_transform=lambda x: 
None, # no need to print loss value, so disable per iteration output ) val_stats_handler.attach(evaluator) # for the array data format, assume the 3rd item of batch data is the meta_data - prediction_saver = ClassificationSaver(output_dir='tempdir', batch_transform=lambda batch: batch[2], - output_transform=lambda output: output[0].argmax(1)) + prediction_saver = ClassificationSaver( + output_dir="tempdir", + batch_transform=lambda batch: batch[2], + output_transform=lambda output: output[0].argmax(1), + ) prediction_saver.attach(evaluator) # the model was trained by "densenet_training_array" example - CheckpointLoader(load_path='./runs/net_checkpoint_40.pth', load_dict={'net': net}).attach(evaluator) + CheckpointLoader(load_path="./runs/net_checkpoint_40.pth", load_dict={"net": net}).attach(evaluator) # create a validation data loader val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) state = evaluator.run(val_loader) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d_ignite/densenet_evaluation_dict.py b/examples/classification_3d_ignite/densenet_evaluation_dict.py index f308efd94b..07f69a1faa 100644 --- a/examples/classification_3d_ignite/densenet_evaluation_dict.py +++ b/examples/classification_3d_ignite/densenet_evaluation_dict.py @@ -21,52 +21,47 @@ from monai.handlers import StatsHandler, CheckpointLoader, ClassificationSaver from monai.transforms import Compose, LoadNiftid, AddChanneld, ScaleIntensityd, Resized, ToTensord + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) - val_files = [{'img': img, 'label': label} for img, label in zip(images, labels)] + labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) + val_files = [{"img": img, "label": label} for img, label in zip(images, labels)] # define transforms for image - val_transforms = Compose([ - LoadNiftid(keys=['img']), - AddChanneld(keys=['img']), - 
ScaleIntensityd(keys=['img']), - Resized(keys=['img'], spatial_size=(96, 96, 96)), - ToTensord(keys=['img']) - ]) - - # create DenseNet121 - net = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, + val_transforms = Compose( + [ + LoadNiftid(keys=["img"]), + AddChanneld(keys=["img"]), + ScaleIntensityd(keys=["img"]), + Resized(keys=["img"], spatial_size=(96, 96, 96)), + ToTensord(keys=["img"]), + ] ) - device = torch.device('cuda:0') + # create DenseNet121 + net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,) + device = torch.device("cuda:0") def prepare_batch(batch, device=None, non_blocking=False): - return _prepare_batch((batch['img'], batch['label']), device, non_blocking) + return _prepare_batch((batch["img"], batch["label"]), device, non_blocking) - - metric_name = 'Accuracy' + metric_name = "Accuracy" # add evaluation metric to the evaluator engine val_metrics = {metric_name: Accuracy()} # ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration, @@ -75,19 +70,22 @@ def prepare_batch(batch, device=None, non_blocking=False): # add stats event handler to print validation stats via evaluator val_stats_handler = StatsHandler( - name='evaluator', - output_transform=lambda x: None # no need to print loss value, so disable per iteration output + name="evaluator", + output_transform=lambda x: None, # no need to print loss value, so disable per iteration output ) val_stats_handler.attach(evaluator) # for the array data format, assume the 3rd item of batch data is the meta_data - prediction_saver = ClassificationSaver(output_dir='tempdir', name='evaluator', - batch_transform=lambda batch: {'filename_or_obj': batch['img.filename_or_obj']}, - output_transform=lambda output: output[0].argmax(1)) + prediction_saver = ClassificationSaver( + output_dir="tempdir", + name="evaluator", + batch_transform=lambda batch: {"filename_or_obj": batch["img.filename_or_obj"]}, + output_transform=lambda output: output[0].argmax(1), + ) prediction_saver.attach(evaluator) # the model was trained by "densenet_training_dict" example - CheckpointLoader(load_path='./runs/net_checkpoint_40.pth', load_dict={'net': net}).attach(evaluator) + CheckpointLoader(load_path="./runs/net_checkpoint_40.pth", load_dict={"net": net}).attach(evaluator) # create a validation data loader val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) @@ -95,5 +93,6 @@ def prepare_batch(batch, device=None, non_blocking=False): state = evaluator.run(val_loader) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d_ignite/densenet_training_array.py b/examples/classification_3d_ignite/densenet_training_array.py index 41e1c008aa..66611be7bb 100644 --- a/examples/classification_3d_ignite/densenet_training_array.py +++ b/examples/classification_3d_ignite/densenet_training_array.py @@ -23,52 +23,40 @@ from monai.transforms import Compose, AddChannel, ScaleIntensity, Resize, RandRotate90, ToTensor from monai.handlers import StatsHandler, TensorBoardStatsHandler, stopping_fn_from_metric + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz', - 
'/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) + labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) # define transforms - train_transforms = Compose([ - ScaleIntensity(), - AddChannel(), - Resize((96, 96, 96)), - RandRotate90(), - ToTensor() - ]) - val_transforms = Compose([ - ScaleIntensity(), - AddChannel(), - Resize((96, 96, 96)), - ToTensor() - ]) + train_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), RandRotate90(), ToTensor()]) + val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()]) # define nifti dataset, data loader check_ds = NiftiDataset(image_files=images, labels=labels, transform=train_transforms) @@ -77,30 +65,26 @@ def main(): print(type(im), im.shape, label) # create DenseNet121, CrossEntropyLoss and Adam optimizer - net = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ) 
+ net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,) loss = torch.nn.CrossEntropyLoss() lr = 1e-5 opt = torch.optim.Adam(net.parameters(), lr) - device = torch.device('cuda:0') + device = torch.device("cuda:0") # ignite trainer expects batch=(img, label) and returns output=loss at every iteration, # user can add output_transform to return other values, like: y_pred, y, etc. trainer = create_supervised_trainer(net, opt, loss, device, False) # adding checkpoint handler to save models (network params and optimizer stats) during training - checkpoint_handler = ModelCheckpoint('./runs/', 'net', n_saved=10, require_empty=False) - trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, - handler=checkpoint_handler, - to_save={'net': net, 'opt': opt}) + checkpoint_handler = ModelCheckpoint("./runs/", "net", n_saved=10, require_empty=False) + trainer.add_event_handler( + event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt} + ) # StatsHandler prints loss at every iteration and print metrics at every epoch, # we don't set metrics for trainer here, so just print loss, user can also customize print functions # and can use output_transform to convert engine.state.output if it's not loss value - train_stats_handler = StatsHandler(name='trainer') + train_stats_handler = StatsHandler(name="trainer") train_stats_handler.attach(trainer) # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler @@ -110,7 +94,7 @@ def main(): # set parameters for validation validation_every_n_epochs = 1 - metric_name = 'Accuracy' + metric_name = "Accuracy" # add evaluation metric to the evaluator engine val_metrics = {metric_name: Accuracy()} # ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration, @@ -119,33 +103,31 @@ def main(): # add stats event handler to print validation stats via evaluator val_stats_handler = StatsHandler( - name='evaluator', + name="evaluator", output_transform=lambda x: None, # no need to print loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_stats_handler.attach(evaluator) # add handler to record metrics to TensorBoard at every epoch val_tensorboard_stats_handler = TensorBoardStatsHandler( output_transform=lambda x: None, # no need to plot loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_tensorboard_stats_handler.attach(evaluator) # add early stopping handler to evaluator - early_stopper = EarlyStopping(patience=4, - score_function=stopping_fn_from_metric(metric_name), - trainer=trainer) + early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer) evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper) # create a validation data loader val_ds = NiftiDataset(image_files=images[-10:], labels=labels[-10:], transform=val_transforms) val_loader = DataLoader(val_ds, batch_size=2, num_workers=2, pin_memory=torch.cuda.is_available()) - @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs)) def run_validation(engine): evaluator.run(val_loader) - 
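# Aside: the `@trainer.on(Events.EPOCH_COMPLETED(every=...))` handler above
# schedules the validation runs. A self-contained sketch of that event
# pattern with a dummy engine, assuming pytorch-ignite (0.3+, where event
# filters such as `every=` exist) is installed:
from ignite.engine import Engine, Events

def dummy_step(engine, batch):
    return batch  # stand-in for a training step that returns the loss

toy_trainer = Engine(dummy_step)

@toy_trainer.on(Events.EPOCH_COMPLETED(every=2))
def report(engine):
    print(f"validation would run after epoch {engine.state.epoch}")

toy_trainer.run([0, 1, 2], max_epochs=4)  # the handler fires at epochs 2 and 4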
# create a training data loader train_ds = NiftiDataset(image_files=images[:10], labels=labels[:10], transform=train_transforms) train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=2, pin_memory=torch.cuda.is_available()) @@ -153,5 +135,6 @@ def run_validation(engine): train_epochs = 30 state = trainer.run(train_loader, train_epochs) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/classification_3d_ignite/densenet_training_dict.py b/examples/classification_3d_ignite/densenet_training_dict.py index cd7f8b854e..ceb144fbad 100644 --- a/examples/classification_3d_ignite/densenet_training_dict.py +++ b/examples/classification_3d_ignite/densenet_training_dict.py @@ -22,94 +22,91 @@ from monai.transforms import Compose, LoadNiftid, AddChanneld, ScaleIntensityd, Resized, RandRotate90d, ToTensord from monai.handlers import StatsHandler, TensorBoardStatsHandler, stopping_fn_from_metric, ROCAUC + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/ images = [ - '/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz', - '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz' + "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz", + 
"/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz", + "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz", ] # 2 binary labels for gender classification: man and woman - labels = np.array([ - 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0 - ]) - train_files = [{'img': img, 'label': label} for img, label in zip(images[:10], labels[:10])] - val_files = [{'img': img, 'label': label} for img, label in zip(images[-10:], labels[-10:])] + labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) + train_files = [{"img": img, "label": label} for img, label in zip(images[:10], labels[:10])] + val_files = [{"img": img, "label": label} for img, label in zip(images[-10:], labels[-10:])] # define transforms for image - train_transforms = Compose([ - LoadNiftid(keys=['img']), - AddChanneld(keys=['img']), - ScaleIntensityd(keys=['img']), - Resized(keys=['img'], spatial_size=(96, 96, 96)), - RandRotate90d(keys=['img'], prob=0.8, spatial_axes=[0, 2]), - ToTensord(keys=['img']) - ]) - val_transforms = Compose([ - LoadNiftid(keys=['img']), - AddChanneld(keys=['img']), - ScaleIntensityd(keys=['img']), - Resized(keys=['img'], spatial_size=(96, 96, 96)), - ToTensord(keys=['img']) - ]) + train_transforms = Compose( + [ + LoadNiftid(keys=["img"]), + AddChanneld(keys=["img"]), + ScaleIntensityd(keys=["img"]), + Resized(keys=["img"], spatial_size=(96, 96, 96)), + RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]), + ToTensord(keys=["img"]), + ] + ) + val_transforms = Compose( + [ + LoadNiftid(keys=["img"]), + AddChanneld(keys=["img"]), + ScaleIntensityd(keys=["img"]), + Resized(keys=["img"], spatial_size=(96, 96, 96)), + ToTensord(keys=["img"]), + ] + ) # define dataset, data loader check_ds = monai.data.Dataset(data=train_files, transform=train_transforms) check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) check_data = monai.utils.misc.first(check_loader) - print(check_data['img'].shape, check_data['label']) + print(check_data["img"].shape, check_data["label"]) # create DenseNet121, CrossEntropyLoss and Adam optimizer - net = monai.networks.nets.densenet.densenet121( - spatial_dims=3, - in_channels=1, - out_channels=2, - ) + net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,) loss = torch.nn.CrossEntropyLoss() lr = 1e-5 opt = torch.optim.Adam(net.parameters(), lr) - device = torch.device('cuda:0') - + device = torch.device("cuda:0") # ignite trainer expects batch=(img, label) and returns output=loss at every iteration, # user can add output_transform to return other values, like: y_pred, y, etc. 
def prepare_batch(batch, device=None, non_blocking=False): - return _prepare_batch((batch['img'], batch['label']), device, non_blocking) - + return _prepare_batch((batch["img"], batch["label"]), device, non_blocking) trainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch) # adding checkpoint handler to save models (network params and optimizer stats) during training - checkpoint_handler = ModelCheckpoint('./runs/', 'net', n_saved=10, require_empty=False) - trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, - handler=checkpoint_handler, - to_save={'net': net, 'opt': opt}) + checkpoint_handler = ModelCheckpoint("./runs/", "net", n_saved=10, require_empty=False) + trainer.add_event_handler( + event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt} + ) # StatsHandler prints loss at every iteration and print metrics at every epoch, # we don't set metrics for trainer here, so just print loss, user can also customize print functions # and can use output_transform to convert engine.state.output if it's not loss value - train_stats_handler = StatsHandler(name='trainer') + train_stats_handler = StatsHandler(name="trainer") train_stats_handler.attach(trainer) # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler @@ -119,42 +116,40 @@ def prepare_batch(batch, device=None, non_blocking=False): # set parameters for validation validation_every_n_epochs = 1 - metric_name = 'Accuracy' + metric_name = "Accuracy" # add evaluation metric to the evaluator engine - val_metrics = {metric_name: Accuracy(), 'AUC': ROCAUC(to_onehot_y=True, add_softmax=True)} + val_metrics = {metric_name: Accuracy(), "AUC": ROCAUC(to_onehot_y=True, add_softmax=True)} # ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration, # user can add output_transform to return other values evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch) # add stats event handler to print validation stats via evaluator val_stats_handler = StatsHandler( - name='evaluator', + name="evaluator", output_transform=lambda x: None, # no need to print loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_stats_handler.attach(evaluator) # add handler to record metrics to TensorBoard at every epoch val_tensorboard_stats_handler = TensorBoardStatsHandler( output_transform=lambda x: None, # no need to plot loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_tensorboard_stats_handler.attach(evaluator) # add early stopping handler to evaluator - early_stopper = EarlyStopping(patience=4, - score_function=stopping_fn_from_metric(metric_name), - trainer=trainer) + early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer) evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper) # create a validation data loader val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available()) - 
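# Aside: `stopping_fn_from_metric(metric_name)` above adapts a named metric
# into the score_function that ignite's EarlyStopping expects (higher is
# better). A sketch of what such an adapter boils down to:
def stopping_fn_from_metric_sketch(metric_name):
    def stopping_fn(engine):
        # EarlyStopping halts training once this score stops improving for
        # `patience` consecutive validation runs.
        return engine.state.metrics[metric_name]
    return stopping_fn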
@trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs)) def run_validation(engine): evaluator.run(val_loader) - # create a training data loader train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, pin_memory=torch.cuda.is_available()) @@ -162,5 +157,6 @@ def run_validation(engine): train_epochs = 30 state = trainer.run(train_loader, train_epochs) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d/unet_evaluation_array.py b/examples/segmentation_3d/unet_evaluation_array.py index 6e974d4c8f..b18d059c21 100644 --- a/examples/segmentation_3d/unet_evaluation_array.py +++ b/examples/segmentation_3d/unet_evaluation_array.py @@ -26,23 +26,24 @@ from monai.data import create_test_image_3d, sliding_window_inference, NiftiSaver, NiftiDataset from monai.metrics import compute_meandice + def main(): config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(5): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "im%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) + images = sorted(glob(os.path.join(tempdir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) # define transforms for image and segmentation imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()]) @@ -51,7 +52,7 @@ def main(): # sliding window inference for one image at every iteration val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available()) - device = torch.device('cuda:0') + device = torch.device("cuda:0") model = UNet( dimensions=3, in_channels=1, @@ -61,27 +62,29 @@ def main(): num_res_units=2, ).to(device) - model.load_state_dict(torch.load('best_metric_model.pth')) + model.load_state_dict(torch.load("best_metric_model.pth")) model.eval() with torch.no_grad(): - metric_sum = 0. 
+ metric_sum = 0.0 metric_count = 0 - saver = NiftiSaver(output_dir='./output') + saver = NiftiSaver(output_dir="./output") for val_data in val_loader: val_images, val_labels = val_data[0].to(device), val_data[1].to(device) # define sliding window size and batch size for windows inference roi_size = (96, 96, 96) sw_batch_size = 4 val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) - value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=True, - to_onehot_y=False, add_sigmoid=True) + value = compute_meandice( + y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True + ) metric_count += len(value) metric_sum += value.sum().item() val_outputs = (val_outputs.sigmoid() >= 0.5).float() saver.save_batch(val_outputs, val_data[2]) metric = metric_sum / metric_count - print('evaluation metric:', metric) + print("evaluation metric:", metric) shutil.rmtree(tempdir) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d/unet_evaluation_dict.py b/examples/segmentation_3d/unet_evaluation_dict.py index 573504b594..9402c8505e 100644 --- a/examples/segmentation_3d/unet_evaluation_dict.py +++ b/examples/segmentation_3d/unet_evaluation_dict.py @@ -26,38 +26,42 @@ from monai.networks.nets import UNet from monai.transforms import Compose, LoadNiftid, AsChannelFirstd, ScaleIntensityd, ToTensord + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(5): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "im%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) - val_files = [{'img': img, 'seg': seg} for img, seg in zip(images, segs)] + images = sorted(glob(os.path.join(tempdir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) + val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)] # define transforms for image and segmentation - val_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - ToTensord(keys=['img', 'seg']) - ]) + val_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + ToTensord(keys=["img", "seg"]), + ] + ) val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) # sliding window inference need to input 1 image in every iteration - val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + val_loader = DataLoader( + val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) - device = torch.device('cuda:0') + device = torch.device("cuda:0") model = UNet( dimensions=3, in_channels=1, @@ -67,28 +71,31 @@ def main(): num_res_units=2, ).to(device) - 
model.load_state_dict(torch.load('best_metric_model.pth')) + model.load_state_dict(torch.load("best_metric_model.pth")) model.eval() with torch.no_grad(): - metric_sum = 0. + metric_sum = 0.0 metric_count = 0 - saver = NiftiSaver(output_dir='./output') + saver = NiftiSaver(output_dir="./output") for val_data in val_loader: - val_images, val_labels = val_data['img'].to(device), val_data['seg'].to(device) + val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device) # define sliding window size and batch size for windows inference roi_size = (96, 96, 96) sw_batch_size = 4 val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) - value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=True, - to_onehot_y=False, add_sigmoid=True) + value = compute_meandice( + y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True + ) metric_count += len(value) metric_sum += value.sum().item() val_outputs = (val_outputs.sigmoid() >= 0.5).float() - saver.save_batch(val_outputs, {'filename_or_obj': val_data['img.filename_or_obj'], - 'affine': val_data['img.affine']}) + saver.save_batch( + val_outputs, {"filename_or_obj": val_data["img.filename_or_obj"], "affine": val_data["img.affine"]} + ) metric = metric_sum / metric_count - print('evaluation metric:', metric) + print("evaluation metric:", metric) shutil.rmtree(tempdir) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d/unet_training_array.py b/examples/segmentation_3d/unet_training_array.py index 41bb719cdc..ef788a8284 100644 --- a/examples/segmentation_3d/unet_training_array.py +++ b/examples/segmentation_3d/unet_training_array.py @@ -27,48 +27,46 @@ from monai.metrics import compute_meandice from monai.visualize.img2tensorboard import plot_2d_or_3d_image + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # create a temporary directory and 40 random image, mask paris tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(40): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "im%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) + images = sorted(glob(os.path.join(tempdir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) # define transforms for image and segmentation - train_imtrans = Compose([ - ScaleIntensity(), - AddChannel(), - RandSpatialCrop((96, 96, 96), random_size=False), - RandRotate90(prob=0.5, spatial_axes=(0, 2)), - ToTensor() - ]) - train_segtrans = Compose([ - AddChannel(), - RandSpatialCrop((96, 96, 96), random_size=False), - RandRotate90(prob=0.5, spatial_axes=(0, 2)), - ToTensor() - ]) - val_imtrans = Compose([ - ScaleIntensity(), - AddChannel(), - ToTensor() - ]) - val_segtrans = Compose([ - AddChannel(), - ToTensor() - ]) + train_imtrans = Compose( + [ + ScaleIntensity(), + AddChannel(), + RandSpatialCrop((96, 96, 96), random_size=False), + RandRotate90(prob=0.5, spatial_axes=(0, 
2)), + ToTensor(), + ] + ) + train_segtrans = Compose( + [ + AddChannel(), + RandSpatialCrop((96, 96, 96), random_size=False), + RandRotate90(prob=0.5, spatial_axes=(0, 2)), + ToTensor(), + ] + ) + val_imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()]) + val_segtrans = Compose([AddChannel(), ToTensor()]) # define nifti dataset, data loader check_ds = NiftiDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans) @@ -84,7 +82,7 @@ def main(): val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, pin_memory=torch.cuda.is_available()) # create UNet, DiceLoss and Adam optimizer - device = torch.device('cuda:0') + device = torch.device("cuda:0") model = monai.networks.nets.UNet( dimensions=3, in_channels=1, @@ -104,8 +102,8 @@ def main(): metric_values = list() writer = SummaryWriter() for epoch in range(5): - print('-' * 10) - print('epoch {}/{}'.format(epoch + 1, 5)) + print("-" * 10) + print("epoch {}/{}".format(epoch + 1, 5)) model.train() epoch_loss = 0 step = 0 @@ -119,16 +117,16 @@ def main(): optimizer.step() epoch_loss += loss.item() epoch_len = len(train_ds) // train_loader.batch_size - print('{}/{}, train_loss: {:.4f}'.format(step, epoch_len, loss.item())) - writer.add_scalar('train_loss', loss.item(), epoch_len * epoch + step) + print("{}/{}, train_loss: {:.4f}".format(step, epoch_len, loss.item())) + writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step) epoch_loss /= step epoch_loss_values.append(epoch_loss) - print('epoch {} average loss: {:.4f}'.format(epoch + 1, epoch_loss)) + print("epoch {} average loss: {:.4f}".format(epoch + 1, epoch_loss)) if (epoch + 1) % val_interval == 0: model.eval() with torch.no_grad(): - metric_sum = 0. + metric_sum = 0.0 metric_count = 0 val_images = None val_labels = None @@ -138,8 +136,9 @@ def main(): roi_size = (96, 96, 96) sw_batch_size = 4 val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) - value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=True, - to_onehot_y=False, add_sigmoid=True) + value = compute_meandice( + y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True + ) metric_count += len(value) metric_sum += value.sum().item() metric = metric_sum / metric_count @@ -147,18 +146,22 @@ def main(): if metric > best_metric: best_metric = metric best_metric_epoch = epoch + 1 - torch.save(model.state_dict(), 'best_metric_model.pth') - print('saved new best metric model') - print('current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}'.format( - epoch + 1, metric, best_metric, best_metric_epoch)) - writer.add_scalar('val_mean_dice', metric, epoch + 1) + torch.save(model.state_dict(), "best_metric_model.pth") + print("saved new best metric model") + print( + "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format( + epoch + 1, metric, best_metric, best_metric_epoch + ) + ) + writer.add_scalar("val_mean_dice", metric, epoch + 1) # plot the last model output as GIF image in TensorBoard with the corresponding image and label - plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag='image') - plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag='label') - plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag='output') + plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image") + plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label") + plot_2d_or_3d_image(val_outputs, epoch + 1, 
writer, index=0, tag="output") shutil.rmtree(tempdir) - print('train completed, best_metric: {:.4f} at epoch: {}'.format(best_metric, best_metric_epoch)) + print("train completed, best_metric: {:.4f} at epoch: {}".format(best_metric, best_metric_epoch)) writer.close() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d/unet_training_dict.py b/examples/segmentation_3d/unet_training_dict.py index 9e6958afda..8e442bd33f 100644 --- a/examples/segmentation_3d/unet_training_dict.py +++ b/examples/segmentation_3d/unet_training_dict.py @@ -22,69 +22,91 @@ from torch.utils.tensorboard import SummaryWriter import monai -from monai.transforms import \ - Compose, LoadNiftid, AsChannelFirstd, ScaleIntensityd, RandCropByPosNegLabeld, RandRotate90d, ToTensord +from monai.transforms import ( + Compose, + LoadNiftid, + AsChannelFirstd, + ScaleIntensityd, + RandCropByPosNegLabeld, + RandRotate90d, + ToTensord, +) from monai.data import create_test_image_3d, list_data_collate, sliding_window_inference from monai.metrics import compute_meandice from monai.visualize import plot_2d_or_3d_image + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # create a temporary directory and 40 random image, mask paris tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(40): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'img%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "img%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'img*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) - train_files = [{'img': img, 'seg': seg} for img, seg in zip(images[:20], segs[:20])] - val_files = [{'img': img, 'seg': seg} for img, seg in zip(images[-20:], segs[-20:])] + images = sorted(glob(os.path.join(tempdir, "img*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) + train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])] + val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])] # define transforms for image and segmentation - train_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - RandCropByPosNegLabeld(keys=['img', 'seg'], label_key='seg', size=[96, 96, 96], pos=1, neg=1, num_samples=4), - RandRotate90d(keys=['img', 'seg'], prob=0.5, spatial_axes=[0, 2]), - ToTensord(keys=['img', 'seg']) - ]) - val_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - ToTensord(keys=['img', 'seg']) - ]) + train_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + RandCropByPosNegLabeld( + keys=["img", "seg"], label_key="seg", size=[96, 96, 96], pos=1, neg=1, num_samples=4 + ), + RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]), + ToTensord(keys=["img", "seg"]), + ] + ) + val_transforms = Compose( + [ + 
LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + ToTensord(keys=["img", "seg"]), + ] + ) # define dataset, data loader check_ds = monai.data.Dataset(data=train_files, transform=train_transforms) # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training - check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + check_loader = DataLoader( + check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) check_data = monai.utils.misc.first(check_loader) - print(check_data['img'].shape, check_data['seg'].shape) + print(check_data["img"].shape, check_data["seg"].shape) # create a training data loader train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training - train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, - collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()) + train_loader = DataLoader( + train_ds, + batch_size=2, + shuffle=True, + num_workers=4, + collate_fn=list_data_collate, + pin_memory=torch.cuda.is_available(), + ) # create a validation data loader val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) - val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + val_loader = DataLoader( + val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) # create UNet, DiceLoss and Adam optimizer - device = torch.device('cuda:0') + device = torch.device("cuda:0") model = monai.networks.nets.UNet( dimensions=3, in_channels=1, @@ -104,14 +126,14 @@ def main(): metric_values = list() writer = SummaryWriter() for epoch in range(5): - print('-' * 10) - print('epoch {}/{}'.format(epoch + 1, 5)) + print("-" * 10) + print("epoch {}/{}".format(epoch + 1, 5)) model.train() epoch_loss = 0 step = 0 for batch_data in train_loader: step += 1 - inputs, labels = batch_data['img'].to(device), batch_data['seg'].to(device) + inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device) optimizer.zero_grad() outputs = model(inputs) loss = loss_function(outputs, labels) @@ -119,27 +141,28 @@ def main(): optimizer.step() epoch_loss += loss.item() epoch_len = len(train_ds) // train_loader.batch_size - print('{}/{}, train_loss: {:.4f}'.format(step, epoch_len, loss.item())) - writer.add_scalar('train_loss', loss.item(), epoch_len * epoch + step) + print("{}/{}, train_loss: {:.4f}".format(step, epoch_len, loss.item())) + writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step) epoch_loss /= step epoch_loss_values.append(epoch_loss) - print('epoch {} average loss: {:.4f}'.format(epoch + 1, epoch_loss)) + print("epoch {} average loss: {:.4f}".format(epoch + 1, epoch_loss)) if (epoch + 1) % val_interval == 0: model.eval() with torch.no_grad(): - metric_sum = 0. 
+ metric_sum = 0.0 metric_count = 0 val_images = None val_labels = None val_outputs = None for val_data in val_loader: - val_images, val_labels = val_data['img'].to(device), val_data['seg'].to(device) + val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device) roi_size = (96, 96, 96) sw_batch_size = 4 val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) - value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=True, - to_onehot_y=False, add_sigmoid=True) + value = compute_meandice( + y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True + ) metric_count += len(value) metric_sum += value.sum().item() metric = metric_sum / metric_count @@ -147,18 +170,22 @@ def main(): if metric > best_metric: best_metric = metric best_metric_epoch = epoch + 1 - torch.save(model.state_dict(), 'best_metric_model.pth') - print('saved new best metric model') - print('current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}'.format( - epoch + 1, metric, best_metric, best_metric_epoch)) - writer.add_scalar('val_mean_dice', metric, epoch + 1) + torch.save(model.state_dict(), "best_metric_model.pth") + print("saved new best metric model") + print( + "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format( + epoch + 1, metric, best_metric, best_metric_epoch + ) + ) + writer.add_scalar("val_mean_dice", metric, epoch + 1) # plot the last model output as GIF image in TensorBoard with the corresponding image and label - plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag='image') - plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag='label') - plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag='output') + plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image") + plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label") + plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output") shutil.rmtree(tempdir) - print('train completed, best_metric: {:.4f} at epoch: {}'.format(best_metric, best_metric_epoch)) + print("train completed, best_metric: {:.4f} at epoch: {}".format(best_metric, best_metric_epoch)) writer.close() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d_ignite/unet_evaluation_array.py b/examples/segmentation_3d_ignite/unet_evaluation_array.py index 26be6fa9e6..48ea302bea 100644 --- a/examples/segmentation_3d_ignite/unet_evaluation_array.py +++ b/examples/segmentation_3d_ignite/unet_evaluation_array.py @@ -28,30 +28,31 @@ from monai.networks.nets import UNet from monai.networks.utils import predict_segmentation + def main(): config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(5): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "im%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) + images = 
sorted(glob(os.path.join(tempdir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) # define transforms for image and segmentation imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()]) segtrans = Compose([AddChannel(), ToTensor()]) ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False) - device = torch.device('cuda:0') + device = torch.device("cuda:0") net = UNet( dimensions=3, in_channels=1, @@ -66,7 +67,6 @@ def main(): roi_size = (96, 96, 96) sw_batch_size = 4 - def _sliding_window_processor(engine, batch): net.eval() with torch.no_grad(): @@ -74,28 +74,32 @@ def _sliding_window_processor(engine, batch): seg_probs = sliding_window_inference(val_images, roi_size, sw_batch_size, net) return seg_probs, val_labels - evaluator = Engine(_sliding_window_processor) # add evaluation metric to the evaluator engine - MeanDice(add_sigmoid=True, to_onehot_y=False).attach(evaluator, 'Mean_Dice') + MeanDice(add_sigmoid=True, to_onehot_y=False).attach(evaluator, "Mean_Dice") # StatsHandler prints loss at every iteration and print metrics at every epoch, # we don't need to print loss for evaluator, so just print metrics, user can also customize print functions val_stats_handler = StatsHandler( - name='evaluator', - output_transform=lambda x: None # no need to print loss value, so disable per iteration output + name="evaluator", + output_transform=lambda x: None, # no need to print loss value, so disable per iteration output ) val_stats_handler.attach(evaluator) # for the array data format, assume the 3rd item of batch data is the meta_data file_saver = SegmentationSaver( - output_dir='tempdir', output_ext='.nii.gz', output_postfix='seg', name='evaluator', - batch_transform=lambda x: x[2], output_transform=lambda output: predict_segmentation(output[0])) + output_dir="tempdir", + output_ext=".nii.gz", + output_postfix="seg", + name="evaluator", + batch_transform=lambda x: x[2], + output_transform=lambda output: predict_segmentation(output[0]), + ) file_saver.attach(evaluator) # the model was trained by "unet_training_array" example - ckpt_saver = CheckpointLoader(load_path='./runs/net_checkpoint_50.pth', load_dict={'net': net}) + ckpt_saver = CheckpointLoader(load_path="./runs/net_checkpoint_50.pth", load_dict={"net": net}) ckpt_saver.attach(evaluator) # sliding window inference for one image at every iteration @@ -103,5 +107,6 @@ def _sliding_window_processor(engine, batch): state = evaluator.run(loader) shutil.rmtree(tempdir) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d_ignite/unet_evaluation_dict.py b/examples/segmentation_3d_ignite/unet_evaluation_dict.py index 5d8a35a170..4cfe1c5453 100644 --- a/examples/segmentation_3d_ignite/unet_evaluation_dict.py +++ b/examples/segmentation_3d_ignite/unet_evaluation_dict.py @@ -28,35 +28,38 @@ from monai.transforms import Compose, LoadNiftid, AsChannelFirstd, ScaleIntensityd, ToTensord from monai.handlers import SegmentationSaver, CheckpointLoader, StatsHandler, MeanDice + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(5): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 
'im%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "im%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) - val_files = [{'img': img, 'seg': seg} for img, seg in zip(images, segs)] + images = sorted(glob(os.path.join(tempdir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) + val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)] # define transforms for image and segmentation - val_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - ToTensord(keys=['img', 'seg']) - ]) + val_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + ToTensord(keys=["img", "seg"]), + ] + ) val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) - device = torch.device('cuda:0') + device = torch.device("cuda:0") net = UNet( dimensions=3, in_channels=1, @@ -71,41 +74,45 @@ def main(): roi_size = (96, 96, 96) sw_batch_size = 4 - def _sliding_window_processor(engine, batch): net.eval() with torch.no_grad(): - val_images, val_labels = batch['img'].to(device), batch['seg'].to(device) + val_images, val_labels = batch["img"].to(device), batch["seg"].to(device) seg_probs = sliding_window_inference(val_images, roi_size, sw_batch_size, net) return seg_probs, val_labels - evaluator = Engine(_sliding_window_processor) # add evaluation metric to the evaluator engine - MeanDice(add_sigmoid=True, to_onehot_y=False).attach(evaluator, 'Mean_Dice') + MeanDice(add_sigmoid=True, to_onehot_y=False).attach(evaluator, "Mean_Dice") # StatsHandler prints loss at every iteration and print metrics at every epoch, # we don't need to print loss for evaluator, so just print metrics, user can also customize print functions val_stats_handler = StatsHandler( - name='evaluator', - output_transform=lambda x: None # no need to print loss value, so disable per iteration output + name="evaluator", + output_transform=lambda x: None, # no need to print loss value, so disable per iteration output ) val_stats_handler.attach(evaluator) # convert the necessary metadata from batch data - SegmentationSaver(output_dir='tempdir', output_ext='.nii.gz', output_postfix='seg', name='evaluator', - batch_transform=lambda batch: {'filename_or_obj': batch['img.filename_or_obj'], - 'affine': batch['img.affine']}, - output_transform=lambda output: predict_segmentation(output[0])).attach(evaluator) + SegmentationSaver( + output_dir="tempdir", + output_ext=".nii.gz", + output_postfix="seg", + name="evaluator", + batch_transform=lambda batch: {"filename_or_obj": batch["img.filename_or_obj"], "affine": batch["img.affine"]}, + output_transform=lambda output: predict_segmentation(output[0]), + ).attach(evaluator) # the model was trained by "unet_training_dict" example - CheckpointLoader(load_path='./runs/net_checkpoint_50.pth', load_dict={'net': net}).attach(evaluator) + CheckpointLoader(load_path="./runs/net_checkpoint_50.pth", load_dict={"net": net}).attach(evaluator) # sliding window inference for one image at every iteration - val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + val_loader = 
DataLoader( + val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) state = evaluator.run(val_loader) shutil.rmtree(tempdir) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d_ignite/unet_training_array.py b/examples/segmentation_3d_ignite/unet_training_array.py index 9bafd34b75..77a37d2256 100644 --- a/examples/segmentation_3d_ignite/unet_training_array.py +++ b/examples/segmentation_3d_ignite/unet_training_array.py @@ -25,52 +25,42 @@ import monai from monai.data import NiftiDataset, create_test_image_3d from monai.transforms import Compose, AddChannel, ScaleIntensity, RandSpatialCrop, Resize, ToTensor -from monai.handlers import \ - StatsHandler, TensorBoardStatsHandler, TensorBoardImageHandler, MeanDice, stopping_fn_from_metric +from monai.handlers import ( + StatsHandler, + TensorBoardStatsHandler, + TensorBoardImageHandler, + MeanDice, + stopping_fn_from_metric, +) from monai.networks.utils import predict_segmentation + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # create a temporary directory and 40 random image, mask paris tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(40): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "im%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) + images = sorted(glob(os.path.join(tempdir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) # define transforms for image and segmentation - train_imtrans = Compose([ - ScaleIntensity(), - AddChannel(), - RandSpatialCrop((96, 96, 96), random_size=False), - ToTensor() - ]) - train_segtrans = Compose([ - AddChannel(), - RandSpatialCrop((96, 96, 96), random_size=False), - ToTensor() - ]) - val_imtrans = Compose([ - ScaleIntensity(), - AddChannel(), - Resize((96, 96, 96)), - ToTensor() - ]) - val_segtrans = Compose([ - AddChannel(), - Resize((96, 96, 96)), - ToTensor() - ]) + train_imtrans = Compose( + [ScaleIntensity(), AddChannel(), RandSpatialCrop((96, 96, 96), random_size=False), ToTensor()] + ) + train_segtrans = Compose([AddChannel(), RandSpatialCrop((96, 96, 96), random_size=False), ToTensor()]) + val_imtrans = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()]) + val_segtrans = Compose([AddChannel(), Resize((96, 96, 96)), ToTensor()]) # define nifti dataset, data loader check_ds = NiftiDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans) @@ -97,22 +87,22 @@ def main(): loss = monai.losses.DiceLoss(do_sigmoid=True) lr = 1e-3 opt = torch.optim.Adam(net.parameters(), lr) - device = torch.device('cuda:0') + device = torch.device("cuda:0") # ignite trainer expects batch=(img, seg) and returns output=loss at every iteration, # user can add output_transform to return other values, like: y_pred, y, etc. 
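As the comment above says, the Ignite trainer consumes `(img, seg)` batches and returns the loss; each iteration of `create_supervised_trainer(net, opt, loss, device, False)` boils down to roughly the step below (a simplified sketch using the names defined in this script; the real implementation also handles `non_blocking` transfers and an `output_transform`):

```python
def train_step(engine, batch):
    net.train()
    img, seg = batch[0].to(device), batch[1].to(device)  # NiftiDataset yields (img, seg) tuples
    opt.zero_grad()
    loss_value = loss(net(img), seg)  # DiceLoss(do_sigmoid=True) applies the sigmoid internally
    loss_value.backward()
    opt.step()
    return loss_value.item()          # becomes engine.state.output, which StatsHandler prints
```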
trainer = create_supervised_trainer(net, opt, loss, device, False) # adding checkpoint handler to save models (network params and optimizer stats) during training - checkpoint_handler = ModelCheckpoint('./runs/', 'net', n_saved=10, require_empty=False) - trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, - handler=checkpoint_handler, - to_save={'net': net, 'opt': opt}) + checkpoint_handler = ModelCheckpoint("./runs/", "net", n_saved=10, require_empty=False) + trainer.add_event_handler( + event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt} + ) # StatsHandler prints loss at every iteration and print metrics at every epoch, # we don't set metrics for trainer here, so just print loss, user can also customize print functions # and can use output_transform to convert engine.state.output if it's not a loss value - train_stats_handler = StatsHandler(name='trainer') + train_stats_handler = StatsHandler(name="trainer") train_stats_handler.attach(trainer) # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler @@ -121,7 +111,7 @@ def main(): validation_every_n_epochs = 1 # Set parameters for validation - metric_name = 'Mean_Dice' + metric_name = "Mean_Dice" # add evaluation metric to the evaluator engine val_metrics = {metric_name: MeanDice(add_sigmoid=True, to_onehot_y=False)} @@ -129,29 +119,27 @@ def main(): # user can add output_transform to return other values evaluator = create_supervised_evaluator(net, val_metrics, device, True) - @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs)) def run_validation(engine): evaluator.run(val_loader) - # add early stopping handler to evaluator - early_stopper = EarlyStopping(patience=4, - score_function=stopping_fn_from_metric(metric_name), - trainer=trainer) + early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer) evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper) # add stats event handler to print validation stats via evaluator val_stats_handler = StatsHandler( - name='evaluator', + name="evaluator", output_transform=lambda x: None, # no need to print loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_stats_handler.attach(evaluator) # add handler to record metrics to TensorBoard at every validation epoch val_tensorboard_stats_handler = TensorBoardStatsHandler( output_transform=lambda x: None, # no need to plot loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_tensorboard_stats_handler.attach(evaluator) # add handler to draw the first image and the corresponding label and model output in the last batch @@ -159,7 +147,7 @@ def run_validation(engine): val_tensorboard_image_handler = TensorBoardImageHandler( batch_transform=lambda batch: (batch[0], batch[1]), output_transform=lambda output: predict_segmentation(output[0]), - global_iter_transform=lambda x: trainer.state.epoch + global_iter_transform=lambda x: trainer.state.epoch, ) evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=val_tensorboard_image_handler) @@ -167,5 +155,6 @@ def 
run_validation(engine): state = trainer.run(train_loader, train_epochs) shutil.rmtree(tempdir) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/examples/segmentation_3d_ignite/unet_training_dict.py b/examples/segmentation_3d_ignite/unet_training_dict.py index 94d59c0d2d..8c83a2c703 100644 --- a/examples/segmentation_3d_ignite/unet_training_dict.py +++ b/examples/segmentation_3d_ignite/unet_training_dict.py @@ -23,67 +23,94 @@ from torch.utils.data import DataLoader import monai -from monai.transforms import \ - Compose, LoadNiftid, AsChannelFirstd, ScaleIntensityd, RandCropByPosNegLabeld, RandRotate90d, ToTensord -from monai.handlers import \ - StatsHandler, TensorBoardStatsHandler, TensorBoardImageHandler, MeanDice, stopping_fn_from_metric +from monai.transforms import ( + Compose, + LoadNiftid, + AsChannelFirstd, + ScaleIntensityd, + RandCropByPosNegLabeld, + RandRotate90d, + ToTensord, +) +from monai.handlers import ( + StatsHandler, + TensorBoardStatsHandler, + TensorBoardImageHandler, + MeanDice, + stopping_fn_from_metric, +) from monai.data import create_test_image_3d, list_data_collate from monai.networks.utils import predict_segmentation + def main(): monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO) # create a temporary directory and 40 random image, mask paris tempdir = tempfile.mkdtemp() - print('generating synthetic data to {} (this may take a while)'.format(tempdir)) + print("generating synthetic data to {} (this may take a while)".format(tempdir)) for i in range(40): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'img%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "img%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(tempdir, "seg%i.nii.gz" % i)) - images = sorted(glob(os.path.join(tempdir, 'img*.nii.gz'))) - segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz'))) - train_files = [{'img': img, 'seg': seg} for img, seg in zip(images[:20], segs[:20])] - val_files = [{'img': img, 'seg': seg} for img, seg in zip(images[-20:], segs[-20:])] + images = sorted(glob(os.path.join(tempdir, "img*.nii.gz"))) + segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz"))) + train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])] + val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])] # define transforms for image and segmentation - train_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - RandCropByPosNegLabeld(keys=['img', 'seg'], label_key='seg', size=[96, 96, 96], pos=1, neg=1, num_samples=4), - RandRotate90d(keys=['img', 'seg'], prob=0.5, spatial_axes=[0, 2]), - ToTensord(keys=['img', 'seg']) - ]) - val_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - ToTensord(keys=['img', 'seg']) - ]) + train_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + RandCropByPosNegLabeld( + keys=["img", "seg"], label_key="seg", size=[96, 96, 96], pos=1, neg=1, num_samples=4 + ), + RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]), + ToTensord(keys=["img", 
"seg"]), + ] + ) + val_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + ToTensord(keys=["img", "seg"]), + ] + ) # define dataset, data loader check_ds = monai.data.Dataset(data=train_files, transform=train_transforms) # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training - check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + check_loader = DataLoader( + check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) check_data = monai.utils.misc.first(check_loader) - print(check_data['img'].shape, check_data['seg'].shape) + print(check_data["img"].shape, check_data["seg"].shape) # create a training data loader train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training - train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, - collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()) + train_loader = DataLoader( + train_ds, + batch_size=2, + shuffle=True, + num_workers=4, + collate_fn=list_data_collate, + pin_memory=torch.cuda.is_available(), + ) # create a validation data loader val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) - val_loader = DataLoader(val_ds, batch_size=5, num_workers=8, collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + val_loader = DataLoader( + val_ds, batch_size=5, num_workers=8, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) # create UNet, DiceLoss and Adam optimizer net = monai.networks.nets.UNet( @@ -97,26 +124,25 @@ def main(): loss = monai.losses.DiceLoss(do_sigmoid=True) lr = 1e-3 opt = torch.optim.Adam(net.parameters(), lr) - device = torch.device('cuda:0') + device = torch.device("cuda:0") # ignite trainer expects batch=(img, seg) and returns output=loss at every iteration, # user can add output_transform to return other values, like: y_pred, y, etc. 
def prepare_batch(batch, device=None, non_blocking=False): - return _prepare_batch((batch['img'], batch['seg']), device, non_blocking) - + return _prepare_batch((batch["img"], batch["seg"]), device, non_blocking) trainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch) # adding checkpoint handler to save models (network params and optimizer stats) during training - checkpoint_handler = ModelCheckpoint('./runs/', 'net', n_saved=10, require_empty=False) - trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, - handler=checkpoint_handler, - to_save={'net': net, 'opt': opt}) + checkpoint_handler = ModelCheckpoint("./runs/", "net", n_saved=10, require_empty=False) + trainer.add_event_handler( + event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt} + ) # StatsHandler prints loss at every iteration and print metrics at every epoch, # we don't set metrics for trainer here, so just print loss, user can also customize print functions # and can use output_transform to convert engine.state.output if it's not loss value - train_stats_handler = StatsHandler(name='trainer') + train_stats_handler = StatsHandler(name="trainer") train_stats_handler.attach(trainer) # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler @@ -125,7 +151,7 @@ def prepare_batch(batch, device=None, non_blocking=False): validation_every_n_iters = 5 # set parameters for validation - metric_name = 'Mean_Dice' + metric_name = "Mean_Dice" # add evaluation metric to the evaluator engine val_metrics = {metric_name: MeanDice(add_sigmoid=True, to_onehot_y=False)} @@ -133,44 +159,42 @@ def prepare_batch(batch, device=None, non_blocking=False): # user can add output_transform to return other values evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch) - @trainer.on(Events.ITERATION_COMPLETED(every=validation_every_n_iters)) def run_validation(engine): evaluator.run(val_loader) - # add early stopping handler to evaluator - early_stopper = EarlyStopping(patience=4, - score_function=stopping_fn_from_metric(metric_name), - trainer=trainer) + early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer) evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper) # add stats event handler to print validation stats via evaluator val_stats_handler = StatsHandler( - name='evaluator', + name="evaluator", output_transform=lambda x: None, # no need to print loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer + global_epoch_transform=lambda x: trainer.state.epoch, + ) # fetch global epoch number from trainer val_stats_handler.attach(evaluator) # add handler to record metrics to TensorBoard at every validation epoch val_tensorboard_stats_handler = TensorBoardStatsHandler( output_transform=lambda x: None, # no need to plot loss value, so disable per iteration output - global_epoch_transform=lambda x: trainer.state.iteration) # fetch global iteration number from trainer + global_epoch_transform=lambda x: trainer.state.iteration, + ) # fetch global iteration number from trainer val_tensorboard_stats_handler.attach(evaluator) # add handler to draw the first image and the corresponding label and model output in the last batch # here we draw the 3D output as GIF format along the depth axis, every 2 validation 
iterations. val_tensorboard_image_handler = TensorBoardImageHandler( - batch_transform=lambda batch: (batch['img'], batch['seg']), + batch_transform=lambda batch: (batch["img"], batch["seg"]), output_transform=lambda output: predict_segmentation(output[0]), - global_iter_transform=lambda x: trainer.state.epoch + global_iter_transform=lambda x: trainer.state.epoch, ) - evaluator.add_event_handler( - event_name=Events.ITERATION_COMPLETED(every=2), handler=val_tensorboard_image_handler) + evaluator.add_event_handler(event_name=Events.ITERATION_COMPLETED(every=2), handler=val_tensorboard_image_handler) train_epochs = 5 state = trainer.run(train_loader, train_epochs) shutil.rmtree(tempdir) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/monai/__init__.py b/monai/__init__.py index b6907cc290..fa9ed89bbf 100644 --- a/monai/__init__.py +++ b/monai/__init__.py @@ -15,7 +15,7 @@ from ._version import get_versions from .utils.module import load_submodules -__version__ = get_versions()['version'] +__version__ = get_versions()["version"] del get_versions __copyright__ = "(c) 2020 MONAI Consortium" diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 26af53104f..826aaae3c4 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -20,9 +20,10 @@ try: import ignite + ignite_version = ignite.__version__ except ImportError: - ignite_version = 'NOT INSTALLED' + ignite_version = "NOT INSTALLED" def get_config_values(): diff --git a/monai/data/csv_saver.py b/monai/data/csv_saver.py index 760ee9fb7e..15446ead92 100644 --- a/monai/data/csv_saver.py +++ b/monai/data/csv_saver.py @@ -24,7 +24,7 @@ class CSVSaver: the cached data into CSV file. If no meta data provided, use index from 0 to save data. """ - def __init__(self, output_dir='./', filename='predictions.csv', overwrite=True): + def __init__(self, output_dir="./", filename="predictions.csv", overwrite=True): """ Args: output_dir (str): output CSV file directory. @@ -35,7 +35,7 @@ def __init__(self, output_dir='./', filename='predictions.csv', overwrite=True): """ self.output_dir = output_dir self._cache_dict = OrderedDict() - assert isinstance(filename, str) and filename[-4:] == '.csv', 'filename must be a string with CSV format.' + assert isinstance(filename, str) and filename[-4:] == ".csv", "filename must be a string with CSV format." self._filepath = os.path.join(output_dir, filename) self.overwrite = overwrite self._data_index = 0 @@ -46,14 +46,14 @@ def finalize(self): """ if not self.overwrite and os.path.exists(self._filepath): - with open(self._filepath, 'r') as f: + with open(self._filepath, "r") as f: reader = csv.reader(f) for row in reader: self._cache_dict[row[0]] = np.array(row[1:]).astype(np.float32) if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) - with open(self._filepath, 'w') as f: + with open(self._filepath, "w") as f: for k, v in self._cache_dict.items(): f.write(k) for result in v.flatten(): @@ -70,7 +70,7 @@ def save(self, data, meta_data=None): meta_data (dict): the meta data information corresponding to the data. 
""" - save_key = meta_data['filename_or_obj'] if meta_data else str(self._data_index) + save_key = meta_data["filename_or_obj"] if meta_data else str(self._data_index) self._data_index += 1 if torch.is_tensor(data): data = data.detach().cpu().numpy() diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 58ebb9386b..9c51839131 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -165,7 +165,9 @@ def _pre_first_random_cachecheck(self, item_transformed): cache_dir_path: Path = Path(self.cache_dir) if cache_dir_path.is_dir(): # TODO: Find way to hash transforms content as part of the cache - data_item_md5 = hashlib.md5(json.dumps(item_transformed, sort_keys=True).encode('utf-8')).hexdigest() + data_item_md5 = hashlib.md5( + json.dumps(item_transformed, sort_keys=True).encode("utf-8") + ).hexdigest() hashfile: Path = Path(cache_dir_path) / f"{data_item_md5}.pt" if hashfile is not None and hashfile.is_file(): @@ -241,7 +243,7 @@ def __init__(self, data, transform, cache_num=sys.maxsize, cache_rate=1.0, num_w super().__init__(data, transform) self.cache_num = min(cache_num, int(len(self) * cache_rate), len(self)) self._cache = [None] * self.cache_num - print('Load and cache transformed data...') + print("Load and cache transformed data...") if num_workers > 0: self._item_processed = 0 self._thread_lock = threading.Lock() diff --git a/monai/data/grid_dataset.py b/monai/data/grid_dataset.py index 968a0cdb70..6850642c3a 100644 --- a/monai/data/grid_dataset.py +++ b/monai/data/grid_dataset.py @@ -50,7 +50,7 @@ def __iter__(self): iter_start = 0 iter_end = len(self.dataset) - if worker_info is not None: + if worker_info is not None: # split workload per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers))) worker_id = worker_info.id @@ -60,6 +60,8 @@ def __iter__(self): for index in range(iter_start, iter_end): arrays = self.dataset[index] - iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.pad_mode, **self.pad_opts) for a in arrays] + iters = [ + iter_patch(a, self.patch_size, self.start_pos, False, self.pad_mode, **self.pad_opts) for a in arrays + ] yield from zip(*iters) diff --git a/monai/data/nifti_reader.py b/monai/data/nifti_reader.py index d4db00fe2d..d59195f0d8 100644 --- a/monai/data/nifti_reader.py +++ b/monai/data/nifti_reader.py @@ -41,14 +41,14 @@ def load_nifti(filename_or_obj, as_closest_canonical=False, image_only=True, dty img = correct_nifti_header_if_necessary(img) header = dict(img.header) - header['filename_or_obj'] = filename_or_obj - header['original_affine'] = img.affine - header['affine'] = img.affine - header['as_closest_canonical'] = as_closest_canonical + header["filename_or_obj"] = filename_or_obj + header["original_affine"] = img.affine + header["affine"] = img.affine + header["as_closest_canonical"] = as_closest_canonical if as_closest_canonical: img = nib.as_closest_canonical(img) - header['affine'] = img.affine + header["affine"] = img.affine if dtype is not None: dat = img.get_fdata(dtype=dtype) @@ -66,8 +66,17 @@ class NiftiDataset(Dataset): for the image and segmentation arrays separately. 
""" - def __init__(self, image_files, seg_files=None, labels=None, as_closest_canonical=False, - transform=None, seg_transform=None, image_only=True, dtype=None): + def __init__( + self, + image_files, + seg_files=None, + labels=None, + as_closest_canonical=False, + transform=None, + seg_transform=None, + image_only=True, + dtype=None, + ): """ Initializes the dataset with the image and segmentation filename lists. The transform `transform` is applied to the images and `seg_transform` to the segmentations. @@ -84,7 +93,7 @@ def __init__(self, image_files, seg_files=None, labels=None, as_closest_canonica """ if seg_files is not None and len(image_files) != len(seg_files): - raise ValueError('Must have same number of image and segmentation files') + raise ValueError("Must have same number of image and segmentation files") self.image_files = image_files self.seg_files = seg_files @@ -101,11 +110,19 @@ def __len__(self): def __getitem__(self, index): meta_data = None if self.image_only: - img = load_nifti(self.image_files[index], as_closest_canonical=self.as_closest_canonical, - image_only=self.image_only, dtype=self.dtype) + img = load_nifti( + self.image_files[index], + as_closest_canonical=self.as_closest_canonical, + image_only=self.image_only, + dtype=self.dtype, + ) else: - img, meta_data = load_nifti(self.image_files[index], as_closest_canonical=self.as_closest_canonical, - image_only=self.image_only, dtype=self.dtype) + img, meta_data = load_nifti( + self.image_files[index], + as_closest_canonical=self.as_closest_canonical, + image_only=self.image_only, + dtype=self.dtype, + ) target = None if self.seg_files is not None: target = load_nifti(self.seg_files[index]) @@ -130,8 +147,10 @@ def __getitem__(self, index): compatible_meta = {} for meta_key in meta_data: meta_datum = meta_data[meta_key] - if type(meta_datum).__name__ == 'ndarray' \ - and np_str_obj_array_pattern.search(meta_datum.dtype.str) is not None: + if ( + type(meta_datum).__name__ == "ndarray" + and np_str_obj_array_pattern.search(meta_datum.dtype.str) is not None + ): continue compatible_meta[meta_key] = meta_datum return img, target, compatible_meta diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py index 41d193cea9..26b00a0cdd 100644 --- a/monai/data/nifti_saver.py +++ b/monai/data/nifti_saver.py @@ -23,8 +23,17 @@ class NiftiSaver: use index from 0 as the filename prefix. """ - def __init__(self, output_dir='./', output_postfix='seg', output_ext='.nii.gz', - resample=True, interp_order=0, mode='constant', cval=0, dtype=None): + def __init__( + self, + output_dir="./", + output_postfix="seg", + output_ext=".nii.gz", + resample=True, + interp_order=0, + mode="constant", + cval=0, + dtype=None, + ): """ Args: output_dir (str): output image directory. 
@@ -111,21 +120,30 @@ def save(self, data, meta_data=None): See Also :py:meth:`monai.data.nifti_writer.write_nifti` """ - filename = meta_data['filename_or_obj'] if meta_data else str(self._data_index) + filename = meta_data["filename_or_obj"] if meta_data else str(self._data_index) self._data_index += 1 - original_affine = meta_data.get('original_affine', None) if meta_data else None - affine = meta_data.get('affine', None) if meta_data else None - spatial_shape = meta_data.get('spatial_shape', None) if meta_data else None + original_affine = meta_data.get("original_affine", None) if meta_data else None + affine = meta_data.get("affine", None) if meta_data else None + spatial_shape = meta_data.get("spatial_shape", None) if meta_data else None if torch.is_tensor(data): data = data.detach().cpu().numpy() filename = self._create_file_basename(self.output_postfix, filename, self.output_dir) - filename = '{}{}'.format(filename, self.output_ext) + filename = "{}{}".format(filename, self.output_ext) # change data to "channel last" format and write to nifti format file data = np.moveaxis(data, 0, -1) - write_nifti(data, file_name=filename, affine=affine, target_affine=original_affine, - resample=self.resample, output_shape=spatial_shape, interp_order=self.interp_order, - mode=self.mode, cval=self.cval, dtype=self.dtype or data.dtype) + write_nifti( + data, + file_name=filename, + affine=affine, + target_affine=original_affine, + resample=self.resample, + output_shape=spatial_shape, + interp_order=self.interp_order, + mode=self.mode, + cval=self.cval, + dtype=self.dtype or data.dtype, + ) def save_batch(self, batch_data, meta_data=None): """Save a batch of data into Nifti format files. diff --git a/monai/data/nifti_writer.py b/monai/data/nifti_writer.py index a093b37aaf..64c5c9108c 100644 --- a/monai/data/nifti_writer.py +++ b/monai/data/nifti_writer.py @@ -16,16 +16,18 @@ from monai.data.utils import compute_shape_offset, to_affine_nd -def write_nifti(data, - file_name, - affine=None, - target_affine=None, - resample=True, - output_shape=None, - interp_order=3, - mode='constant', - cval=0, - dtype=None): +def write_nifti( + data, + file_name, + affine=None, + target_affine=None, + resample=True, + output_shape=None, + interp_order=3, + mode="constant", + cval=0, + dtype=None, +): """ Write numpy data into NIfTI files to disk. This function converts data into the coordinate system defined by `target_affine` when `target_affine` @@ -76,7 +78,7 @@ def write_nifti(data, this option is used when `resample = True`. dtype (np.dtype, optional): convert the image to save to this data type. """ - assert isinstance(data, np.ndarray), 'input data must be numpy array.' + assert isinstance(data, np.ndarray), "input data must be numpy array." 
sr = min(data.ndim, 3) if affine is None: affine = np.eye(4, dtype=np.float64) @@ -115,22 +117,22 @@ def write_nifti(data, data_chns = [] for chn in range(data_.shape[-1]): data_chns.append( - scipy.ndimage.affine_transform(data_[..., chn], - matrix=transform, - output_shape=output_shape[:3], - order=interp_order, - mode=mode, - cval=cval)) + scipy.ndimage.affine_transform( + data_[..., chn], + matrix=transform, + output_shape=output_shape[:3], + order=interp_order, + mode=mode, + cval=cval, + ) + ) data_chns = np.stack(data_chns, axis=-1) data_ = data_chns.reshape(list(data_chns.shape[:3]) + list(channel_shape)) else: data_ = data.astype(dtype) - data_ = scipy.ndimage.affine_transform(data_, - matrix=transform, - output_shape=output_shape[:data_.ndim], - order=interp_order, - mode=mode, - cval=cval) + data_ = scipy.ndimage.affine_transform( + data_, matrix=transform, output_shape=output_shape[: data_.ndim], order=interp_order, mode=mode, cval=cval + ) results_img = nib.Nifti1Image(data_, to_affine_nd(3, target_affine)) nib.save(results_img, file_name) return diff --git a/monai/data/sliding_window_inference.py b/monai/data/sliding_window_inference.py index 70cc7d6b39..500f2fe275 100644 --- a/monai/data/sliding_window_inference.py +++ b/monai/data/sliding_window_inference.py @@ -31,7 +31,7 @@ def sliding_window_inference(inputs, roi_size, sw_batch_size, predictor): execute on 1 image/per inference, run a batch of window slices of 1 input image. """ num_spatial_dims = len(inputs.shape) - 2 - assert len(roi_size) == num_spatial_dims, 'roi_size {} does not match input dims.'.format(roi_size) + assert len(roi_size) == num_spatial_dims, "roi_size {} does not match input dims.".format(roi_size) # determine image spatial size and batch size # Note: all input images must have the same image size and batch size @@ -46,7 +46,7 @@ def sliding_window_inference(inputs, roi_size, sw_batch_size, predictor): # in case that image size is smaller than roi size image_size = tuple(max(image_size[i], roi_size[i]) for i in range(num_spatial_dims)) pad_size = [i for k in range(len(inputs.shape) - 1, 1, -1) for i in (0, max(roi_size[k - 2] - inputs.shape[k], 0))] - inputs = F.pad(inputs, pad=pad_size, mode='constant', value=0) + inputs = F.pad(inputs, pad=pad_size, mode="constant", value=0) # TODO: interval from user's specification scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims) @@ -88,23 +88,23 @@ def sliding_window_inference(inputs, roi_size, sw_batch_size, predictor): if num_spatial_dims == 3: slice_i, slice_j, slice_k = slices[curr_index] output_image[0, :, slice_i, slice_j, slice_k] += output_rois[window_id][curr_index - slice_index, :] - count_map[0, :, slice_i, slice_j, slice_k] += 1. + count_map[0, :, slice_i, slice_j, slice_k] += 1.0 else: slice_i, slice_j = slices[curr_index] output_image[0, :, slice_i, slice_j] += output_rois[window_id][curr_index - slice_index, :] - count_map[0, :, slice_i, slice_j] += 1. 
+ count_map[0, :, slice_i, slice_j] += 1.0 # account for any overlapping sections output_image /= count_map if num_spatial_dims == 3: - return output_image[..., :original_image_size[0], :original_image_size[1], :original_image_size[2]] - return output_image[..., :original_image_size[0], :original_image_size[1]] # 2D + return output_image[..., : original_image_size[0], : original_image_size[1], : original_image_size[2]] + return output_image[..., : original_image_size[0], : original_image_size[1]] # 2D def _get_scan_interval(image_size, roi_size, num_spatial_dims): - assert (len(image_size) == num_spatial_dims), 'image coord different from spatial dims.' - assert (len(roi_size) == num_spatial_dims), 'roi coord different from spatial dims.' + assert len(image_size) == num_spatial_dims, "image coord different from spatial dims." + assert len(roi_size) == num_spatial_dims, "roi coord different from spatial dims." scan_interval = [1 for _ in range(num_spatial_dims)] for i in range(num_spatial_dims): diff --git a/monai/data/synthetic.py b/monai/data/synthetic.py index 1c49454c8e..6a980ac38e 100644 --- a/monai/data/synthetic.py +++ b/monai/data/synthetic.py @@ -28,7 +28,7 @@ def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, x = np.random.randint(rad_max, width - rad_max) y = np.random.randint(rad_max, height - rad_max) rad = np.random.randint(5, rad_max) - spy, spx = np.ogrid[-x:width - x, -y:height - y] + spy, spx = np.ogrid[-x : width - x, -y : height - y] circle = (spx * spx + spy * spy) <= rad * rad if num_seg_classes > 1: @@ -42,15 +42,18 @@ def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, noisyimage = rescale_array(np.maximum(image, norm)) if channel_dim is not None: - assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), 'invalid channel dim.' - noisyimage, labels = noisyimage[None], labels[None] \ - if channel_dim == 0 else (noisyimage[..., None], labels[..., None]) + assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), "invalid channel dim." + noisyimage, labels = ( + noisyimage[None], + labels[None] if channel_dim == 0 else (noisyimage[..., None], labels[..., None]), + ) return noisyimage, labels -def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30, - noise_max=0.0, num_seg_classes=5, channel_dim=None): +def create_test_image_3d( + height, width, depth, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None +): """ Return a noisy 3D image and segmentation. @@ -64,7 +67,7 @@ def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30, y = np.random.randint(rad_max, height - rad_max) z = np.random.randint(rad_max, depth - rad_max) rad = np.random.randint(5, rad_max) - spy, spx, spz = np.ogrid[-x:width - x, -y:height - y, -z:depth - z] + spy, spx, spz = np.ogrid[-x : width - x, -y : height - y, -z : depth - z] circle = (spx * spx + spy * spy + spz * spz) <= rad * rad if num_seg_classes > 1: @@ -78,8 +81,9 @@ def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30, noisyimage = rescale_array(np.maximum(image, norm)) if channel_dim is not None: - assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), 'invalid channel dim.' - noisyimage, labels = (noisyimage[None], labels[None]) \ - if channel_dim == 0 else (noisyimage[..., None], labels[..., None]) + assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), "invalid channel dim." 
+ noisyimage, labels = ( + (noisyimage[None], labels[None]) if channel_dim == 0 else (noisyimage[..., None], labels[..., None]) + ) return noisyimage, labels diff --git a/monai/data/utils.py b/monai/data/utils.py index 09b46dc1c5..946dbc84bb 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -83,12 +83,14 @@ def dense_patch_slices(image_size, patch_size, scan_interval): """ num_spatial_dims = len(image_size) if num_spatial_dims not in (2, 3): - raise ValueError('image_size should has 2 or 3 elements') + raise ValueError("image_size should has 2 or 3 elements") patch_size = get_valid_patch_size(image_size, patch_size) scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims) - scan_num = [int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1 - for i in range(num_spatial_dims)] + scan_num = [ + int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1 + for i in range(num_spatial_dims) + ] slices = [] if num_spatial_dims == 3: for i in range(scan_num[0]): @@ -203,7 +205,7 @@ def correct_nifti_header_if_necessary(img_nii): Args: img (nifti image object) """ - dim = img_nii.header['dim'][0] + dim = img_nii.header["dim"][0] if dim >= 5: return img_nii # do nothing for high-dimensional array # check that affine matches zooms @@ -211,7 +213,7 @@ def correct_nifti_header_if_necessary(img_nii): norm_affine = np.sqrt(np.sum(np.square(img_nii.affine[:dim, :dim]), 0)) if np.allclose(pixdim, norm_affine): return img_nii - if hasattr(img_nii, 'get_sform'): + if hasattr(img_nii, "get_sform"): return rectify_header_sform_qform(img_nii) return img_nii @@ -223,7 +225,7 @@ def rectify_header_sform_qform(img_nii): Adapted from https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/io/misc_io.py """ - d = img_nii.header['dim'][0] + d = img_nii.header["dim"][0] pixdim = np.asarray(img_nii.header.get_zooms())[:d] sform, qform = img_nii.get_sform(), img_nii.get_qform() norm_sform = np.sqrt(np.sum(np.square(sform[:d, :d]), 0)) @@ -231,13 +233,13 @@ def rectify_header_sform_qform(img_nii): sform_mismatch = not np.allclose(norm_sform, pixdim) qform_mismatch = not np.allclose(norm_qform, pixdim) - if img_nii.header['sform_code'] != 0: + if img_nii.header["sform_code"] != 0: if not sform_mismatch: return img_nii if not qform_mismatch: img_nii.set_sform(img_nii.get_qform()) return img_nii - if img_nii.header['qform_code'] != 0: + if img_nii.header["qform_code"] != 0: if not qform_mismatch: return img_nii if not sform_mismatch: @@ -245,7 +247,7 @@ def rectify_header_sform_qform(img_nii): return img_nii norm = np.sqrt(np.sum(np.square(img_nii.affine[:d, :d]), 0)) - warnings.warn('Modifying image pixdim from {} to {}'.format(pixdim, norm)) + warnings.warn("Modifying image pixdim from {} to {}".format(pixdim, norm)) img_nii.header.set_zooms(norm) return img_nii @@ -272,18 +274,18 @@ def zoom_affine(affine, scale, diagonal=True): """ affine = np.array(affine, dtype=float, copy=True) if len(affine) != len(affine[0]): - raise ValueError('affine should be a square matrix') + raise ValueError("affine should be a square matrix") scale = np.array(scale, dtype=float, copy=True) if np.any(scale <= 0): - raise ValueError('scale must be a sequence of positive numbers.') + raise ValueError("scale must be a sequence of positive numbers.") d = len(affine) - 1 if len(scale) < d: # defaults based on affine norm = np.sqrt(np.sum(np.square(affine), 0))[:-1] - scale = np.append(scale, norm[len(scale):]) + scale = np.append(scale, norm[len(scale) :]) 
scale = scale[:d] - scale[scale == 0] = 1. + scale[scale == 0] = 1.0 if diagonal: - return np.diag(np.append(scale, [1.])) + return np.diag(np.append(scale, [1.0])) rzs = affine[:-1, :-1] # rotation zoom scale zs = np.linalg.cholesky(rzs.T @ rzs).T rotation = rzs @ np.linalg.inv(zs) @@ -305,15 +307,14 @@ def compute_shape_offset(spatial_shape, in_affine, out_affine): sr = len(shape) in_affine = to_affine_nd(sr, in_affine) out_affine = to_affine_nd(sr, out_affine) - in_coords = [(0., dim - 1.) for dim in shape] - corners = np.asarray(np.meshgrid(*in_coords, indexing='ij')).reshape((len(shape), -1)) + in_coords = [(0.0, dim - 1.0) for dim in shape] + corners = np.asarray(np.meshgrid(*in_coords, indexing="ij")).reshape((len(shape), -1)) corners = np.concatenate((corners, np.ones_like(corners[:1]))) corners = in_affine @ corners corners_out = np.linalg.inv(out_affine) @ corners corners_out = corners_out[:-1] / corners_out[-1] - out_shape = np.round(np.max(corners_out, 1) - np.min(corners_out, 1) + 1.) - if np.allclose(nib.io_orientation(in_affine), - nib.io_orientation(out_affine)): + out_shape = np.round(np.max(corners_out, 1) - np.min(corners_out, 1) + 1.0) + if np.allclose(nib.io_orientation(in_affine), nib.io_orientation(out_affine)): # same orientation, get translate from the origin offset = in_affine @ ([0] * sr + [1]) offset = offset[:-1] / offset[-1] @@ -348,12 +349,12 @@ def to_affine_nd(r, affine): """ affine = np.array(affine, dtype=np.float64) if affine.ndim != 2: - raise ValueError('input affine must have two dimensions') + raise ValueError("input affine must have two dimensions") new_affine = np.array(r, dtype=np.float64, copy=True) if new_affine.ndim == 0: sr = new_affine.astype(int) if not np.isfinite(sr) or sr < 0: - raise ValueError('r must be positive.') + raise ValueError("r must be positive.") new_affine = np.eye(sr + 1, dtype=np.float64) d = max(min(len(new_affine) - 1, len(affine) - 1), 1) new_affine[:d, :d] = affine[:d, :d] diff --git a/monai/engine/multi_gpu_supervised_trainer.py b/monai/engine/multi_gpu_supervised_trainer.py index 89dcdee598..e4357a5e79 100644 --- a/monai/engine/multi_gpu_supervised_trainer.py +++ b/monai/engine/multi_gpu_supervised_trainer.py @@ -28,7 +28,7 @@ def get_devices_spec(devices=None): list of torch.device: list of devices. """ if devices is None: - devices = [torch.device('cuda:%i' % d) for d in range(torch.cuda.device_count())] + devices = [torch.device("cuda:%i" % d) for d in range(torch.cuda.device_count())] if len(devices) == 0: raise ValueError("No GPU devices available") @@ -43,12 +43,19 @@ def _default_transform(x, y, y_pred, loss): return loss.item() -def _default_eval_transform(x, y, y_pred): +def _default_eval_transform(x, y, y_pred): return y_pred, y -def create_multigpu_supervised_trainer(net, optimizer, loss_fn, devices=None, non_blocking=False, - prepare_batch=_prepare_batch, output_transform=_default_transform): +def create_multigpu_supervised_trainer( + net, + optimizer, + loss_fn, + devices=None, + non_blocking=False, + prepare_batch=_prepare_batch, + output_transform=_default_transform, +): """ Derived from `create_supervised_trainer` in Ignite. 
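A minimal usage sketch of the trainer factory reformatted above (an illustration only, assuming `torch`, `ignite` and `monai` are importable; the explicit CPU device list is an assumption that sidesteps the GPU-only default of `get_devices_spec`):

import torch
import torch.nn as nn
from monai.engine.multi_gpu_supervised_trainer import create_multigpu_supervised_trainer

net = nn.Linear(16, 2)  # toy model standing in for a real network
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

# an explicit device list avoids the ValueError raised by get_devices_spec() on machines without GPUs
trainer = create_multigpu_supervised_trainer(net, opt, loss_fn, devices=[torch.device("cpu")])

# eight dummy (input, label) batches; with a single device this falls through to Ignite's create_supervised_trainer
data = [(torch.randn(4, 16), torch.randint(0, 2, (4,))) for _ in range(8)]
trainer.run(data, max_epochs=1)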
@@ -83,8 +90,14 @@ def create_multigpu_supervised_trainer(net, optimizer, loss_fn, devices=None, no return create_supervised_trainer(net, optimizer, loss_fn, devices[0], non_blocking, prepare_batch, output_transform) -def create_multigpu_supervised_evaluator(net, metrics=None, devices=None, non_blocking=False, - prepare_batch=_prepare_batch, output_transform=_default_eval_transform): +def create_multigpu_supervised_evaluator( + net, + metrics=None, + devices=None, + non_blocking=False, + prepare_batch=_prepare_batch, + output_transform=_default_eval_transform, +): """ Derived from `create_supervised_evaluator` in Ignite. diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py index d822c0427c..a0290fdb26 100644 --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -29,9 +29,9 @@ class CheckpointLoader: """ def __init__(self, load_path, load_dict): - assert load_path is not None, 'must provide clear path to load checkpoint.' + assert load_path is not None, "must provide clear path to load checkpoint." self.load_path = load_path - assert load_dict is not None and len(load_dict) > 0, 'must provide target objects to load.' + assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load." self.load_dict = load_dict def attach(self, engine): @@ -40,4 +40,4 @@ def attach(self, engine): def __call__(self, engine): checkpoint = torch.load(self.load_path) Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint) - print('Restored all variables from {}'.format(self.load_path)) + print("Restored all variables from {}".format(self.load_path)) diff --git a/monai/handlers/classification_saver.py b/monai/handlers/classification_saver.py index 314b4eedb0..1d24c28e3c 100644 --- a/monai/handlers/classification_saver.py +++ b/monai/handlers/classification_saver.py @@ -19,8 +19,15 @@ class ClassificationSaver: Event handler triggered on completing every iteration to save the classification predictions as CSV file. """ - def __init__(self, output_dir='./', filename='predictions.csv', overwrite=True, - batch_transform=lambda x: x, output_transform=lambda x: x, name=None): + def __init__( + self, + output_dir="./", + filename="predictions.csv", + overwrite=True, + batch_transform=lambda x: x, + output_transform=lambda x: x, + name=None, + ): """ Args: output_dir (str): output CSV file directory. diff --git a/monai/handlers/mean_dice.py b/monai/handlers/mean_dice.py index 0ed9d366d9..65085f1b31 100644 --- a/monai/handlers/mean_dice.py +++ b/monai/handlers/mean_dice.py @@ -23,14 +23,16 @@ class MeanDice(Metric): """Computes Dice score metric from full size Tensor and collects average over batch, class-channels, iterations. """ - def __init__(self, - include_background=True, - to_onehot_y=False, - mutually_exclusive=False, - add_sigmoid=False, - logit_thresh=0.5, - output_transform: Callable = lambda x: x, - device: Optional[Union[str, torch.device]] = None): + def __init__( + self, + include_background=True, + to_onehot_y=False, + mutually_exclusive=False, + add_sigmoid=False, + logit_thresh=0.5, + output_transform: Callable = lambda x: x, + device: Optional[Union[str, torch.device]] = None, + ): """ Args: @@ -65,10 +67,17 @@ def reset(self): @reinit__is_reduced def update(self, output: Sequence[Union[torch.Tensor, dict]]): - assert len(output) == 2, 'MeanDice metric can only support y_pred and y.' + assert len(output) == 2, "MeanDice metric can only support y_pred and y." 
y_pred, y = output - scores = compute_meandice(y_pred, y, self.include_background, self.to_onehot_y, self.mutually_exclusive, - self.add_sigmoid, self.logit_thresh) + scores = compute_meandice( + y_pred, + y, + self.include_background, + self.to_onehot_y, + self.mutually_exclusive, + self.add_sigmoid, + self.logit_thresh, + ) # add all items in current batch for batch in scores: @@ -82,5 +91,5 @@ def update(self, output: Sequence[Union[torch.Tensor, dict]]): @sync_all_reduce("_sum", "_num_examples") def compute(self): if self._num_examples == 0: - raise NotComputableError('MeanDice must have at least one example before it can be computed.') + raise NotComputableError("MeanDice must have at least one example before it can be computed.") return self._sum / self._num_examples diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py index 9014a75f34..a12505c438 100644 --- a/monai/handlers/metric_logger.py +++ b/monai/handlers/metric_logger.py @@ -15,7 +15,6 @@ class MetricLogger: - def __init__(self, loss_transform=lambda x: x, metric_transform=lambda x: x): self.loss_transform = loss_transform self.metric_transform = metric_transform diff --git a/monai/handlers/roc_auc.py b/monai/handlers/roc_auc.py index ffbd6e3331..4d93a963e1 100644 --- a/monai/handlers/roc_auc.py +++ b/monai/handlers/roc_auc.py @@ -44,11 +44,8 @@ class ROCAUC(Metric): ROCAUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. """ - def __init__(self, - to_onehot_y=False, - add_softmax=False, - average='macro', - output_transform=lambda x: x): + + def __init__(self, to_onehot_y=False, add_softmax=False, average="macro", output_transform=lambda x: x): super().__init__(output_transform=output_transform) self.to_onehot_y = to_onehot_y self.add_softmax = add_softmax @@ -71,5 +68,4 @@ def update(self, output: Sequence[torch.Tensor]): def compute(self): _prediction_tensor = torch.cat(self._predictions, dim=0) _target_tensor = torch.cat(self._targets, dim=0) - return compute_roc_auc(_prediction_tensor, _target_tensor, self.to_onehot_y, - self.add_softmax, self.average) + return compute_roc_auc(_prediction_tensor, _target_tensor, self.to_onehot_y, self.add_softmax, self.average) diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py index 8611176c6b..0cc961e377 100644 --- a/monai/handlers/segmentation_saver.py +++ b/monai/handlers/segmentation_saver.py @@ -19,8 +19,16 @@ class SegmentationSaver: Event handler triggered on completing every iteration to save the segmentation predictions as nifti files. """ - def __init__(self, output_dir='./', output_postfix='seg', output_ext='.nii.gz', dtype=None, - batch_transform=lambda x: x, output_transform=lambda x: x, name=None): + def __init__( + self, + output_dir="./", + output_postfix="seg", + output_ext=".nii.gz", + dtype=None, + batch_transform=lambda x: x, + output_transform=lambda x: x, + name=None, + ): """ Args: output_dir (str): output image directory. 
@@ -58,4 +66,4 @@ def __call__(self, engine): meta_data = self.batch_transform(engine.state.batch) engine_output = self.output_transform(engine.state.output) self.saver.save_batch(engine_output, meta_data) - self.logger.info('saved all the model outputs as nifti files.') + self.logger.info("saved all the model outputs as nifti files.") diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py index efde18fb5c..f74e20239e 100644 --- a/monai/handlers/stats_handler.py +++ b/monai/handlers/stats_handler.py @@ -15,8 +15,8 @@ from ignite.engine import Engine, Events from monai.utils.misc import is_scalar -DEFAULT_KEY_VAL_FORMAT = '{}: {:.4f} ' -DEFAULT_TAG = 'Loss' +DEFAULT_KEY_VAL_FORMAT = "{}: {:.4f} " +DEFAULT_TAG = "Loss" class StatsHandler(object): @@ -31,14 +31,16 @@ class StatsHandler(object): """ - def __init__(self, - epoch_print_logger=None, - iteration_print_logger=None, - output_transform=lambda x: x, - global_epoch_transform=lambda x: x, - name=None, - tag_name=DEFAULT_TAG, - key_var_format=DEFAULT_KEY_VAL_FORMAT): + def __init__( + self, + epoch_print_logger=None, + iteration_print_logger=None, + output_transform=lambda x: x, + global_epoch_transform=lambda x: x, + name=None, + tag_name=DEFAULT_TAG, + key_var_format=DEFAULT_KEY_VAL_FORMAT, + ): """ Args: @@ -119,7 +121,7 @@ def exception_raised(self, engine: Engine, e): e (Exception): the exception caught in Ignite during engine.run(). """ - self.logger.exception('Exception: {}'.format(e)) + self.logger.exception("Exception: {}".format(e)) # import traceback # traceback.print_exc() @@ -156,25 +158,29 @@ def _default_iteration_print(self, engine: Engine): if loss is None: return # no printing if the output is empty - out_str = '' + out_str = "" if isinstance(loss, dict): # print dictionary items for name in sorted(loss): value = loss[name] if not is_scalar(value): - warnings.warn('ignoring non-scalar output in StatsHandler,' - ' make sure `output_transform(engine.state.output)` returns' - ' a scalar or dictionary of key and scalar pairs to avoid this warning.' - ' {}:{}'.format(name, type(value))) + warnings.warn( + "ignoring non-scalar output in StatsHandler," + " make sure `output_transform(engine.state.output)` returns" + " a scalar or dictionary of key and scalar pairs to avoid this warning." + " {}:{}".format(name, type(value)) + ) continue # not printing multi dimensional output out_str += self.key_var_format.format(name, value.item() if torch.is_tensor(value) else value) else: if is_scalar(loss): # not printing multi dimensional output out_str += self.key_var_format.format(self.tag_name, loss.item() if torch.is_tensor(loss) else loss) else: - warnings.warn('ignoring non-scalar output in StatsHandler,' - ' make sure `output_transform(engine.state.output)` returns' - ' a scalar or a dictionary of key and scalar pairs to avoid this warning.' - ' {}'.format(type(loss))) + warnings.warn( + "ignoring non-scalar output in StatsHandler," + " make sure `output_transform(engine.state.output)` returns" + " a scalar or a dictionary of key and scalar pairs to avoid this warning." 
+ " {}".format(type(loss)) + ) if not out_str: return # no value to print @@ -184,10 +190,6 @@ def _default_iteration_print(self, engine: Engine): current_epoch = engine.state.epoch num_epochs = engine.state.max_epochs - base_str = "Epoch: {}/{}, Iter: {}/{} --".format( - current_epoch, - num_epochs, - current_iteration, - num_iterations) + base_str = "Epoch: {}/{}, Iter: {}/{} --".format(current_epoch, num_epochs, current_iteration, num_iterations) - self.logger.info(' '.join([base_str, out_str])) + self.logger.info(" ".join([base_str, out_str])) diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py index 02bdbc533f..7639cf775d 100644 --- a/monai/handlers/tensorboard_handlers.py +++ b/monai/handlers/tensorboard_handlers.py @@ -17,7 +17,7 @@ from monai.visualize import plot_2d_or_3d_image from monai.utils.misc import is_scalar -DEFAULT_TAG = 'Loss' +DEFAULT_TAG = "Loss" class TensorBoardStatsHandler(object): @@ -33,13 +33,15 @@ class TensorBoardStatsHandler(object): ``self.output_transform(engine.state.output)`` to TensorBoard. """ - def __init__(self, - summary_writer=None, - epoch_event_writer=None, - iteration_event_writer=None, - output_transform=lambda x: x, - global_epoch_transform=lambda x: x, - tag_name=DEFAULT_TAG): + def __init__( + self, + summary_writer=None, + epoch_event_writer=None, + iteration_event_writer=None, + output_transform=lambda x: x, + global_epoch_transform=lambda x: x, + tag_name=DEFAULT_TAG, + ): """ Args: summary_writer (SummaryWriter): user can specify TensorBoard SummaryWriter, @@ -133,19 +135,23 @@ def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter): for name in sorted(loss): value = loss[name] if not is_scalar(value): - warnings.warn('ignoring non-scalar output in TensorBoardStatsHandler,' - ' make sure `output_transform(engine.state.output)` returns' - ' a scalar or dictionary of key and scalar pairs to avoid this warning.' - ' {}:{}'.format(name, type(value))) + warnings.warn( + "ignoring non-scalar output in TensorBoardStatsHandler," + " make sure `output_transform(engine.state.output)` returns" + " a scalar or dictionary of key and scalar pairs to avoid this warning." + " {}:{}".format(name, type(value)) + ) continue # not plot multi dimensional output writer.add_scalar(name, value.item() if torch.is_tensor(value) else value, engine.state.iteration) elif is_scalar(loss): # not printing multi dimensional output writer.add_scalar(self.tag_name, loss.item() if torch.is_tensor(loss) else loss, engine.state.iteration) else: - warnings.warn('ignoring non-scalar output in TensorBoardStatsHandler,' - ' make sure `output_transform(engine.state.output)` returns' - ' a scalar or a dictionary of key and scalar pairs to avoid this warning.' - ' {}'.format(type(loss))) + warnings.warn( + "ignoring non-scalar output in TensorBoardStatsHandler," + " make sure `output_transform(engine.state.output)` returns" + " a scalar or a dictionary of key and scalar pairs to avoid this warning." 
+                " {}".format(type(loss)) +            ) writer.flush() @@ -170,14 +176,16 @@ class TensorBoardImageHandler(object): """ -    def __init__(self, -                 summary_writer=None, -                 batch_transform=lambda x: x, -                 output_transform=lambda x: x, -                 global_iter_transform=lambda x: x, -                 index=0, -                 max_channels=1, -                 max_frames=64): +    def __init__( +        self, +        summary_writer=None, +        batch_transform=lambda x: x, +        output_transform=lambda x: x, +        global_iter_transform=lambda x: x, +        index=0, +        max_channels=1, +        max_frames=64, +    ): """ Args: summary_writer (SummaryWriter): user can specify TensorBoard SummaryWriter, @@ -208,26 +216,29 @@ def __call__(self, engine): show_images = show_images.detach().cpu().numpy() if show_images is not None: if not isinstance(show_images, np.ndarray): -                raise ValueError('output_transform(engine.state.output)[0] must be an ndarray or tensor.') -            plot_2d_or_3d_image(show_images, step, self._writer, self.index, -                                self.max_channels, self.max_frames, 'input_0') +                raise ValueError("output_transform(engine.state.output)[0] must be an ndarray or tensor.") +            plot_2d_or_3d_image( +                show_images, step, self._writer, self.index, self.max_channels, self.max_frames, "input_0" +            ) show_labels = self.batch_transform(engine.state.batch)[1] if torch.is_tensor(show_labels): show_labels = show_labels.detach().cpu().numpy() if show_labels is not None: if not isinstance(show_labels, np.ndarray): -                raise ValueError('batch_transform(engine.state.batch)[1] must be an ndarray or tensor.') -            plot_2d_or_3d_image(show_labels, step, self._writer, self.index, -                                self.max_channels, self.max_frames, 'input_1') +                raise ValueError("batch_transform(engine.state.batch)[1] must be an ndarray or tensor.") +            plot_2d_or_3d_image( +                show_labels, step, self._writer, self.index, self.max_channels, self.max_frames, "input_1" +            ) show_outputs = self.output_transform(engine.state.output) if torch.is_tensor(show_outputs): show_outputs = show_outputs.detach().cpu().numpy() if show_outputs is not None: if not isinstance(show_outputs, np.ndarray): -                raise ValueError('output_transform(engine.state.output) must be an ndarray or tensor.') -            plot_2d_or_3d_image(show_outputs, step, self._writer, self.index, -                                self.max_channels, self.max_frames, 'output') +                raise ValueError("output_transform(engine.state.output) must be an ndarray or tensor.") +            plot_2d_or_3d_image( +                show_outputs, step, self._writer, self.index, self.max_channels, self.max_frames, "output" +            ) self._writer.flush() diff --git a/monai/losses/dice.py b/monai/losses/dice.py index cf2d48eef8..ae59488fd7 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -37,7 +37,7 @@ def __init__( do_sigmoid=False, do_softmax=False, squared_pred=False, -        jaccard=False +        jaccard=False, ): """ Args: @@ -53,7 +53,7 @@ def __init__( self.include_background = include_background self.to_onehot_y = to_onehot_y if do_sigmoid and do_softmax: -            raise ValueError('do_sigmoid=True and do_softmax=True are not compatible.') +            raise ValueError("do_sigmoid=True and do_softmax=True are not compatible.") self.do_sigmoid = do_sigmoid self.do_softmax = do_softmax self.squared_pred = squared_pred @@ -71,11 +71,11 @@ def forward(self, pred, ground, smooth=1e-5): n_pred_ch = pred.shape[1] if n_pred_ch == 1: if self.do_softmax: -                warnings.warn('single channel prediction, `do_softmax=True` ignored.') +                warnings.warn("single channel prediction, `do_softmax=True` ignored.") if self.to_onehot_y: -                warnings.warn('single channel prediction, `to_onehot_y=True` ignored.') +                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
if not self.include_background: -                warnings.warn('single channel prediction, `include_background=False` ignored.') +                warnings.warn("single channel prediction, `include_background=False` ignored.") else: if self.do_softmax: pred = torch.softmax(pred, 1) @@ -85,8 +85,10 @@ def forward(self, pred, ground, smooth=1e-5): # if skipping background, removing first channel ground = ground[:, 1:] pred = pred[:, 1:] -        assert ground.shape == pred.shape, ('ground truth one-hot has differing shape (%r) from pred (%r)' % -                                            (ground.shape, pred.shape)) +        assert ground.shape == pred.shape, "ground truth one-hot has differing shape (%r) from pred (%r)" % ( +            ground.shape, +            pred.shape, +        ) # reducing only spatial dimensions (not batch nor channels) reduce_axis = list(range(2, len(pred.shape))) @@ -119,14 +121,7 @@ class GeneralizedDiceLoss(_Loss): https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279 """ -    def __init__( -            self, -            include_background=True, -            to_onehot_y=False, -            do_sigmoid=False, -            do_softmax=False, -            w_type='square' -    ): +    def __init__(self, include_background=True, to_onehot_y=False, do_sigmoid=False, do_softmax=False, w_type="square"): """ Args: include_background (bool): If False channel index 0 (background category) is excluded from the calculation. @@ -140,16 +135,16 @@ def __init__( self.include_background = include_background self.to_onehot_y = to_onehot_y if do_sigmoid and do_softmax: -            raise ValueError('do_sigmoid=True and do_softmax=True are not compatible.') +            raise ValueError("do_sigmoid=True and do_softmax=True are not compatible.") self.do_sigmoid = do_sigmoid self.do_softmax = do_softmax self.w_func = torch.ones_like -        if w_type == 'simple': +        if w_type == "simple": self.w_func = torch.reciprocal -        elif w_type == 'square': +        elif w_type == "square": self.w_func = lambda x: torch.reciprocal(x * x) else: -            raise ValueError('unknown option for `w_type`: {}'.format(w_type)) +            raise ValueError("unknown option for `w_type`: {}".format(w_type)) def forward(self, pred, ground, smooth=1e-5): """ @@ -163,11 +158,11 @@ def forward(self, pred, ground, smooth=1e-5): n_pred_ch = pred.shape[1] if n_pred_ch == 1: if self.do_softmax: -                warnings.warn('single channel prediction, `do_softmax=True` ignored.') +                warnings.warn("single channel prediction, `do_softmax=True` ignored.") if self.to_onehot_y: -                warnings.warn('single channel prediction, `to_onehot_y=True` ignored.') +                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.") if not self.include_background: -                warnings.warn('single channel prediction, `include_background=False` ignored.') +                warnings.warn("single channel prediction, `include_background=False` ignored.") else: if self.do_softmax: pred = torch.softmax(pred, 1) @@ -177,8 +172,10 @@ def forward(self, pred, ground, smooth=1e-5): # if skipping background, removing first channel ground = ground[:, 1:] pred = pred[:, 1:] -        assert ground.shape == pred.shape, ('ground truth one-hot has differing shape (%r) from pred (%r)' % -                                            (ground.shape, pred.shape)) +        assert ground.shape == pred.shape, "ground truth one-hot has differing shape (%r) from pred (%r)" % ( +            ground.shape, +            pred.shape, +        ) # reducing only spatial dimensions (not batch nor channels) reduce_axis = list(range(2, len(pred.shape))) diff --git a/monai/losses/tversky.py b/monai/losses/tversky.py index 2d441314fc..4ad69d56b0 100644 --- a/monai/losses/tversky.py +++ b/monai/losses/tversky.py @@ -31,13 +31,7 @@ class TverskyLoss(_Loss): """ def __init__(
-        self, -        include_background=True, -        to_onehot_y=False, -        do_sigmoid=False, -        do_softmax=False, -        alpha=0.5, -        beta=0.5 +        self, include_background=True, to_onehot_y=False, do_sigmoid=False, do_softmax=False, alpha=0.5, beta=0.5 ): """ @@ -55,7 +49,7 @@ def __init__( self.to_onehot_y = to_onehot_y if do_sigmoid and do_softmax: -            raise ValueError('do_sigmoid=True and do_softmax=True are not compatible.') +            raise ValueError("do_sigmoid=True and do_softmax=True are not compatible.") self.do_sigmoid = do_sigmoid self.do_softmax = do_softmax self.alpha = alpha @@ -73,11 +67,11 @@ def forward(self, pred, ground, smooth=1e-5): n_pred_ch = pred.shape[1] if n_pred_ch == 1: if self.do_softmax: -                warnings.warn('single channel prediction, `do_softmax=True` ignored.') +                warnings.warn("single channel prediction, `do_softmax=True` ignored.") if self.to_onehot_y: -                warnings.warn('single channel prediction, `to_onehot_y=True` ignored.') +                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.") if not self.include_background: -                warnings.warn('single channel prediction, `include_background=False` ignored.') +                warnings.warn("single channel prediction, `include_background=False` ignored.") else: if self.do_softmax: pred = torch.softmax(pred, 1) @@ -87,8 +81,10 @@ def forward(self, pred, ground, smooth=1e-5): # if skipping background, removing first channel ground = ground[:, 1:] pred = pred[:, 1:] -        assert ground.shape == pred.shape, ('ground truth one-hot has differing shape (%r) from pred (%r)' % -                                            (ground.shape, pred.shape)) +        assert ground.shape == pred.shape, "ground truth one-hot has differing shape (%r) from pred (%r)" % ( +            ground.shape, +            pred.shape, +        ) p0 = pred p1 = 1 - p0 @@ -98,13 +94,13 @@ def forward(self, pred, ground, smooth=1e-5): # reducing only spatial dimensions (not batch nor channels) reduce_axis = list(range(2, len(pred.shape))) -    tp = torch.sum(p0 * g0 , reduce_axis) -    fp = self.alpha * torch.sum(p0 * g1 , reduce_axis) -    fn = self.beta * torch.sum(p1 * g0 , reduce_axis) +    tp = torch.sum(p0 * g0, reduce_axis) +    fp = self.alpha * torch.sum(p0 * g1, reduce_axis) +    fn = self.beta * torch.sum(p1 * g0, reduce_axis) numerator = tp + smooth denominator = tp + fp + fn + smooth -    score = numerator / denominator +    score = numerator / denominator - return 1.0 - score.mean() + return 1.0 - score.mean() diff --git a/monai/metrics/meandice.py b/monai/metrics/meandice.py index 24d7b79ff5..0bc1a62908 100644 --- a/monai/metrics/meandice.py +++ b/monai/metrics/meandice.py @@ -16,13 +16,9 @@ from monai.networks.utils import one_hot -def compute_meandice(y_pred, -                     y, -                     include_background=True, -                     to_onehot_y=False, -                     mutually_exclusive=False, -                     add_sigmoid=False, -                     logit_thresh=0.5): +def compute_meandice( +    y_pred, y, include_background=True, to_onehot_y=False, mutually_exclusive=False, add_sigmoid=False, logit_thresh=0.5 +): """Computes Dice score metric from full size Tensor and collects average.
Args: @@ -58,11 +54,11 @@ def compute_meandice(y_pred, if n_classes == 1: if mutually_exclusive: - warnings.warn('y_pred has only one class, mutually_exclusive=True ignored.') + warnings.warn("y_pred has only one class, mutually_exclusive=True ignored.") if to_onehot_y: - warnings.warn('y_pred has only one channel, to_onehot_y=True ignored.') + warnings.warn("y_pred has only one channel, to_onehot_y=True ignored.") if not include_background: - warnings.warn('y_pred has only one channel, include_background=False ignored.') + warnings.warn("y_pred has only one channel, include_background=False ignored.") # make both y and y_pred binary y_pred = (y_pred >= logit_thresh).float() y = (y > 0).float() @@ -70,7 +66,7 @@ def compute_meandice(y_pred, # make both y and y_pred binary if mutually_exclusive: if add_sigmoid: - raise ValueError('add_sigmoid=True is incompatible with mutually_exclusive=True.') + raise ValueError("add_sigmoid=True is incompatible with mutually_exclusive=True.") y_pred = torch.argmax(y_pred, dim=1, keepdim=True) y_pred = one_hot(y_pred, n_classes) else: @@ -82,8 +78,10 @@ def compute_meandice(y_pred, y = y[:, 1:] if y.shape[1] > 1 else y y_pred = y_pred[:, 1:] if y_pred.shape[1] > 1 else y_pred - assert y.shape == y_pred.shape, ("Ground truth one-hot has differing shape (%r) from source (%r)" % - (y.shape, y_pred.shape)) + assert y.shape == y_pred.shape, "Ground truth one-hot has differing shape (%r) from source (%r)" % ( + y.shape, + y_pred.shape, + ) # reducing only spatial dimensions (not batch nor channels) reduce_axis = list(range(2, n_len)) @@ -93,5 +91,5 @@ def compute_meandice(y_pred, y_pred_o = torch.sum(y_pred, reduce_axis) denominator = y_o + y_pred_o - f = torch.where(y_o > 0, (2.0 * intersection) / denominator, torch.tensor(float('nan')).to(y_o.float())) + f = torch.where(y_o > 0, (2.0 * intersection) / denominator, torch.tensor(float("nan")).to(y_o.float())) return f # returns array of Dice shape: [Batch, n_classes] diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py index a804a7b372..a5d7312587 100644 --- a/monai/metrics/rocauc.py +++ b/monai/metrics/rocauc.py @@ -16,10 +16,12 @@ def _calculate(y, y_pred): - assert y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(y_pred), \ - 'y and y_pred must be 1 dimension data with same length.' - assert y.unique().equal(torch.tensor([0, 1], dtype=y.dtype, device=y.device)), \ - 'y values must be 0 or 1, can not be all 0 or all 1.' + assert y.ndimension() == y_pred.ndimension() == 1 and len(y) == len( + y_pred + ), "y and y_pred must be 1 dimension data with same length." + assert y.unique().equal( + torch.tensor([0, 1], dtype=y.dtype, device=y.device) + ), "y values must be 0 or 1, can not be all 0 or all 1." n = len(y) indexes = y_pred.argsort() y = y[indexes].cpu().numpy() @@ -46,7 +48,7 @@ def _calculate(y, y_pred): return auc / (nneg * (n - nneg)) -def compute_roc_auc(y_pred, y, to_onehot_y=False, add_softmax=False, average='macro'): +def compute_roc_auc(y_pred, y, to_onehot_y=False, add_softmax=False, average="macro"): """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC). Referring to: `sklearn.metrics.roc_auc_score `_. 
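A minimal sketch of calling `compute_roc_auc` with the signature shown above; the two-class scores and the import path `monai.metrics.rocauc` (taken from this file's path) are illustrative assumptions:

import torch
from monai.metrics.rocauc import compute_roc_auc

y_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])  # raw two-class scores, shape (N, 2)
y = torch.tensor([[1], [0], [1], [1]])  # integer labels with a channel dimension, shape (N, 1)

# to_onehot_y expands y to (N, 2); add_softmax normalises the scores before ranking
auc = compute_roc_auc(y_pred, y, to_onehot_y=True, add_softmax=True, average="macro")
print(auc)  # a float in [0, 1], averaged over the class-wise AUCs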
@@ -88,9 +90,9 @@ def compute_roc_auc(y_pred, y, to_onehot_y=False, add_softmax=False, average='ma if y_pred_ndim == 1: if to_onehot_y: - warnings.warn('y_pred has only one channel, to_onehot_y=True ignored.') + warnings.warn("y_pred has only one channel, to_onehot_y=True ignored.") if add_softmax: - warnings.warn('y_pred has only one channel, add_softmax=True ignored.') + warnings.warn("y_pred has only one channel, add_softmax=True ignored.") return _calculate(y, y_pred) else: n_classes = y_pred.shape[1] @@ -99,18 +101,18 @@ def compute_roc_auc(y_pred, y, to_onehot_y=False, add_softmax=False, average='ma if add_softmax: y_pred = y_pred.float().softmax(dim=1) - assert y.shape == y_pred.shape, 'data shapes of y_pred and y do not match.' + assert y.shape == y_pred.shape, "data shapes of y_pred and y do not match." - if average == 'micro': + if average == "micro": return _calculate(y.flatten(), y_pred.flatten()) else: y, y_pred = y.transpose(0, 1), y_pred.transpose(0, 1) auc_values = [_calculate(y_, y_pred_) for y_, y_pred_ in zip(y, y_pred)] if average is None: return auc_values - if average == 'macro': + if average == "macro": return np.mean(auc_values) - if average == 'weighted': + if average == "weighted": weights = [sum(y_) for y_ in y] return np.average(auc_values, weights=weights) - raise ValueError('unsupported average method.') + raise ValueError("unsupported average method.") diff --git a/monai/networks/blocks/convolutions.py b/monai/networks/blocks/convolutions.py index 3c618636d0..4de3071baa 100644 --- a/monai/networks/blocks/convolutions.py +++ b/monai/networks/blocks/convolutions.py @@ -21,8 +21,21 @@ class Convolution(nn.Sequential): Constructs a convolution with optional dropout, normalization, and activation layers. """ - def __init__(self, dimensions, in_channels, out_channels, strides=1, kernel_size=3, act=Act.PRELU, - norm=Norm.INSTANCE, dropout=None, dilation=1, bias=True, conv_only=False, is_transposed=False): + def __init__( + self, + dimensions, + in_channels, + out_channels, + strides=1, + kernel_size=3, + act=Act.PRELU, + norm=Norm.INSTANCE, + dropout=None, + dilation=1, + bias=True, + conv_only=False, + is_transposed=False, + ): super().__init__() self.dimensions = dimensions self.in_channels = in_channels @@ -66,8 +79,21 @@ def __init__(self, dimensions, in_channels, out_channels, strides=1, kernel_size class ResidualUnit(nn.Module): - def __init__(self, dimensions, in_channels, out_channels, strides=1, kernel_size=3, subunits=2, - act=Act.PRELU, norm=Norm.INSTANCE, dropout=None, dilation=1, bias=True, last_conv_only=False): + def __init__( + self, + dimensions, + in_channels, + out_channels, + strides=1, + kernel_size=3, + subunits=2, + act=Act.PRELU, + norm=Norm.INSTANCE, + dropout=None, + dilation=1, + bias=True, + last_conv_only=False, + ): super().__init__() self.dimensions = dimensions self.in_channels = in_channels @@ -82,13 +108,24 @@ def __init__(self, dimensions, in_channels, out_channels, strides=1, kernel_size for su in range(subunits): conv_only = last_conv_only and su == (subunits - 1) - unit = Convolution(dimensions, schannels, out_channels, sstrides, - kernel_size, act, norm, dropout, dilation, bias, conv_only) + unit = Convolution( + dimensions, + schannels, + out_channels, + sstrides, + kernel_size, + act, + norm, + dropout, + dilation, + bias, + conv_only, + ) self.conv.add_module("unit%i" % su, unit) # after first loop set channels and strides to what they should be for subsequent units - schannels = out_channels + schannels = out_channels 
sstrides = 1 # apply convolution to input to change number of output channels and size to match that coming from self.conv diff --git a/monai/networks/layers/convutils.py b/monai/networks/layers/convutils.py index 96781448f7..6cb3d3286a 100644 --- a/monai/networks/layers/convutils.py +++ b/monai/networks/layers/convutils.py @@ -37,7 +37,7 @@ def calculate_out_shape(in_shape, kernel_size, stride, padding): return tuple(out_shape) if len(out_shape) > 1 else out_shape[0] -def gaussian_1d(sigma, truncated=4.): +def gaussian_1d(sigma, truncated=4.0): """ one dimensional gaussian kernel. @@ -49,11 +49,11 @@ def gaussian_1d(sigma, truncated=4.): 1D numpy array """ if sigma <= 0: -        raise ValueError('sigma must be positive') +        raise ValueError("sigma must be positive") -    tail = int(sigma * truncated + .5) +    tail = int(sigma * truncated + 0.5) sigma2 = sigma * sigma x = np.arange(-tail, tail + 1) -    out = np.exp(-.5 / sigma2 * x ** 2) +    out = np.exp(-0.5 / sigma2 * x ** 2) out /= out.sum() return out diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index c41ff93a0f..f9bb21bbc2 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -36,8 +36,7 @@ def forward(self, x): class GaussianFilter: - -    def __init__(self, spatial_dims, sigma, truncated=4., device=None): +    def __init__(self, spatial_dims, sigma, truncated=4.0, device=None): """ Args: spatial_dims (int): number of spatial dimensions of the input image. diff --git a/monai/networks/nets/densenet.py b/monai/networks/nets/densenet.py index 4f34ae7a37..6f2f54fe5e 100644 --- a/monai/networks/nets/densenet.py +++ b/monai/networks/nets/densenet.py @@ -38,25 +38,24 @@ def densenet264(**kwargs): class _DenseLayer(nn.Sequential): - def __init__(self, spatial_dims, in_channels, growth_rate, bn_size, dropout_prob): super(_DenseLayer, self).__init__() out_channels = bn_size * growth_rate -        conv_type = Conv[Conv.CONV, spatial_dims] +        conv_type = Conv[Conv.CONV, spatial_dims] norm_type = Norm[Norm.BATCH, spatial_dims] dropout_type = Dropout[Dropout.DROPOUT, spatial_dims] -        self.add_module('norm1', norm_type(in_channels)) -        self.add_module('relu1', nn.ReLU(inplace=True)) -        self.add_module('conv1', conv_type(in_channels, out_channels, kernel_size=1, bias=False)) +        self.add_module("norm1", norm_type(in_channels)) +        self.add_module("relu1", nn.ReLU(inplace=True)) +        self.add_module("conv1", conv_type(in_channels, out_channels, kernel_size=1, bias=False)) -        self.add_module('norm2', norm_type(out_channels)) -        self.add_module('relu2', nn.ReLU(inplace=True)) -        self.add_module('conv2', conv_type(out_channels, growth_rate, kernel_size=3, padding=1, bias=False)) +        self.add_module("norm2", norm_type(out_channels)) +        self.add_module("relu2", nn.ReLU(inplace=True)) +        self.add_module("conv2", conv_type(out_channels, growth_rate, kernel_size=3, padding=1, bias=False)) if dropout_prob > 0: -            self.add_module('dropout', dropout_type(dropout_prob)) +            self.add_module("dropout", dropout_type(dropout_prob)) def forward(self, x): new_features = super(_DenseLayer, self).forward(x) @@ -64,28 +63,26 @@ def forward(self, x): class _DenseBlock(nn.Sequential): - def __init__(self, spatial_dims, layers, in_channels, bn_size, growth_rate, dropout_prob): super(_DenseBlock, self).__init__() for i in range(layers): layer = _DenseLayer(spatial_dims, in_channels, growth_rate, bn_size, dropout_prob) in_channels += growth_rate -        self.add_module('denselayer%d' % (i + 1), layer) +        self.add_module("denselayer%d" % (i + 1), layer)
class _Transition(nn.Sequential): - def __init__(self, spatial_dims, in_channels, out_channels): super(_Transition, self).__init__() -        conv_type = Conv[Conv.CONV, spatial_dims] +        conv_type = Conv[Conv.CONV, spatial_dims] norm_type = Norm[Norm.BATCH, spatial_dims] pool_type = Pool[Pool.AVG, spatial_dims] -        self.add_module('norm', norm_type(in_channels)) -        self.add_module('relu', nn.ReLU(inplace=True)) -        self.add_module('conv', conv_type(in_channels, out_channels, kernel_size=1, bias=False)) -        self.add_module('pool', pool_type(kernel_size=2, stride=2)) +        self.add_module("norm", norm_type(in_channels)) +        self.add_module("relu", nn.ReLU(inplace=True)) +        self.add_module("conv", conv_type(in_channels, out_channels, kernel_size=1, bias=False)) +        self.add_module("pool", pool_type(kernel_size=2, stride=2)) class DenseNet(nn.Module): @@ -106,57 +103,67 @@ class DenseNet(nn.Module): dropout_prob (Float): dropout rate after each dense layer. """ -    def __init__(self, -                 spatial_dims, -                 in_channels, -                 out_channels, -                 init_features=64, -                 growth_rate=32, -                 block_config=(6, 12, 24, 16), -                 bn_size=4, -                 dropout_prob=0): +    def __init__( +        self, +        spatial_dims, +        in_channels, +        out_channels, +        init_features=64, +        growth_rate=32, +        block_config=(6, 12, 24, 16), +        bn_size=4, +        dropout_prob=0, +    ): super(DenseNet, self).__init__() -        conv_type = Conv[Conv.CONV, spatial_dims] +        conv_type = Conv[Conv.CONV, spatial_dims] norm_type = Norm[Norm.BATCH, spatial_dims] pool_type = Pool[Pool.MAX, spatial_dims] avg_pool_type = Pool[Pool.ADAPTIVEAVG, spatial_dims] self.features = nn.Sequential( -            OrderedDict([ -                ('conv0', conv_type(in_channels, init_features, kernel_size=7, stride=2, padding=3, bias=False)), -                ('norm0', norm_type(init_features)), -                ('relu0', nn.ReLU(inplace=True)), -                ('pool0', pool_type(kernel_size=3, stride=2, padding=1)), -            ])) +            OrderedDict( +                [ +                    ("conv0", conv_type(in_channels, init_features, kernel_size=7, stride=2, padding=3, bias=False)), +                    ("norm0", norm_type(init_features)), +                    ("relu0", nn.ReLU(inplace=True)), +                    ("pool0", pool_type(kernel_size=3, stride=2, padding=1)), +                ] +            ) +        ) in_channels = init_features for i, num_layers in enumerate(block_config): -            block = _DenseBlock(spatial_dims=spatial_dims, -                                layers=num_layers, -                                in_channels=in_channels, -                                bn_size=bn_size, -                                growth_rate=growth_rate, -                                dropout_prob=dropout_prob) -            self.features.add_module('denseblock%d' % (i + 1), block) +            block = _DenseBlock( +                spatial_dims=spatial_dims, +                layers=num_layers, +                in_channels=in_channels, +                bn_size=bn_size, +                growth_rate=growth_rate, +                dropout_prob=dropout_prob, +            ) +            self.features.add_module("denseblock%d" % (i + 1), block) in_channels += num_layers * growth_rate if i == len(block_config) - 1: -                self.features.add_module('norm5', norm_type(in_channels)) +                self.features.add_module("norm5", norm_type(in_channels)) else: _out_channels = in_channels // 2 trans = _Transition(spatial_dims, in_channels=in_channels, out_channels=_out_channels) -                self.features.add_module('transition%d' % (i + 1), trans) +                self.features.add_module("transition%d" % (i + 1), trans) in_channels = _out_channels # pooling and classification self.class_layers = nn.Sequential( -            OrderedDict([ -                ('relu', nn.ReLU(inplace=True)), -                ('norm', avg_pool_type(1)), -                ('flatten', nn.Flatten(1)), -                ('class', nn.Linear(in_channels, out_channels)), -            ])) +            OrderedDict( +                [ +                    ("relu", nn.ReLU(inplace=True)), +                    ("norm", avg_pool_type(1)), +                    ("flatten", nn.Flatten(1)), +                    ("class", nn.Linear(in_channels, out_channels)), +                ] +            ) +        ) for m in self.modules():
if isinstance(m, conv_type): diff --git a/monai/networks/nets/highresnet.py b/monai/networks/nets/highresnet.py index 54482c8f3f..559b859c71 100644 --- a/monai/networks/nets/highresnet.py +++ b/monai/networks/nets/highresnet.py @@ -16,39 +16,33 @@ from monai.networks.layers.factories import Conv, Dropout, Norm SUPPORTED_NORM = { -    'batch': lambda spatial_dims: Norm[Norm.BATCH, spatial_dims], -    'instance': lambda spatial_dims: Norm[Norm.INSTANCE, spatial_dims], +    "batch": lambda spatial_dims: Norm[Norm.BATCH, spatial_dims], +    "instance": lambda spatial_dims: Norm[Norm.INSTANCE, spatial_dims], } -SUPPORTED_ACTI = {'relu': nn.ReLU, 'prelu': nn.PReLU, 'relu6': nn.ReLU6} +SUPPORTED_ACTI = {"relu": nn.ReLU, "prelu": nn.PReLU, "relu6": nn.ReLU6} DEFAULT_LAYER_PARAMS_3D = ( # initial conv layer -    {'name': 'conv_0', 'n_features': 16, 'kernel_size': 3}, +    {"name": "conv_0", "n_features": 16, "kernel_size": 3}, # residual blocks -    {'name': 'res_1', 'n_features': 16, 'kernels': (3, 3), 'repeat': 3}, -    {'name': 'res_2', 'n_features': 32, 'kernels': (3, 3), 'repeat': 3}, -    {'name': 'res_3', 'n_features': 64, 'kernels': (3, 3), 'repeat': 3}, +    {"name": "res_1", "n_features": 16, "kernels": (3, 3), "repeat": 3}, +    {"name": "res_2", "n_features": 32, "kernels": (3, 3), "repeat": 3}, +    {"name": "res_3", "n_features": 64, "kernels": (3, 3), "repeat": 3}, # final conv layers -    {'name': 'conv_1', 'n_features': 80, 'kernel_size': 1}, -    {'name': 'conv_2', 'kernel_size': 1}, +    {"name": "conv_1", "n_features": 80, "kernel_size": 1}, +    {"name": "conv_2", "kernel_size": 1}, ) class ConvNormActi(nn.Module): - -    def __init__(self, -                 spatial_dims, -                 in_channels, -                 out_channels, -                 kernel_size, -                 norm_type=None, -                 acti_type=None, -                 dropout_prob=None): +    def __init__( +        self, spatial_dims, in_channels, out_channels, kernel_size, norm_type=None, acti_type=None, dropout_prob=None +    ): super(ConvNormActi, self).__init__() layers = nn.ModuleList() -        conv_type = Conv[Conv.CONV, spatial_dims] +        conv_type = Conv[Conv.CONV, spatial_dims] padding_size = same_padding(kernel_size) conv = conv_type(in_channels, out_channels, kernel_size, padding=padding_size) layers.append(conv) @@ -67,16 +61,17 @@ def forward(self, x): class HighResBlock(nn.Module): - -    def __init__(self, -                 spatial_dims, -                 in_channels, -                 out_channels, -                 kernels=(3, 3), -                 dilation=1, -                 norm_type='instance', -                 acti_type='relu', -                 channel_matching='pad'): +    def __init__( +        self, +        spatial_dims, +        in_channels, +        out_channels, +        kernels=(3, 3), +        dilation=1, +        norm_type="instance", +        acti_type="relu", +        channel_matching="pad", +    ): """ Args: kernels (list of int): each integer k in `kernels` corresponds to a convolution layer with kernel size k.
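A minimal sketch of constructing the `HighResBlock` declared above; the input sizes are arbitrary, and the zero-padded residual shortcut implied by `channel_matching="pad"` is an assumption drawn from the surrounding code:

import torch
from monai.networks.nets.highresnet import HighResBlock

# norm/acti defaults follow the signature above: instance norm + relu
blk = HighResBlock(spatial_dims=3, in_channels=8, out_channels=16, kernels=(3, 3), channel_matching="pad")
x = torch.randn(2, 8, 24, 24, 24)  # a batch of two single-resolution 3D feature maps
y = blk(x)
print(y.shape)  # expected torch.Size([2, 16, 24, 24, 24]) if the shortcut is zero-padded as constructed above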
@@ -88,13 +83,13 @@ def __init__(self, self.project, self.pad = None, None if in_channels != out_channels: - if channel_matching not in ('pad', 'project'): - raise ValueError('channel matching must be pad or project, got {}.'.format(channel_matching)) - if channel_matching == 'project': + if channel_matching not in ("pad", "project"): + raise ValueError("channel matching must be pad or project, got {}.".format(channel_matching)) + if channel_matching == "project": self.project = conv_type(in_channels, out_channels, kernel_size=1) - if channel_matching == 'pad': + if channel_matching == "pad": if in_channels > out_channels: - raise ValueError('in_channels > out_channels is incompatible with `channel_matching=pad`.') + raise ValueError("in_channels > out_channels is incompatible with `channel_matching=pad`.") pad_1 = (out_channels - in_channels) // 2 pad_2 = out_channels - in_channels - pad_1 pad = [0, 0] * spatial_dims + [pad_1, pad_2] + [0, 0] @@ -106,11 +101,10 @@ def __init__(self, layers.append(SUPPORTED_NORM[norm_type](spatial_dims)(_in_chns)) layers.append(SUPPORTED_ACTI[acti_type](inplace=True)) layers.append( - conv_type(_in_chns, - _out_chns, - kernel_size, - padding=same_padding(kernel_size, dilation), - dilation=dilation)) + conv_type( + _in_chns, _out_chns, kernel_size, padding=same_padding(kernel_size, dilation), dilation=dilation + ) + ) _in_chns = _out_chns self.layers = nn.Sequential(*layers) @@ -144,67 +138,81 @@ class HighResNet(nn.Module): layer_params (a list of dictionaries): specifying key parameters of each layer/block. """ - def __init__(self, - spatial_dims=3, - in_channels=1, - out_channels=1, - norm_type='batch', - acti_type='relu', - dropout_prob=None, - layer_params=DEFAULT_LAYER_PARAMS_3D): + def __init__( + self, + spatial_dims=3, + in_channels=1, + out_channels=1, + norm_type="batch", + acti_type="relu", + dropout_prob=None, + layer_params=DEFAULT_LAYER_PARAMS_3D, + ): super(HighResNet, self).__init__() blocks = nn.ModuleList() # intial conv layer params = layer_params[0] - _in_chns, _out_chns = in_channels, params['n_features'] + _in_chns, _out_chns = in_channels, params["n_features"] blocks.append( - ConvNormActi(spatial_dims, - _in_chns, - _out_chns, - kernel_size=params['kernel_size'], - norm_type=norm_type, - acti_type=acti_type, - dropout_prob=None)) + ConvNormActi( + spatial_dims, + _in_chns, + _out_chns, + kernel_size=params["kernel_size"], + norm_type=norm_type, + acti_type=acti_type, + dropout_prob=None, + ) + ) # residual blocks for (idx, params) in enumerate(layer_params[1:-2]): # res blocks except the 1st and last two conv layers. 
-            _in_chns, _out_chns = _out_chns, params['n_features'] -            _dilation = 2**idx -            for _ in range(params['repeat']): +            _in_chns, _out_chns = _out_chns, params["n_features"] +            _dilation = 2 ** idx +            for _ in range(params["repeat"]): blocks.append( -                HighResBlock(spatial_dims, -                             _in_chns, -                             _out_chns, -                             params['kernels'], -                             dilation=_dilation, -                             norm_type=norm_type, -                             acti_type=acti_type)) +                HighResBlock( +                    spatial_dims, +                    _in_chns, +                    _out_chns, +                    params["kernels"], +                    dilation=_dilation, +                    norm_type=norm_type, +                    acti_type=acti_type, +                ) +            ) _in_chns = _out_chns # final conv layers params = layer_params[-2] -        _in_chns, _out_chns = _out_chns, params['n_features'] +        _in_chns, _out_chns = _out_chns, params["n_features"] blocks.append( -            ConvNormActi(spatial_dims, -                         _in_chns, -                         _out_chns, -                         kernel_size=params['kernel_size'], -                         norm_type=norm_type, -                         acti_type=acti_type, -                         dropout_prob=dropout_prob)) +            ConvNormActi( +                spatial_dims, +                _in_chns, +                _out_chns, +                kernel_size=params["kernel_size"], +                norm_type=norm_type, +                acti_type=acti_type, +                dropout_prob=dropout_prob, +            ) +        ) params = layer_params[-1] _in_chns = _out_chns blocks.append( -            ConvNormActi(spatial_dims, -                         _in_chns, -                         out_channels, -                         kernel_size=params['kernel_size'], -                         norm_type=norm_type, -                         acti_type=None, -                         dropout_prob=None)) +            ConvNormActi( +                spatial_dims, +                _in_chns, +                out_channels, +                kernel_size=params["kernel_size"], +                norm_type=norm_type, +                acti_type=None, +                dropout_prob=None, +            ) +        ) self.blocks = nn.Sequential(*blocks) diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py index 7d0aa27d96..dd84b35940 100644 --- a/monai/networks/nets/unet.py +++ b/monai/networks/nets/unet.py @@ -21,9 +21,20 @@ @export("monai.networks.nets") @alias("Unet") class UNet(nn.Module): - -    def __init__(self, dimensions, in_channels, out_channels, channels, strides, kernel_size=3, up_kernel_size=3, -                 num_res_units=0, act=Act.PRELU, norm=Norm.INSTANCE, dropout=0): +    def __init__( +        self, +        dimensions, +        in_channels, +        out_channels, +        channels, +        strides, +        kernel_size=3, +        up_kernel_size=3, +        num_res_units=0, +        act=Act.PRELU, +        norm=Norm.INSTANCE, +        dropout=0, +    ): super().__init__() assert len(channels) == (len(strides) + 1) self.dimensions = dimensions @@ -63,22 +74,52 @@ def _create_block(inc, outc, channels, strides, is_top): def _get_down_layer(self, in_channels, out_channels, strides, is_top): if self.num_res_units > 0: -            return ResidualUnit(self.dimensions, in_channels, out_channels, strides, self.kernel_size, self.num_res_units, -                                self.act, self.norm, self.dropout) +            return ResidualUnit( +                self.dimensions, +                in_channels, +                out_channels, +                strides, +                self.kernel_size, +                self.num_res_units, +                self.act, +                self.norm, +                self.dropout, +            ) else: -            return Convolution(self.dimensions, in_channels, out_channels, strides, self.kernel_size, self.act, self.norm, -                                self.dropout) +            return Convolution( +                self.dimensions, in_channels, out_channels, strides, self.kernel_size, self.act, self.norm, self.dropout +            ) def _get_bottom_layer(self, in_channels, out_channels): return self._get_down_layer(in_channels, out_channels, 1, False) def _get_up_layer(self, in_channels, out_channels, strides, is_top): -        conv = Convolution(self.dimensions, in_channels, out_channels, strides, self.up_kernel_size, self.act, self.norm, -                            self.dropout, conv_only=is_top and self.num_res_units == 0, is_transposed=True) +        conv = Convolution( +            self.dimensions, +            in_channels, +            out_channels, +            strides, +            self.up_kernel_size, +            self.act, +            self.norm, +            self.dropout, +            conv_only=is_top and self.num_res_units == 0,
+            is_transposed=True, +        ) if self.num_res_units > 0: -            ru = ResidualUnit(self.dimensions, out_channels, out_channels, 1, self.kernel_size, 1, self.act, self.norm, -                              self.dropout, last_conv_only=is_top) +            ru = ResidualUnit( +                self.dimensions, +                out_channels, +                out_channels, +                1, +                self.kernel_size, +                1, +                self.act, +                self.norm, +                self.dropout, +                last_conv_only=is_top, +            ) return nn.Sequential(conv, ru) else: return conv diff --git a/monai/networks/utils.py b/monai/networks/utils.py index 69fdb392e5..6b908f472c 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -29,7 +29,7 @@ def one_hot(labels, num_classes): """ num_dims = labels.dim() if num_dims > 1: -        assert labels.shape[1] == 1, 'labels should have a channel with length equals to one.' +        assert labels.shape[1] == 1, "labels should have a channel with length equals to one." labels = torch.squeeze(labels, 1) labels = f.one_hot(labels.long(), num_classes) new_axes = [0, -1] + list(range(1, num_dims - 1)) @@ -62,6 +62,6 @@ def predict_segmentation(logits, mutually_exclusive=False, threshold=0): return (logits >= threshold).int() else: if logits.shape[1] == 1: -            warnings.warn('single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.') +            warnings.warn("single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.") return (logits >= threshold).int() return logits.argmax(1, keepdim=True) diff --git a/monai/transforms/adaptors.py b/monai/transforms/adaptors.py index 183085e5db..813ad030b6 100644 --- a/monai/transforms/adaptors.py +++ b/monai/transforms/adaptors.py @@ -99,21 +99,16 @@ def __call__(img): import monai -@monai.utils.export('monai.transforms') +@monai.utils.export("monai.transforms") def adaptor(function, outputs, inputs=None): - def must_be_types_or_none(variable_name, variable, types): if variable is not None: if not isinstance(variable, types): -            raise ValueError( -                "'{}' must be None or {} but is {}".format( -                    variable_name, types, type(variable))) +            raise ValueError("'{}' must be None or {} but is {}".format(variable_name, types, type(variable))) def must_be_types(variable_name, variable, types): if not isinstance(variable, types): -            raise ValueError( -                "'{}' must be one of {} but is {}".format( -                    variable_name, types, type(variable))) +            raise ValueError("'{}' must be one of {} but is {}".format(variable_name, types, type(variable))) def map_names(ditems, input_map): return {input_map(k, k): v for k, v in ditems.items()} @@ -126,7 +121,7 @@ def _inner(ditems): sig = FunctionSignature(function) if sig.found_kwargs: -            must_be_types_or_none('inputs', inputs, (dict,)) +            must_be_types_or_none("inputs", inputs, (dict,)) # we just forward all arguments unless we have been provided an input map if inputs is None: dinputs = dict(ditems) @@ -138,7 +133,7 @@ def _inner(ditems): # no **kwargs # select only items from the method signature dinputs = dict((k, v) for k, v in ditems.items() if k in sig.non_var_parameters) -        must_be_types_or_none('inputs', inputs, (str, list, tuple, dict)) +        must_be_types_or_none("inputs", inputs, (str, list, tuple, dict)) if inputs is None: pass elif isinstance(inputs, str): @@ -156,14 +151,14 @@ def _inner(ditems): # now the mapping back to the output dictionary depends on outputs and what was returned from the function op = outputs if isinstance(ret, dict): -            must_be_types_or_none('outputs', op, (dict,)) +        must_be_types_or_none("outputs", op, (dict,)) if op is not None: ret = {v: ret[k] for k, v
in op.items()} elif isinstance(ret, (list, tuple)): if len(ret) == 1: - must_be_types('outputs', op, (str, list, tuple)) + must_be_types("outputs", op, (str, list, tuple)) else: - must_be_types('outputs', op, (list, tuple)) + must_be_types("outputs", op, (list, tuple)) if isinstance(op, str): op = [op] @@ -173,7 +168,7 @@ def _inner(ditems): ret = dict((k, v) for k, v in zip(op, ret)) else: - must_be_types('outputs', op, (str, list, tuple)) + must_be_types("outputs", op, (str, list, tuple)) if isinstance(op, (list, tuple)): if len(op) != 1: raise ValueError("'outputs' must be of length one if it is a list or tuple") @@ -189,9 +184,8 @@ def _inner(ditems): return _inner -@monai.utils.export('monai.transforms') +@monai.utils.export("monai.transforms") def apply_alias(fn, name_map): - def _inner(data): # map names @@ -211,7 +205,7 @@ def _inner(data): return _inner -@monai.utils.export('monai.transforms') +@monai.utils.export("monai.transforms") def to_kwargs(fn): def _inner(data): return fn(**data) @@ -222,6 +216,7 @@ def _inner(data): class FunctionSignature: def __init__(self, function): import inspect + sfn = inspect.signature(function) self.found_args = False self.found_kwargs = False diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index 206202cf4d..d0b644451f 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -61,6 +61,7 @@ class Randomizable(ABC): An interface for handling local numpy random state. this is mainly for randomized data augmentation transforms. """ + R = np.random.RandomState() def set_random_state(self, seed=None, state=None): @@ -83,7 +84,7 @@ def set_random_state(self, seed=None, state=None): if state is not None: if not isinstance(state, np.random.RandomState): - raise ValueError('`state` must be a `np.random.RandomState`, got {}'.format(type(state))) + raise ValueError("`state` must be a `np.random.RandomState`, got {}".format(type(state))) self.R = state return self @@ -183,11 +184,8 @@ def randomize(self): _transform.randomize() except TypeError as type_error: warnings.warn( - 'Transform "{0}" in Compose not randomized\n{0}.{1}.'.format( - type(_transform).__name__, - type_error - ), - RuntimeWarning + 'Transform "{0}" in Compose not randomized\n{0}.{1}.'.format(type(_transform).__name__, type_error), + RuntimeWarning, ) def __call__(self, input_): @@ -220,10 +218,10 @@ def __call__(self, data): def __init__(self, keys): self.keys = ensure_tuple(keys) if not self.keys: - raise ValueError('keys unspecified') + raise ValueError("keys unspecified") for key in self.keys: if not isinstance(key, Hashable): - raise ValueError('keys should be a hashable or a sequence of hashables, got {}'.format(type(key))) + raise ValueError("keys should be a hashable or a sequence of hashables, got {}".format(type(key))) @abstractmethod def __call__(self, data): diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index caa7de8c80..527d120dd3 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -34,16 +34,16 @@ class SpatialPad(Transform): for more details, please check: https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html """ - def __init__(self, spatial_size, method='symmetric', mode='constant'): - assert isinstance(spatial_size, (list, tuple)), 'spatial_out_size must be list or tuple.' + def __init__(self, spatial_size, method="symmetric", mode="constant"): + assert isinstance(spatial_size, (list, tuple)), "spatial_out_size must be list or tuple." 
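# A standalone sketch of the "symmetric" strategy validated above; the
# final append formula is an assumption, since _determine_data_pad_width
# below is only partially visible in this hunk. The shortfall on each
# spatial axis is split between the two sides, with any odd voxel going
# to the far side.
def symmetric_pad_width(spatial_size, data_shape):
    pad_width = []
    for target, actual in zip(spatial_size, data_shape):
        width = max(target - actual, 0)
        pad_width.append((width // 2, width - width // 2))
    return pad_width

assert symmetric_pad_width((5, 5), (3, 4)) == [(1, 1), (0, 1)]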
self.spatial_size = spatial_size - assert method in ('symmetric', 'end'), 'unsupported padding type.' + assert method in ("symmetric", "end"), "unsupported padding type." self.method = method - assert isinstance(mode, str), 'mode must be str.' + assert isinstance(mode, str), "mode must be str." self.mode = mode def _determine_data_pad_width(self, data_shape): - if self.method == 'symmetric': + if self.method == "symmetric": pad_width = list() for i in range(len(self.spatial_size)): width = max(self.spatial_size[i] - data_shape[i], 0) @@ -82,19 +82,19 @@ def __init__(self, roi_center=None, roi_size=None, roi_start=None, roi_end=None) self.roi_start = np.subtract(roi_center, np.floor_divide(roi_size, 2)) self.roi_end = np.add(self.roi_start, roi_size) else: - assert roi_start is not None and roi_end is not None, 'roi_start and roi_end must be provided.' + assert roi_start is not None and roi_end is not None, "roi_start and roi_end must be provided." self.roi_start = np.asarray(roi_start, dtype=np.uint16) self.roi_end = np.asarray(roi_end, dtype=np.uint16) - assert np.all(self.roi_start >= 0), 'all elements of roi_start must be greater than or equal to 0.' - assert np.all(self.roi_end > 0), 'all elements of roi_end must be positive.' - assert np.all(self.roi_end >= self.roi_start), 'invalid roi range.' + assert np.all(self.roi_start >= 0), "all elements of roi_start must be greater than or equal to 0." + assert np.all(self.roi_end > 0), "all elements of roi_end must be positive." + assert np.all(self.roi_end >= self.roi_start), "invalid roi range." def __call__(self, img): max_end = img.shape[1:] sd = min(len(self.roi_start), len(max_end)) - assert np.all(max_end[:sd] >= self.roi_start[:sd]), 'roi start out of image space.' - assert np.all(max_end[:sd] >= self.roi_end[:sd]), 'roi end out of image space.' + assert np.all(max_end[:sd] >= self.roi_start[:sd]), "roi start out of image space." + assert np.all(max_end[:sd] >= self.roi_end[:sd]), "roi end out of image space." slices = [slice(None)] + [slice(s, e) for s, e in zip(self.roi_start[:sd], self.roi_end[:sd])] return img[tuple(slices)] diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 06ed74d70b..549d82daef 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -28,7 +28,7 @@ class SpatialPadd(MapTransform): Performs padding to the data, symmetric for all sides or all on one side for each dimension. """ - def __init__(self, keys, spatial_size, method='symmetric', mode='constant'): + def __init__(self, keys, spatial_size, method="symmetric", mode="constant"): """ Args: keys (hashable items): keys of the corresponding items to be transformed. @@ -172,8 +172,9 @@ def __init__(self, keys, source_key, select_fn=lambda x: x > 0, channel_indexes= def __call__(self, data): d = dict(data) - box_start, box_end = \ - generate_spatial_bounding_box(data[self.source_key], self.select_fn, self.channel_indexes, self.margin) + box_start, box_end = generate_spatial_bounding_box( + data[self.source_key], self.select_fn, self.channel_indexes, self.margin + ) cropper = SpatialCrop(roi_start=box_start, roi_end=box_end) for key in self.keys: d[key] = cropper(d[key]) @@ -203,14 +204,15 @@ class RandCropByPosNegLabeld(Randomizable, MapTransform): def __init__(self, keys, label_key, size, pos=1, neg=1, num_samples=1, image_key=None, image_threshold=0): super().__init__(keys) - assert isinstance(label_key, str), 'label_key must be a string.' 
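# A small numeric sketch of the sampling weights checked below: `pos` and
# `neg` are relative weights that the constructor reduces to a single
# foreground probability (stored as self.pos_ratio a few lines further on).
pos, neg = 2.0, 1.0
pos_ratio = pos / (pos + neg)
assert abs(pos_ratio - 2.0 / 3.0) < 1e-12  # two foreground crops per background crop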
- assert isinstance(size, (list, tuple)), 'size must be list or tuple.' - assert all(isinstance(x, int) and x > 0 for x in size), 'all elements of size must be positive integers.' + assert isinstance(label_key, str), "label_key must be a string." + assert isinstance(size, (list, tuple)), "size must be list or tuple." + assert all(isinstance(x, int) and x > 0 for x in size), "all elements of size must be positive integers." assert float(pos) >= 0 and float(neg) >= 0, "pos and neg must be greater than or equal to 0." assert float(pos) + float(neg) > 0, "pos and neg cannot both be 0." - assert isinstance(num_samples, int), \ - "invalid samples number: {}. num_samples must be an integer.".format(num_samples) - assert num_samples >= 0, 'num_samples must be greater than or equal to 0.' + assert isinstance(num_samples, int), "invalid samples number: {}. num_samples must be an integer.".format( + num_samples + ) + assert num_samples >= 0, "num_samples must be greater than or equal to 0." self.label_key = label_key self.size = size self.pos_ratio = float(pos) / (float(pos) + float(neg)) @@ -220,8 +222,9 @@ def __init__(self, keys, label_key, size, pos=1, neg=1, num_samples=1, image_key self.centers = None def randomize(self, label, image): - self.centers = generate_pos_neg_label_crop_centers(label, self.size, self.num_samples, self.pos_ratio, - image, self.image_threshold, self.R) + self.centers = generate_pos_neg_label_crop_centers( + label, self.size, self.num_samples, self.pos_ratio, image, self.image_threshold, self.R + ) def __call__(self, data): d = dict(data) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index a856d663e4..be24505ac3 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -70,7 +70,7 @@ def __init__(self, offsets, prob=0.1): prob (float): probability of shift. """ self.offsets = (-offsets, offsets) if not isinstance(offsets, (list, tuple)) else offsets - assert len(self.offsets) == 2, 'offsets should be a number or pair of numbers.' + assert len(self.offsets) == 2, "offsets should be a number or pair of numbers." self.prob = prob self._do_transform = False @@ -127,7 +127,7 @@ def __init__(self, factors, prob=0.1, dtype=np.float32): dtype (np.dtype): expected output data type. """ self.factors = (-factors, factors) if not isinstance(factors, (list, tuple)) else factors - assert len(self.factors) == 2, 'factors should be a number or pair of numbers.' + assert len(self.factors) == 2, "factors should be a number or pair of numbers." self.prob = prob self.dtype = dtype self._do_transform = False @@ -161,8 +161,9 @@ class NormalizeIntensity(Transform): def __init__(self, subtrahend=None, divisor=None, nonzero=False, channel_wise=False): if subtrahend is not None or divisor is not None: - assert isinstance(subtrahend, np.ndarray) and isinstance(divisor, np.ndarray), \ - 'subtrahend and divisor must be set in pair and in numpy array.' + assert isinstance(subtrahend, np.ndarray) and isinstance( + divisor, np.ndarray + ), "subtrahend and divisor must be set in pair and in numpy array." self.subtrahend = subtrahend self.divisor = divisor self.nonzero = nonzero @@ -198,7 +199,7 @@ class ThresholdIntensity(Transform): """ def __init__(self, threshold, above=True, cval=0): - assert isinstance(threshold, (float, int)), 'must set the threshold to filter intensity.' + assert isinstance(threshold, (float, int)), "must set the threshold to filter intensity." 
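# A sketch of the filtering this transform performs, assuming the usual
# numpy.where formulation (the __call__ body is not part of this hunk):
# values on the chosen side of the threshold are kept, the rest become cval.
import numpy as np

img = np.array([0.2, 0.6, 1.4])
threshold, above, cval = 0.5, True, 0.0
result = np.where(img > threshold if above else img < threshold, img, cval)
assert result.tolist() == [0.0, 0.6, 1.4]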
self.threshold = threshold self.above = above self.cval = cval @@ -244,7 +245,7 @@ class AdjustContrast(Transform): """ def __init__(self, gamma): - assert isinstance(gamma, (float, int)), 'gamma must be a float or int number.' + assert isinstance(gamma, (float, int)), "gamma must be a float or int number." self.gamma = gamma def __call__(self, img): @@ -267,12 +268,11 @@ class RandAdjustContrast(Randomizable, Transform): def __init__(self, prob=0.1, gamma=(0.5, 4.5)): self.prob = prob if not isinstance(gamma, (tuple, list)): - assert gamma > 0.5, \ - 'if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)' + assert gamma > 0.5, "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)" self.gamma = (0.5, gamma) else: self.gamma = gamma - assert len(self.gamma) == 2, 'gamma should be a number or pair of numbers.' + assert len(self.gamma) == 2, "gamma should be a number or pair of numbers." self._do_transform = False self.gamma_value = None diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 4d5b0cb249..2235110563 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -18,8 +18,14 @@ import numpy as np from monai.transforms.compose import MapTransform, Randomizable -from monai.transforms.intensity.array import NormalizeIntensity, ScaleIntensityRange, \ - ThresholdIntensity, AdjustContrast, ShiftIntensity, ScaleIntensity +from monai.transforms.intensity.array import ( + NormalizeIntensity, + ScaleIntensityRange, + ThresholdIntensity, + AdjustContrast, + ShiftIntensity, + ScaleIntensity, +) class RandGaussianNoised(Randomizable, MapTransform): @@ -97,7 +103,7 @@ def __init__(self, keys, offsets, prob=0.1): """ super().__init__(keys) self.offsets = (-offsets, offsets) if not isinstance(offsets, (list, tuple)) else offsets - assert len(self.offsets) == 2, 'offsets should be a number or pair of numbers.' + assert len(self.offsets) == 2, "offsets should be a number or pair of numbers." self.prob = prob self._do_transform = False @@ -161,7 +167,7 @@ def __init__(self, keys, factors, prob=0.1, dtype=np.float32): """ super().__init__(keys) self.factors = (-factors, factors) if not isinstance(factors, (list, tuple)) else factors - assert len(self.factors) == 2, 'factors should be a number or pair of numbers.' + assert len(self.factors) == 2, "factors should be a number or pair of numbers." self.prob = prob self.dtype = dtype self._do_transform = False @@ -297,12 +303,11 @@ def __init__(self, keys, prob=0.1, gamma=(0.5, 4.5)): super().__init__(keys) self.prob = prob if not isinstance(gamma, (tuple, list)): - assert gamma > 0.5, \ - 'if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)' + assert gamma > 0.5, "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)" self.gamma = (0.5, gamma) else: self.gamma = gamma - assert len(self.gamma) == 2, 'gamma should be a number or pair of numbers.' + assert len(self.gamma) == 2, "gamma should be a number or pair of numbers." 
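# A sketch of gamma adjustment in its conventional range-preserving form;
# the exact formula used by AdjustContrast is not shown in this hunk, so
# treat this as an illustration of the `gamma` parameter, not the source.
import numpy as np

def gamma_adjust(img, gamma):
    lo, rng = img.min(), img.max() - img.min()
    return ((img - lo) / max(rng, 1e-7)) ** gamma * rng + lo

x = np.linspace(0.0, 1.0, 5)
y = gamma_adjust(x, 2.0)  # gamma > 1 darkens mid-tones; end points are unchanged
assert np.isclose(y[0], 0.0) and np.isclose(y[-1], 1.0)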
self._do_transform = False self.gamma_value = None diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index baa222155b..705c8d1f36 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -63,17 +63,17 @@ def __call__(self, filename): img = nib.load(name) img = correct_nifti_header_if_necessary(img) header = dict(img.header) - header['filename_or_obj'] = name - header['affine'] = img.affine - header['original_affine'] = img.affine.copy() - header['as_closest_canonical'] = self.as_closest_canonical - ndim = img.header['dim'][0] + header["filename_or_obj"] = name + header["affine"] = img.affine + header["original_affine"] = img.affine.copy() + header["as_closest_canonical"] = self.as_closest_canonical + ndim = img.header["dim"][0] spatial_rank = min(ndim, 3) - header['spatial_shape'] = img.header['dim'][1:spatial_rank + 1] + header["spatial_shape"] = img.header["dim"][1 : spatial_rank + 1] if self.as_closest_canonical: img = nib.as_closest_canonical(img) - header['affine'] = img.affine + header["affine"] = img.affine img_array.append(np.array(img.get_fdata(dtype=self.dtype))) img.uncache() @@ -84,13 +84,16 @@ def __call__(self, filename): if not compatible_meta: for meta_key in header: meta_datum = header[meta_key] - if type(meta_datum).__name__ == 'ndarray' \ - and np_str_obj_array_pattern.search(meta_datum.dtype.str) is not None: + if ( + type(meta_datum).__name__ == "ndarray" + and np_str_obj_array_pattern.search(meta_datum.dtype.str) is not None + ): continue compatible_meta[meta_key] = meta_datum else: - assert np.allclose(header['affine'], compatible_meta['affine']), \ - 'affine data of all images should be same.' + assert np.allclose( + header["affine"], compatible_meta["affine"] + ), "affine data of all images should be same." img_array = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] if self.image_only: @@ -128,13 +131,13 @@ def __call__(self, filename): data = data.astype(self.dtype) img_array.append(data) meta = dict() - meta['filename_or_obj'] = name - meta['spatial_shape'] = data.shape[:2] - meta['format'] = img.format - meta['mode'] = img.mode - meta['width'] = img.width - meta['height'] = img.height - meta['info'] = img.info + meta["filename_or_obj"] = name + meta["spatial_shape"] = data.shape[:2] + meta["format"] = img.format + meta["mode"] = img.mode + meta["width"] = img.width + meta["height"] = img.height + meta["info"] = img.info if self.image_only: continue @@ -142,8 +145,9 @@ def __call__(self, filename): if not compatible_meta: compatible_meta = meta else: - assert np.allclose(meta['spatial_shape'], compatible_meta['spatial_shape']), \ - 'all the images in the list should have same spatial shape.' + assert np.allclose( + meta["spatial_shape"], compatible_meta["spatial_shape"] + ), "all the images in the list should have same spatial shape." img_array = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] return img_array if self.image_only else (img_array, compatible_meta) diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 8b0cb245cc..62e639ba8d 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -31,8 +31,9 @@ class LoadNiftid(MapTransform): be created as ``self.meta_key_format(key, metadata_key)``. 
""" - def __init__(self, keys, as_closest_canonical=False, dtype=np.float32, - meta_key_format='{}.{}', overwriting_keys=False): + def __init__( + self, keys, as_closest_canonical=False, dtype=np.float32, meta_key_format="{}.{}", overwriting_keys=False + ): """ Args: keys (hashable items): keys of the corresponding items to be transformed. @@ -53,13 +54,13 @@ def __call__(self, data): d = dict(data) for key in self.keys: data = self.loader(d[key]) - assert isinstance(data, (tuple, list)), 'loader must return a tuple or list.' + assert isinstance(data, (tuple, list)), "loader must return a tuple or list." d[key] = data[0] - assert isinstance(data[1], dict), 'metadata must be a dict.' + assert isinstance(data[1], dict), "metadata must be a dict." for k in sorted(data[1]): key_to_add = self.meta_key_format.format(key, k) if key_to_add in d and not self.overwriting_keys: - raise KeyError('meta data key {} already exists.'.format(key_to_add)) + raise KeyError("meta data key {} already exists.".format(key_to_add)) d[key_to_add] = data[1][k] return d @@ -69,7 +70,7 @@ class LoadPNGd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.LoadPNG`. """ - def __init__(self, keys, dtype=np.float32, meta_key_format='{}.{}'): + def __init__(self, keys, dtype=np.float32, meta_key_format="{}.{}"): """ Args: keys (hashable items): keys of the corresponding items to be transformed. @@ -86,9 +87,9 @@ def __call__(self, data): d = dict(data) for key in self.keys: data = self.loader(d[key]) - assert isinstance(data, (tuple, list)), 'loader must return a tuple or list.' + assert isinstance(data, (tuple, list)), "loader must return a tuple or list." d[key] = data[0] - assert isinstance(data[1], dict), 'metadata must be a dict.' + assert isinstance(data[1], dict), "metadata must be a dict." for k in sorted(data[1]): key_to_add = self.meta_key_format.format(key, k) d[key_to_add] = data[1][k] diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 86a22d5d2d..b35bf519ba 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -23,8 +23,14 @@ from monai.data.utils import zoom_affine, compute_shape_offset, to_affine_nd from monai.networks.layers.simplelayers import GaussianFilter from monai.transforms.compose import Transform, Randomizable -from monai.transforms.utils import create_control_grid, create_grid, create_rotate, \ - create_scale, create_shear, create_translate +from monai.transforms.utils import ( + create_control_grid, + create_grid, + create_rotate, + create_scale, + create_shear, + create_translate, +) from monai.utils.misc import ensure_tuple @@ -33,7 +39,7 @@ class Spacing(Transform): Resample input image into the specified `pixdim`. """ - def __init__(self, pixdim, diagonal=False, mode='constant', cval=0, dtype=None): + def __init__(self, pixdim, diagonal=False, mode="constant", cval=0, dtype=None): """ Args: pixdim (sequence of floats): output voxel spacing. @@ -72,7 +78,7 @@ def __call__(self, data_array, affine=None, interp_order=3): """ sr = data_array.ndim - 1 if sr <= 0: - raise ValueError('the array should have at least one spatial dimension.') + raise ValueError("the array should have at least one spatial dimension.") if affine is None: # default to identity affine = np.eye(sr + 1, dtype=np.float64) @@ -81,9 +87,9 @@ def __call__(self, data_array, affine=None, interp_order=3): affine_ = to_affine_nd(sr, affine) out_d = self.pixdim[:sr] if out_d.size < sr: - out_d = np.append(out_d, [1.] 
* (out_d.size - sr)) + out_d = np.append(out_d, [1.0] * (out_d.size - sr)) if np.any(out_d <= 0): - raise ValueError('pixdim must be positive, got {}'.format(out_d)) + raise ValueError("pixdim must be positive, got {}".format(out_d)) # compute output affine, shape and offset new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal) output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine) @@ -96,8 +102,13 @@ def __call__(self, data_array, affine=None, interp_order=3): output_data = [] for data in data_array: data_ = scipy.ndimage.affine_transform( - data.astype(dtype), matrix=transform_, output_shape=output_shape, - order=interp_order, mode=self.mode, cval=self.cval) + data.astype(dtype), + matrix=transform_, + output_shape=output_shape, + order=interp_order, + mode=self.mode, + cval=self.cval, + ) output_data.append(data_) output_data = np.stack(output_data) new_affine = to_affine_nd(affine, new_affine) @@ -109,7 +120,7 @@ class Orientation(Transform): Change the input image's orientation into the specified based on `axcodes`. """ - def __init__(self, axcodes=None, as_closest_canonical=False, labels=tuple(zip('LPI', 'RAS'))): + def __init__(self, axcodes=None, as_closest_canonical=False, labels=tuple(zip("LPI", "RAS"))): """ Args: axcodes (N elements sequence): for spatial ND input's orientation. @@ -125,9 +136,9 @@ def __init__(self, axcodes=None, as_closest_canonical=False, labels=tuple(zip('L See Also: `nibabel.orientations.ornt2axcodes`. """ if axcodes is None and not as_closest_canonical: - raise ValueError('provide either `axcodes` or `as_closest_canonical=True`.') + raise ValueError("provide either `axcodes` or `as_closest_canonical=True`.") if axcodes is not None and as_closest_canonical: - warnings.warn('using as_closest_canonical=True, axcodes ignored.') + warnings.warn("using as_closest_canonical=True, axcodes ignored.") self.axcodes = axcodes self.as_closest_canonical = as_closest_canonical self.labels = labels @@ -144,7 +155,7 @@ def __call__(self, data_array, affine=None): """ sr = data_array.ndim - 1 if sr <= 0: - raise ValueError('the array should have at least one spatial dimension.') + raise ValueError("the array should have at least one spatial dimension.") if affine is None: affine = np.eye(sr + 1, dtype=np.float64) affine_ = np.eye(sr + 1, dtype=np.float64) @@ -156,8 +167,10 @@ def __call__(self, data_array, affine=None): else: dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels) if len(dst) < sr: - raise ValueError('`self.axcodes` should have at least {0} elements' - ' given the data array is in spatial {0}D, got "{1}"'.format(sr, self.axcodes)) + raise ValueError( + "`self.axcodes` should have at least {0} elements" + ' given the data array is in spatial {0}D, got "{1}"'.format(sr, self.axcodes) + ) spatial_ornt = nib.orientations.ornt_transform(src, dst) ornt = spatial_ornt.copy() ornt[:, 0] += 1 # skip channel dim @@ -188,9 +201,7 @@ def __call__(self, img): """ flipped = list() for channel in img: - flipped.append( - np.flip(channel, self.spatial_axis) - ) + flipped.append(np.flip(channel, self.spatial_axis)) return np.stack(flipped) @@ -214,8 +225,17 @@ class Resize(Transform): anti_aliasing_sigma (float, tuple of floats): Standard deviation for gaussian filtering. 
""" - def __init__(self, spatial_size, order=1, mode='reflect', cval=0, - clip=True, preserve_range=True, anti_aliasing=True, anti_aliasing_sigma=None): + def __init__( + self, + spatial_size, + order=1, + mode="reflect", + cval=0, + clip=True, + preserve_range=True, + anti_aliasing=True, + anti_aliasing_sigma=None, + ): assert isinstance(order, int), "order must be integer." self.spatial_size = spatial_size self.order = order @@ -234,11 +254,17 @@ def __call__(self, img): resized = list() for channel in img: resized.append( - resize(channel, self.spatial_size, order=self.order, - mode=self.mode, cval=self.cval, - clip=self.clip, preserve_range=self.preserve_range, - anti_aliasing=self.anti_aliasing, - anti_aliasing_sigma=self.anti_aliasing_sigma) + resize( + channel, + self.spatial_size, + order=self.order, + mode=self.mode, + cval=self.cval, + clip=self.clip, + preserve_range=self.preserve_range, + anti_aliasing=self.anti_aliasing, + anti_aliasing_sigma=self.anti_aliasing_sigma, + ) ) return np.stack(resized).astype(np.float32) @@ -262,7 +288,7 @@ class Rotate(Transform): prefilter (bool): Apply spline_filter before interpolation. Default: True. """ - def __init__(self, angle, spatial_axes=(0, 1), reshape=True, order=1, mode='constant', cval=0, prefilter=True): + def __init__(self, angle, spatial_axes=(0, 1), reshape=True, order=1, mode="constant", cval=0, prefilter=True): self.angle = angle self.reshape = reshape self.order = order @@ -279,8 +305,16 @@ def __call__(self, img): rotated = list() for channel in img: rotated.append( - scipy.ndimage.rotate(channel, self.angle, self.spatial_axes, reshape=self.reshape, - order=self.order, mode=self.mode, cval=self.cval, prefilter=self.prefilter) + scipy.ndimage.rotate( + channel, + self.angle, + self.spatial_axes, + reshape=self.reshape, + order=self.order, + mode=self.mode, + cval=self.cval, + prefilter=self.prefilter, + ) ) return np.stack(rotated).astype(np.float32) @@ -301,7 +335,7 @@ class Zoom(Transform): keep_size (bool): Should keep original size (pad if needed). """ - def __init__(self, zoom, order=3, mode='constant', cval=0, prefilter=True, use_gpu=False, keep_size=False): + def __init__(self, zoom, order=3, mode="constant", cval=0, prefilter=True, use_gpu=False, keep_size=False): assert isinstance(order, int), "Order must be integer." self.zoom = zoom self.order = order @@ -317,7 +351,7 @@ def __init__(self, zoom, order=3, mode='constant', cval=0, prefilter=True, use_g self._zoom = zoom_gpu except ImportError: - print('For GPU zoom, please install cupy. Defaulting to cpu.') + print("For GPU zoom, please install cupy. 
Defaulting to cpu.") self._zoom = scipy.ndimage.zoom self.use_gpu = False else: @@ -331,23 +365,24 @@ def __call__(self, img): zoomed = list() if self.use_gpu: import cupy + for channel in cupy.array(img): - zoom_channel = self._zoom(channel, - zoom=self.zoom, - order=self.order, - mode=self.mode, - cval=self.cval, - prefilter=self.prefilter) + zoom_channel = self._zoom( + channel, zoom=self.zoom, order=self.order, mode=self.mode, cval=self.cval, prefilter=self.prefilter + ) zoomed.append(cupy.asnumpy(zoom_channel)) else: for channel in img: zoomed.append( - self._zoom(channel, - zoom=self.zoom, - order=self.order, - mode=self.mode, - cval=self.cval, - prefilter=self.prefilter)) + self._zoom( + channel, + zoom=self.zoom, + order=self.order, + mode=self.mode, + cval=self.cval, + prefilter=self.prefilter, + ) + ) zoomed = np.stack(zoomed).astype(np.float32) if not self.keep_size or np.allclose(img.shape, zoomed.shape): @@ -362,7 +397,7 @@ def __call__(self, img): pad_vec[idx] = [half, diff - half] elif diff < 0: # need slicing slice_vec[idx] = slice(half, half + od) - zoomed = np.pad(zoomed, pad_vec, mode='constant') + zoomed = np.pad(zoomed, pad_vec, mode="constant") return zoomed[tuple(slice_vec)] @@ -388,9 +423,7 @@ def __call__(self, img): """ rotated = list() for channel in img: - rotated.append( - np.rot90(channel, self.k, self.spatial_axes) - ) + rotated.append(np.rot90(channel, self.k, self.spatial_axes)) return np.stack(rotated) @@ -448,8 +481,9 @@ class RandRotate(Randomizable, Transform): prefilter (bool): Apply spline_filter before interpolation. Default: True. """ - def __init__(self, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1, - mode='constant', cval=0, prefilter=True): + def __init__( + self, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1, mode="constant", cval=0, prefilter=True + ): self.prob = prob self.degrees = degrees self.reshape = reshape @@ -459,9 +493,9 @@ def __init__(self, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1 self.prefilter = prefilter self.spatial_axes = spatial_axes - if not hasattr(self.degrees, '__iter__'): + if not hasattr(self.degrees, "__iter__"): self.degrees = (-self.degrees, self.degrees) - assert len(self.degrees) == 2, 'degrees should be a number or pair of numbers.' + assert len(self.degrees) == 2, "degrees should be a number or pair of numbers." self._do_transform = False self.angle = None @@ -474,8 +508,7 @@ def __call__(self, img): self.randomize() if not self._do_transform: return img - rotator = Rotate(self.angle, self.spatial_axes, self.reshape, self.order, - self.mode, self.cval, self.prefilter) + rotator = Rotate(self.angle, self.spatial_axes, self.reshape, self.order, self.mode, self.cval, self.prefilter) return rotator(img) @@ -524,11 +557,20 @@ class RandZoom(Randomizable, Transform): keep_size (bool): Should keep original size (pad if needed). """ - def __init__(self, prob=0.1, min_zoom=0.9, max_zoom=1.1, order=3, - mode='constant', cval=0, prefilter=True, - use_gpu=False, keep_size=False): - if hasattr(min_zoom, '__iter__') and hasattr(max_zoom, '__iter__'): - assert len(min_zoom) == len(max_zoom), 'min_zoom and max_zoom must have same length.' + def __init__( + self, + prob=0.1, + min_zoom=0.9, + max_zoom=1.1, + order=3, + mode="constant", + cval=0, + prefilter=True, + use_gpu=False, + keep_size=False, + ): + if hasattr(min_zoom, "__iter__") and hasattr(max_zoom, "__iter__"): + assert len(min_zoom) == len(max_zoom), "min_zoom and max_zoom must have same length." 
self.min_zoom = min_zoom self.max_zoom = max_zoom self.prob = prob @@ -544,7 +586,7 @@ def __init__(self, prob=0.1, min_zoom=0.9, max_zoom=1.1, order=3, def randomize(self): self._do_transform = self.R.random_sample() < self.prob - if hasattr(self.min_zoom, '__iter__'): + if hasattr(self.min_zoom, "__iter__"): self._zoom = (self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)) else: self._zoom = self.R.uniform(self.min_zoom, self.max_zoom) @@ -562,13 +604,15 @@ class AffineGrid(Transform): Affine transforms on the coordinates. """ - def __init__(self, - rotate_params=None, - shear_params=None, - translate_params=None, - scale_params=None, - as_tensor_output=True, - device=None): + def __init__( + self, + rotate_params=None, + shear_params=None, + translate_params=None, + scale_params=None, + as_tensor_output=True, + device=None, + ): self.rotate_params = rotate_params self.shear_params = shear_params self.translate_params = translate_params @@ -587,7 +631,7 @@ def __call__(self, spatial_size=None, grid=None): if spatial_size is not None: grid = create_grid(spatial_size) else: - raise ValueError('Either specify a grid or a spatial size to create a grid from.') + raise ValueError("Either specify a grid or a spatial size to create a grid from.") spatial_dims = len(grid.shape) - 1 affine = np.eye(spatial_dims + 1) @@ -615,13 +659,15 @@ class RandAffineGrid(Randomizable, Transform): generate randomised affine grid """ - def __init__(self, - rotate_range=None, - shear_range=None, - translate_range=None, - scale_range=None, - as_tensor_output=True, - device=None): + def __init__( + self, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + as_tensor_output=True, + device=None, + ): """ Args: rotate_range (a sequence of positive floats): rotate_range[0] with be used to generate the 1st rotation @@ -675,9 +721,14 @@ def __call__(self, spatial_size=None, grid=None): a 2D (3xHxW) or 3D (4xHxWxD) grid. """ self.randomize() - affine_grid = AffineGrid(rotate_params=self.rotate_params, shear_params=self.shear_params, - translate_params=self.translate_params, scale_params=self.scale_params, - as_tensor_output=self.as_tensor_output, device=self.device) + affine_grid = AffineGrid( + rotate_params=self.rotate_params, + shear_params=self.shear_params, + translate_params=self.translate_params, + scale_params=self.scale_params, + as_tensor_output=self.as_tensor_output, + device=self.device, + ) return affine_grid(spatial_size, grid) @@ -714,15 +765,14 @@ def randomize(self, grid_size): def __call__(self, spatial_size): control_grid = create_control_grid(spatial_size, self.spacing) self.randomize(control_grid.shape[1:]) - control_grid[:len(spatial_size)] += self.rand_mag * self.random_offset + control_grid[: len(spatial_size)] += self.rand_mag * self.random_offset if self.as_tensor_output: control_grid = torch.as_tensor(np.ascontiguousarray(control_grid), device=self.device) return control_grid class Resample(Transform): - - def __init__(self, padding_mode='zeros', as_tensor_output=False, device=None): + def __init__(self, padding_mode="zeros", as_tensor_output=False, device=None): """ computes output image using values from `img`, locations from `grid` using pytorch. supports spatially 2D or 3D (num_channels, H, W[, D]). 
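# Why the rescaling in the hunk below works: create_grid produces
# zero-centred coordinates in [-(dim - 1)/2, (dim - 1)/2] along each axis,
# so multiplying by 2/(dim - 1) maps them onto the [-1, 1] range that
# torch.nn.functional.grid_sample expects.
dim = 5
coords = [i - (dim - 1) / 2.0 for i in range(dim)]  # [-2.0, -1.0, 0.0, 1.0, 2.0]
normed = [2.0 * c / (dim - 1.0) for c in coords]  # [-1.0, -0.5, 0.0, 0.5, 1.0]
assert normed[0] == -1.0 and normed[-1] == 1.0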
@@ -736,7 +786,7 @@ def __init__(self, padding_mode='zeros', as_tensor_output=False, device=None): self.as_tensor_output = as_tensor_output self.device = device - def __call__(self, img, grid, mode='bilinear'): + def __call__(self, img, grid, mode="bilinear"): """ Args: img (ndarray or tensor): shape must be (num_channels, H, W[, D]). @@ -751,15 +801,13 @@ def __call__(self, img, grid, mode='bilinear'): grid = grid.to(self.device) for i, dim in enumerate(img.shape[1:]): - grid[i] = 2. * grid[i] / (dim - 1.) + grid[i] = 2.0 * grid[i] / (dim - 1.0) grid = grid[:-1] / grid[-1:] grid = grid[range(img.ndim - 2, -1, -1)] grid = grid.permute(list(range(grid.ndim))[1:] + [0]) - out = torch.nn.functional.grid_sample(img[None].float(), - grid[None].float(), - mode=mode, - padding_mode=self.padding_mode, - align_corners=False)[0] + out = torch.nn.functional.grid_sample( + img[None].float(), grid[None].float(), mode=mode, padding_mode=self.padding_mode, align_corners=False + )[0] if self.as_tensor_output: return out return out.cpu().numpy() @@ -770,16 +818,18 @@ class Affine(Transform): transform ``img`` given the affine parameters. """ - def __init__(self, - rotate_params=None, - shear_params=None, - translate_params=None, - scale_params=None, - spatial_size=None, - mode='bilinear', - padding_mode='zeros', - as_tensor_output=False, - device=None): + def __init__( + self, + rotate_params=None, + shear_params=None, + translate_params=None, + scale_params=None, + spatial_size=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=False, + device=None, + ): """ The affine transformations are applied in rotate, shear, translate, scale order. @@ -802,12 +852,14 @@ def __init__(self, whether to convert it back to numpy arrays. device (torch.device): device on which the tensor will be allocated. """ - self.affine_grid = AffineGrid(rotate_params=rotate_params, - shear_params=shear_params, - translate_params=translate_params, - scale_params=scale_params, - as_tensor_output=True, - device=device) + self.affine_grid = AffineGrid( + rotate_params=rotate_params, + shear_params=shear_params, + translate_params=translate_params, + scale_params=scale_params, + as_tensor_output=True, + device=device, + ) self.resampler = Resample(padding_mode=padding_mode, as_tensor_output=as_tensor_output, device=device) self.spatial_size = spatial_size self.mode = mode @@ -832,17 +884,19 @@ class RandAffine(Randomizable, Transform): Random affine transform. """ - def __init__(self, - prob=0.1, - rotate_range=None, - shear_range=None, - translate_range=None, - scale_range=None, - spatial_size=None, - mode='bilinear', - padding_mode='zeros', - as_tensor_output=True, - device=None): + def __init__( + self, + prob=0.1, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + spatial_size=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=True, + device=None, + ): """ Args: prob (float): probability of returning a randomized affine grid. @@ -861,9 +915,14 @@ def __init__(self, - :py:class:`Affine` for the affine transformation parameters configurations. 
""" - self.rand_affine_grid = RandAffineGrid(rotate_range=rotate_range, shear_range=shear_range, - translate_range=translate_range, scale_range=scale_range, - as_tensor_output=True, device=device) + self.rand_affine_grid = RandAffineGrid( + rotate_range=rotate_range, + shear_range=shear_range, + translate_range=translate_range, + scale_range=scale_range, + as_tensor_output=True, + device=device, + ) self.resampler = Resample(padding_mode=padding_mode, as_tensor_output=as_tensor_output, device=device) self.spatial_size = spatial_size @@ -905,19 +964,21 @@ class Rand2DElastic(Randomizable, Transform): Random elastic deformation and affine in 2D """ - def __init__(self, - spacing, - magnitude_range, - prob=0.1, - rotate_range=None, - shear_range=None, - translate_range=None, - scale_range=None, - spatial_size=None, - mode='bilinear', - padding_mode='zeros', - as_tensor_output=False, - device=None): + def __init__( + self, + spacing, + magnitude_range, + prob=0.1, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + spatial_size=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=False, + device=None, + ): """ Args: spacing (2 ints): distance in between the control points. @@ -938,11 +999,17 @@ def __init__(self, - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. """ - self.deform_grid = RandDeformGrid(spacing=spacing, magnitude_range=magnitude_range, - as_tensor_output=True, device=device) - self.rand_affine_grid = RandAffineGrid(rotate_range=rotate_range, shear_range=shear_range, - translate_range=translate_range, scale_range=scale_range, - as_tensor_output=True, device=device) + self.deform_grid = RandDeformGrid( + spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device + ) + self.rand_affine_grid = RandAffineGrid( + rotate_range=rotate_range, + shear_range=shear_range, + translate_range=translate_range, + scale_range=scale_range, + as_tensor_output=True, + device=device, + ) self.resampler = Resample(padding_mode=padding_mode, as_tensor_output=as_tensor_output, device=device) self.spatial_size = spatial_size @@ -974,7 +1041,7 @@ def __call__(self, img, spatial_size=None, mode=None): if self.do_transform: grid = self.deform_grid(spatial_size=spatial_size) grid = self.rand_affine_grid(grid=grid) - grid = torch.nn.functional.interpolate(grid[None], spatial_size, mode='bicubic', align_corners=False)[0] + grid = torch.nn.functional.interpolate(grid[None], spatial_size, mode="bicubic", align_corners=False)[0] else: grid = create_grid(spatial_size) return self.resampler(img, grid, mode) @@ -985,19 +1052,21 @@ class Rand3DElastic(Randomizable, Transform): Random elastic deformation and affine in 3D """ - def __init__(self, - sigma_range, - magnitude_range, - prob=0.1, - rotate_range=None, - shear_range=None, - translate_range=None, - scale_range=None, - spatial_size=None, - mode='bilinear', - padding_mode='zeros', - as_tensor_output=False, - device=None): + def __init__( + self, + sigma_range, + magnitude_range, + prob=0.1, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + spatial_size=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=False, + device=None, + ): """ Args: sigma_range (2 ints): a Gaussian kernel with standard deviation sampled @@ -1042,7 +1111,7 @@ def set_random_state(self, seed=None, state=None): def randomize(self, grid_size): 
self.do_transform = self.R.rand() < self.prob if self.do_transform: - self.rand_offset = self.R.uniform(-1., 1., [3] + list(grid_size)) + self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)) self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1]) self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1]) self.rand_affine_grid.randomize() @@ -1060,7 +1129,7 @@ def __call__(self, img, spatial_size=None, mode=None): grid = create_grid(spatial_size) if self.do_transform: grid = torch.as_tensor(np.ascontiguousarray(grid), device=self.device) - gaussian = GaussianFilter(3, self.sigma, 3., device=self.device) + gaussian = GaussianFilter(3, self.sigma, 3.0, device=self.device) grid[:3] += gaussian(self.rand_offset[None])[0] * self.magnitude grid = self.rand_affine_grid(grid=grid) return self.resampler(img, grid, mode) diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 82ed701da4..bd4f4ffb44 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -19,8 +19,18 @@ from monai.networks.layers.simplelayers import GaussianFilter from monai.transforms.compose import MapTransform, Randomizable -from monai.transforms.spatial.array import Flip, Orientation, Rand2DElastic, Rand3DElastic, \ - RandAffine, Resize, Rotate, Rotate90, Spacing, Zoom +from monai.transforms.spatial.array import ( + Flip, + Orientation, + Rand2DElastic, + Rand3DElastic, + RandAffine, + Resize, + Rotate, + Rotate90, + Spacing, + Zoom, +) from monai.transforms.utils import create_grid from monai.utils.misc import ensure_tuple @@ -39,8 +49,9 @@ class Spacingd(MapTransform): :py:class:`monai.transforms.Spacing` """ - def __init__(self, keys, pixdim, diagonal=False, mode='constant', cval=0, - interp_order=3, dtype=None, meta_key_format='{}.{}'): + def __init__( + self, keys, pixdim, diagonal=False, mode="constant", cval=0, interp_order=3, dtype=None, meta_key_format="{}.{}" + ): """ Args: pixdim (sequence of floats): output voxel spacing. @@ -70,18 +81,16 @@ def __init__(self, keys, pixdim, diagonal=False, mode='constant', cval=0, super().__init__(keys) self.spacing_transform = Spacing(pixdim, diagonal=diagonal, mode=mode, cval=cval, dtype=dtype) interp_order = ensure_tuple(interp_order) - self.interp_order = interp_order \ - if len(interp_order) == len(self.keys) else interp_order * len(self.keys) + self.interp_order = interp_order if len(interp_order) == len(self.keys) else interp_order * len(self.keys) self.meta_key_format = meta_key_format def __call__(self, data): d = dict(data) for key, interp in zip(self.keys, self.interp_order): - affine_key = self.meta_key_format.format(key, 'affine') + affine_key = self.meta_key_format.format(key, "affine") # resample array of each corresponding key # using affine fetched from d[affine_key] - d[key], _, new_affine = self.spacing_transform( - data_array=d[key], affine=d[affine_key], interp_order=interp) + d[key], _, new_affine = self.spacing_transform(data_array=d[key], affine=d[affine_key], interp_order=interp) # set the 'affine' key d[affine_key] = new_affine return d @@ -98,8 +107,9 @@ class Orientationd(MapTransform): to the key formed by ``meta_key_format.format(key, 'affine')``. 
""" - def __init__(self, keys, axcodes=None, as_closest_canonical=False, - labels=tuple(zip('LPI', 'RAS')), meta_key_format='{}.{}'): + def __init__( + self, keys, axcodes=None, as_closest_canonical=False, labels=tuple(zip("LPI", "RAS")), meta_key_format="{}.{}" + ): """ Args: axcodes (N elements sequence): for spatial ND input's orientation. @@ -117,14 +127,13 @@ def __init__(self, keys, axcodes=None, as_closest_canonical=False, `nibabel.orientations.ornt2axcodes`. """ super().__init__(keys) - self.ornt_transform = Orientation( - axcodes=axcodes, as_closest_canonical=as_closest_canonical, labels=labels) + self.ornt_transform = Orientation(axcodes=axcodes, as_closest_canonical=as_closest_canonical, labels=labels) self.meta_key_format = meta_key_format def __call__(self, data): d = dict(data) for key in self.keys: - affine_key = self.meta_key_format.format(key, 'affine') + affine_key = self.meta_key_format.format(key, "affine") d[key], _, new_affine = self.ornt_transform(d[key], affine=d[affine_key]) d[affine_key] = new_affine return d @@ -215,11 +224,20 @@ class Resized(MapTransform): anti_aliasing_sigma (float, tuple of floats): Standard deviation for gaussian filtering. """ - def __init__(self, keys, spatial_size, order=1, mode='reflect', cval=0, - clip=True, preserve_range=True, anti_aliasing=True, anti_aliasing_sigma=None): + def __init__( + self, + keys, + spatial_size, + order=1, + mode="reflect", + cval=0, + clip=True, + preserve_range=True, + anti_aliasing=True, + anti_aliasing_sigma=None, + ): super().__init__(keys) - self.resizer = Resize(spatial_size, order, mode, cval, clip, preserve_range, - anti_aliasing, anti_aliasing_sigma) + self.resizer = Resize(spatial_size, order, mode, cval, clip, preserve_range, anti_aliasing, anti_aliasing_sigma) def __call__(self, data): d = dict(data) @@ -233,10 +251,20 @@ class RandAffined(Randomizable, MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`. """ - def __init__(self, keys, - spatial_size, prob=0.1, - rotate_range=None, shear_range=None, translate_range=None, scale_range=None, - mode='bilinear', padding_mode='zeros', as_tensor_output=True, device=None): + def __init__( + self, + keys, + spatial_size, + prob=0.1, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=True, + device=None, + ): """ Args: keys (Hashable items): keys of the corresponding items to be transformed. @@ -259,13 +287,19 @@ def __init__(self, keys, - :py:class:`RandAffineGrid` for the random affine parameters configurations. 
""" super().__init__(keys) - default_mode = 'bilinear' if isinstance(mode, (tuple, list)) else mode - self.rand_affine = RandAffine(prob=prob, - rotate_range=rotate_range, shear_range=shear_range, - translate_range=translate_range, scale_range=scale_range, - spatial_size=spatial_size, - mode=default_mode, padding_mode=padding_mode, - as_tensor_output=as_tensor_output, device=device) + default_mode = "bilinear" if isinstance(mode, (tuple, list)) else mode + self.rand_affine = RandAffine( + prob=prob, + rotate_range=rotate_range, + shear_range=shear_range, + translate_range=translate_range, + scale_range=scale_range, + spatial_size=spatial_size, + mode=default_mode, + padding_mode=padding_mode, + as_tensor_output=as_tensor_output, + device=device, + ) self.mode = mode def set_random_state(self, seed=None, state=None): @@ -301,10 +335,22 @@ class Rand2DElasticd(Randomizable, MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.Rand2DElastic`. """ - def __init__(self, keys, - spatial_size, spacing, magnitude_range, prob=0.1, - rotate_range=None, shear_range=None, translate_range=None, scale_range=None, - mode='bilinear', padding_mode='zeros', as_tensor_output=False, device=None): + def __init__( + self, + keys, + spatial_size, + spacing, + magnitude_range, + prob=0.1, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=False, + device=None, + ): """ Args: keys (Hashable items): keys of the corresponding items to be transformed. @@ -328,13 +374,21 @@ def __init__(self, keys, - :py:class:`Affine` for the affine transformation parameters configurations. """ super().__init__(keys) - default_mode = 'bilinear' if isinstance(mode, (tuple, list)) else mode - self.rand_2d_elastic = Rand2DElastic(spacing=spacing, magnitude_range=magnitude_range, prob=prob, - rotate_range=rotate_range, shear_range=shear_range, - translate_range=translate_range, scale_range=scale_range, - spatial_size=spatial_size, - mode=default_mode, padding_mode=padding_mode, - as_tensor_output=as_tensor_output, device=device) + default_mode = "bilinear" if isinstance(mode, (tuple, list)) else mode + self.rand_2d_elastic = Rand2DElastic( + spacing=spacing, + magnitude_range=magnitude_range, + prob=prob, + rotate_range=rotate_range, + shear_range=shear_range, + translate_range=translate_range, + scale_range=scale_range, + spatial_size=spatial_size, + mode=default_mode, + padding_mode=padding_mode, + as_tensor_output=as_tensor_output, + device=device, + ) self.mode = mode def set_random_state(self, seed=None, state=None): @@ -353,7 +407,7 @@ def __call__(self, data): if self.rand_2d_elastic.do_transform: grid = self.rand_2d_elastic.deform_grid(spatial_size) grid = self.rand_2d_elastic.rand_affine_grid(grid=grid) - grid = torch.nn.functional.interpolate(grid[None], spatial_size, mode='bicubic', align_corners=False)[0] + grid = torch.nn.functional.interpolate(grid[None], spatial_size, mode="bicubic", align_corners=False)[0] else: grid = create_grid(spatial_size) @@ -372,10 +426,22 @@ class Rand3DElasticd(Randomizable, MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.Rand3DElastic`. 
""" - def __init__(self, keys, - spatial_size, sigma_range, magnitude_range, prob=0.1, - rotate_range=None, shear_range=None, translate_range=None, scale_range=None, - mode='bilinear', padding_mode='zeros', as_tensor_output=False, device=None): + def __init__( + self, + keys, + spatial_size, + sigma_range, + magnitude_range, + prob=0.1, + rotate_range=None, + shear_range=None, + translate_range=None, + scale_range=None, + mode="bilinear", + padding_mode="zeros", + as_tensor_output=False, + device=None, + ): """ Args: keys (Hashable items): keys of the corresponding items to be transformed. @@ -400,13 +466,21 @@ def __init__(self, keys, - :py:class:`Affine` for the affine transformation parameters configurations. """ super().__init__(keys) - default_mode = 'bilinear' if isinstance(mode, (tuple, list)) else mode - self.rand_3d_elastic = Rand3DElastic(sigma_range=sigma_range, magnitude_range=magnitude_range, prob=prob, - rotate_range=rotate_range, shear_range=shear_range, - translate_range=translate_range, scale_range=scale_range, - spatial_size=spatial_size, - mode=default_mode, padding_mode=padding_mode, - as_tensor_output=as_tensor_output, device=device) + default_mode = "bilinear" if isinstance(mode, (tuple, list)) else mode + self.rand_3d_elastic = Rand3DElastic( + sigma_range=sigma_range, + magnitude_range=magnitude_range, + prob=prob, + rotate_range=rotate_range, + shear_range=shear_range, + translate_range=translate_range, + scale_range=scale_range, + spatial_size=spatial_size, + mode=default_mode, + padding_mode=padding_mode, + as_tensor_output=as_tensor_output, + device=device, + ) self.mode = mode def set_random_state(self, seed=None, state=None): @@ -425,7 +499,7 @@ def __call__(self, data): if self.rand_3d_elastic.do_transform: device = self.rand_3d_elastic.device grid = torch.tensor(grid).to(device) - gaussian = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3., device=device) + gaussian = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0, device=device) grid[:3] += gaussian(self.rand_3d_elastic.rand_offset[None])[0] * self.rand_3d_elastic.magnitude grid = self.rand_3d_elastic.rand_affine_grid(grid=grid) @@ -511,11 +585,19 @@ class Rotated(MapTransform): prefilter (bool): Apply spline_filter before interpolation. Default: True. """ - def __init__(self, keys, angle, spatial_axes=(0, 1), reshape=True, order=1, - mode='constant', cval=0, prefilter=True): + def __init__( + self, keys, angle, spatial_axes=(0, 1), reshape=True, order=1, mode="constant", cval=0, prefilter=True + ): super().__init__(keys) - self.rotator = Rotate(angle=angle, spatial_axes=spatial_axes, reshape=reshape, - order=order, mode=mode, cval=cval, prefilter=prefilter) + self.rotator = Rotate( + angle=angle, + spatial_axes=spatial_axes, + reshape=reshape, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + ) def __call__(self, data): d = dict(data) @@ -544,8 +626,18 @@ class RandRotated(Randomizable, MapTransform): prefilter (bool): Apply spline_filter before interpolation. Default: True. 
""" - def __init__(self, keys, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1, - mode='constant', cval=0, prefilter=True): + def __init__( + self, + keys, + degrees, + prob=0.1, + spatial_axes=(0, 1), + reshape=True, + order=1, + mode="constant", + cval=0, + prefilter=True, + ): super().__init__(keys) self.prob = prob self.degrees = degrees @@ -556,9 +648,9 @@ def __init__(self, keys, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, o self.prefilter = prefilter self.spatial_axes = spatial_axes - if not hasattr(self.degrees, '__iter__'): + if not hasattr(self.degrees, "__iter__"): self.degrees = (-self.degrees, self.degrees) - assert len(self.degrees) == 2, 'degrees should be a number or pair of numbers.' + assert len(self.degrees) == 2, "degrees should be a number or pair of numbers." self._do_transform = False self.angle = None @@ -572,8 +664,7 @@ def __call__(self, data): d = dict(data) if not self._do_transform: return d - rotator = Rotate(self.angle, self.spatial_axes, self.reshape, self.order, - self.mode, self.cval, self.prefilter) + rotator = Rotate(self.angle, self.spatial_axes, self.reshape, self.order, self.mode, self.cval, self.prefilter) for key in self.keys: d[key] = rotator(d[key]) return d @@ -594,11 +685,11 @@ class Zoomd(MapTransform): keep_size (bool): Should keep original size (pad if needed). """ - def __init__(self, keys, zoom, order=3, mode='constant', cval=0, - prefilter=True, use_gpu=False, keep_size=False): + def __init__(self, keys, zoom, order=3, mode="constant", cval=0, prefilter=True, use_gpu=False, keep_size=False): super().__init__(keys) - self.zoomer = Zoom(zoom=zoom, order=order, mode=mode, cval=cval, - prefilter=prefilter, use_gpu=use_gpu, keep_size=keep_size) + self.zoomer = Zoom( + zoom=zoom, order=order, mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu, keep_size=keep_size + ) def __call__(self, data): d = dict(data) @@ -628,13 +719,22 @@ class RandZoomd(Randomizable, MapTransform): keep_size (bool): Should keep original size (pad if needed). """ - def __init__(self, keys, prob=0.1, min_zoom=0.9, - max_zoom=1.1, order=3, mode='constant', - cval=0, prefilter=True, use_gpu=False, keep_size=False): + def __init__( + self, + keys, + prob=0.1, + min_zoom=0.9, + max_zoom=1.1, + order=3, + mode="constant", + cval=0, + prefilter=True, + use_gpu=False, + keep_size=False, + ): super().__init__(keys) - if hasattr(min_zoom, '__iter__') and \ - hasattr(max_zoom, '__iter__'): - assert len(min_zoom) == len(max_zoom), 'min_zoom and max_zoom must have same length.' + if hasattr(min_zoom, "__iter__") and hasattr(max_zoom, "__iter__"): + assert len(min_zoom) == len(max_zoom), "min_zoom and max_zoom must have same length." self.min_zoom = min_zoom self.max_zoom = max_zoom self.prob = prob @@ -650,7 +750,7 @@ def __init__(self, keys, prob=0.1, min_zoom=0.9, def randomize(self): self._do_transform = self.R.random_sample() < self.prob - if hasattr(self.min_zoom, '__iter__'): + if hasattr(self.min_zoom, "__iter__"): self._zoom = (self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)) else: self._zoom = self.R.uniform(self.min_zoom, self.max_zoom) diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index c1c49770ce..5583bd7473 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -37,7 +37,7 @@ class AsChannelFirst(Transform): """ def __init__(self, channel_dim=-1): - assert isinstance(channel_dim, int) and channel_dim >= -1, 'invalid channel dimension.' 
+ assert isinstance(channel_dim, int) and channel_dim >= -1, "invalid channel dimension." self.channel_dim = channel_dim def __call__(self, img): @@ -60,7 +60,7 @@ class AsChannelLast(Transform): """ def __init__(self, channel_dim=0): - assert isinstance(channel_dim, int) and channel_dim >= -1, 'invalid channel dimension.' + assert isinstance(channel_dim, int) and channel_dim >= -1, "invalid channel dimension." self.channel_dim = channel_dim def __call__(self, img): @@ -96,7 +96,7 @@ class RepeatChannel(Transform): """ def __init__(self, repeats): - assert repeats > 0, 'repeats count must be greater than 0.' + assert repeats > 0, "repeats count must be greater than 0." self.repeats = repeats def __call__(self, img): @@ -116,7 +116,7 @@ def __init__(self, dtype=np.float32): self.dtype = dtype def __call__(self, img): - assert isinstance(img, np.ndarray), 'image must be numpy array.' + assert isinstance(img, np.ndarray), "image must be numpy array." return img.astype(self.dtype) @@ -155,7 +155,7 @@ def __init__(self, dim=None): Default: None (all dimensions of size 1 will be removed) """ if dim is not None: - assert isinstance(dim, int) and dim >= -1, 'invalid channel dimension.' + assert isinstance(dim, int) and dim >= -1, "invalid channel dimension." self.dim = dim def __call__(self, img): diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 35e6a2d138..4c420829df 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -18,8 +18,16 @@ import numpy as np from monai.transforms.compose import MapTransform -from monai.transforms.utility.array import AddChannel, AsChannelFirst, ToTensor, \ - AsChannelLast, CastToType, RepeatChannel, SqueezeDim, SimulateDelay +from monai.transforms.utility.array import ( + AddChannel, + AsChannelFirst, + ToTensor, + AsChannelLast, + CastToType, + RepeatChannel, + SqueezeDim, + SimulateDelay, +) class AsChannelFirstd(MapTransform): diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index 5c5cfd1035..c040741831 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -159,8 +159,9 @@ def one_hot(labels, num_classes): return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype) -def generate_pos_neg_label_crop_centers(label, size, num_samples, pos_ratio, image=None, - image_threshold=0, rand_state=np.random): +def generate_pos_neg_label_crop_centers( + label, size, num_samples, pos_ratio, image=None, image_threshold=0, rand_state=np.random +): """Generate valid sample locations based on image with option for specifying foreground ratio Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W] @@ -176,8 +177,8 @@ def generate_pos_neg_label_crop_centers(label, size, num_samples, pos_ratio, ima rand_state (random.RandomState): numpy randomState object to align with other modules. """ max_size = label.shape[1:] - assert len(max_size) == len(size), 'expected size does not match label dim.' - assert (np.subtract(max_size, size) >= 0).all(), 'proposed roi is larger than image itself.' + assert len(max_size) == len(size), "expected size does not match label dim." + assert (np.subtract(max_size, size) >= 0).all(), "proposed roi is larger than image itself." 
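[reviewer note, not part of the patch] For context on the function being reformatted in this hunk, a minimal usage sketch of `generate_pos_neg_label_crop_centers` is given below. It is illustrative only: the shapes and values are invented, and the returned list of per-sample spatial centers is an assumption based on the signature and docstring above, not something this diff changes.

# Illustrative sketch (not part of the patch): assumed usage of the
# crop-center sampler above, based only on its signature and docstring.
import numpy as np
from monai.transforms.utils import generate_pos_neg_label_crop_centers

label = np.zeros((1, 8, 8, 8))   # [C, H, W, D]: all background ...
label[0, 3:5, 3:5, 3:5] = 1      # ... plus a small foreground cube
centers = generate_pos_neg_label_crop_centers(
    label,
    size=(4, 4, 4),              # crop ROI must fit inside the label volume
    num_samples=4,
    pos_ratio=0.5,               # roughly half the centers land on foreground
    rand_state=np.random.RandomState(0),
)
print(len(centers))              # expected: 4, one spatial center per sample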
# Select subregion to assure valid roi valid_start = np.floor_divide(size, 2) @@ -199,9 +200,12 @@ def generate_pos_neg_label_crop_centers(label, size, num_samples, pos_ratio, ima if not len(fg_indices) or not len(bg_indices): if not len(fg_indices) and not len(bg_indices): - raise ValueError('no sampling location available.') - warnings.warn('N foreground {}, N background {}, unable to generate class balanced samples.'.format( - len(fg_indices), len(bg_indices))) + raise ValueError("no sampling location available.") + warnings.warn( + "N foreground {}, N background {}, unable to generate class balanced samples.".format( + len(fg_indices), len(bg_indices) + ) + ) pos_ratio = 0 if not len(fg_indices) else 1 centers = [] @@ -251,8 +255,8 @@ def create_grid(spatial_size, spacing=None, homogeneous=True, dtype=float): dtype (type): output grid data type. """ spacing = spacing or tuple(1.0 for _ in spatial_size) - ranges = [np.linspace(-(d - 1.) / 2. * s, (d - 1.) / 2. * s, int(d)) for d, s in zip(spatial_size, spacing)] - coords = np.asarray(np.meshgrid(*ranges, indexing='ij'), dtype=dtype) + ranges = [np.linspace(-(d - 1.0) / 2.0 * s, (d - 1.0) / 2.0 * s, int(d)) for d, s in zip(spatial_size, spacing)] + coords = np.asarray(np.meshgrid(*ranges, indexing="ij"), dtype=dtype) if not homogeneous: return coords return np.concatenate([coords, np.ones_like(coords[:1])]) @@ -266,9 +270,9 @@ def create_control_grid(spatial_shape, spacing, homogeneous=True, dtype=float): for d, s in zip(spatial_shape, spacing): d = int(d) if d % 2 == 0: - grid_shape.append(np.ceil((d - 1.) / (2. * s) + 0.5) * 2. + 2.) + grid_shape.append(np.ceil((d - 1.0) / (2.0 * s) + 0.5) * 2.0 + 2.0) else: - grid_shape.append(np.ceil((d - 1.) / (2. * s)) * 2. + 3.) + grid_shape.append(np.ceil((d - 1.0) / (2.0 * s)) * 2.0 + 3.0) return create_grid(grid_shape, spacing, homogeneous, dtype) @@ -286,37 +290,28 @@ def create_rotate(spatial_dims, radians): if spatial_dims == 2: if len(radians) >= 1: sin_, cos_ = np.sin(radians[0]), np.cos(radians[0]) - return np.array([[cos_, -sin_, 0.], [sin_, cos_, 0.], [0., 0., 1.]]) + return np.array([[cos_, -sin_, 0.0], [sin_, cos_, 0.0], [0.0, 0.0, 1.0]]) if spatial_dims == 3: affine = None if len(radians) >= 1: sin_, cos_ = np.sin(radians[0]), np.cos(radians[0]) - affine = np.array([ - [1., 0., 0., 0.], - [0., cos_, -sin_, 0.], - [0., sin_, cos_, 0.], - [0., 0., 0., 1.], - ]) + affine = np.array( + [[1.0, 0.0, 0.0, 0.0], [0.0, cos_, -sin_, 0.0], [0.0, sin_, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]] + ) if len(radians) >= 2: sin_, cos_ = np.sin(radians[1]), np.cos(radians[1]) - affine = affine @ np.array([ - [cos_, 0.0, sin_, 0.], - [0., 1., 0., 0.], - [-sin_, 0., cos_, 0.], - [0., 0., 0., 1.], - ]) + affine = affine @ np.array( + [[cos_, 0.0, sin_, 0.0], [0.0, 1.0, 0.0, 0.0], [-sin_, 0.0, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]] + ) if len(radians) >= 3: sin_, cos_ = np.sin(radians[2]), np.cos(radians[2]) - affine = affine @ np.array([ - [cos_, -sin_, 0., 0.], - [sin_, cos_, 0., 0.], - [0., 0., 1., 0.], - [0., 0., 0., 1.], - ]) + affine = affine @ np.array( + [[cos_, -sin_, 0.0, 0.0], [sin_, cos_, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + ) return affine - raise ValueError('create_rotate got spatial_dims={}, radians={}.'.format(spatial_dims, radians)) + raise ValueError("create_rotate got spatial_dims={}, radians={}.".format(spatial_dims, radians)) def create_shear(spatial_dims, coefs): @@ -330,20 +325,18 @@ def create_shear(spatial_dims, coefs): if spatial_dims == 2: while len(coefs) < 2: 
coefs.append(0.0) - return np.array([ - [1, coefs[0], 0.], - [coefs[1], 1., 0.], - [0., 0., 1.], - ]) + return np.array([[1, coefs[0], 0.0], [coefs[1], 1.0, 0.0], [0.0, 0.0, 1.0]]) if spatial_dims == 3: while len(coefs) < 6: coefs.append(0.0) - return np.array([ - [1., coefs[0], coefs[1], 0.], - [coefs[2], 1., coefs[3], 0.], - [coefs[4], coefs[5], 1., 0.], - [0., 0., 0., 1.], - ]) + return np.array( + [ + [1.0, coefs[0], coefs[1], 0.0], + [coefs[2], 1.0, coefs[3], 0.0], + [coefs[4], coefs[5], 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) raise NotImplementedError @@ -356,8 +349,8 @@ def create_scale(spatial_dims, scaling_factor): """ scaling_factor = list(ensure_tuple(scaling_factor)) while len(scaling_factor) < spatial_dims: - scaling_factor.append(1.) - return np.diag(scaling_factor[:spatial_dims] + [1.]) + scaling_factor.append(1.0) + return np.diag(scaling_factor[:spatial_dims] + [1.0]) def create_translate(spatial_dims, shift): @@ -387,7 +380,7 @@ def generate_spatial_bounding_box(img, select_fn=lambda x: x > 0, channel_indexe of image. if None, select foreground on the whole image. margin (int): add margin to all dims of the bounding box. """ - assert isinstance(margin, int), 'margin must be int type.' + assert isinstance(margin, int), "margin must be int type." data = img[[*(ensure_tuple(channel_indexes))]] if channel_indexes is not None else img data = np.any(select_fn(data), axis=0) nonzero_idx = np.nonzero(data) @@ -395,7 +388,7 @@ def generate_spatial_bounding_box(img, select_fn=lambda x: x > 0, channel_indexe box_start = list() box_end = list() for i in range(data.ndim): - assert len(nonzero_idx[i]) > 0, 'did not find nonzero index at spatial dim {}'.format(i) + assert len(nonzero_idx[i]) > 0, "did not find nonzero index at spatial dim {}".format(i) box_start.append(max(0, np.min(nonzero_idx[i]) - margin)) box_end.append(min(data.shape[i], np.max(nonzero_idx[i]) + margin + 1)) return box_start, box_end diff --git a/monai/utils/aliases.py b/monai/utils/aliases.py index ba474eb260..2e163d4725 100644 --- a/monai/utils/aliases.py +++ b/monai/utils/aliases.py @@ -35,7 +35,7 @@ def _outer(obj): GlobalAliases[n] = obj # set the member list __aliases__ to contain the alias names defined by the decorator for `obj` - obj.__aliases__ = getattr(obj, '__aliases__', ()) + tuple(names) + obj.__aliases__ = getattr(obj, "__aliases__", ()) + tuple(names) return obj @@ -81,7 +81,10 @@ def resolve_name(name): if len(foundmods) > 1: # found multiple declarations with the same name modnames = [m.__name__ for m in foundmods] - msg = "Multiple modules (%r) with declaration name %r found, resolution is ambiguous" % (modnames, name) + msg = "Multiple modules (%r) with declaration name %r found, resolution is ambiguous" % ( + modnames, + name, + ) raise ValueError(msg) else: mods = list(foundmods) diff --git a/monai/utils/misc.py b/monai/utils/misc.py index 1e663622d2..685bdcff3b 100644 --- a/monai/utils/misc.py +++ b/monai/utils/misc.py @@ -72,9 +72,9 @@ def process_bar(index, count, bar_len=30, newline=False): bar_len(int): the total length of the bar on screen, default is 30 char. newline (bool): whether to print in a new line for every index. 
""" - end = '\r' if newline is False else '\r\n' + end = "\r" if newline is False else "\r\n" filled_len = int(bar_len * index // count) - bar = '[' + '=' * filled_len + ' ' * (bar_len - filled_len) + ']' + bar = "[" + "=" * filled_len + " " * (bar_len - filled_len) + "]" print("{}/{} {:s} ".format(index, count, bar), end=end) if index == count: - print('') + print("") diff --git a/monai/utils/module.py b/monai/utils/module.py index a6383f10b7..a7a5b91a9c 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -27,7 +27,7 @@ def _inner(obj): setattr(mod, obj.__name__, obj) # add the aliases for `obj` to the target module - for alias in getattr(obj, '__aliases__', ()): + for alias in getattr(obj, "__aliases__", ()): if not hasattr(mod, alias): setattr(mod, alias, obj) diff --git a/monai/visualize/img2tensorboard.py b/monai/visualize/img2tensorboard.py index 8d8332231c..0b8ffe17ef 100644 --- a/monai/visualize/img2tensorboard.py +++ b/monai/visualize/img2tensorboard.py @@ -27,33 +27,24 @@ def _image3_animated_gif(imp, scale_factor=1): """ (tag, ims) = imp - ims = [ - (np.asarray((ims[:, :, i])) * scale_factor).astype(np.uint8) - for i in range(ims.shape[2]) - ] + ims = [(np.asarray((ims[:, :, i])) * scale_factor).astype(np.uint8) for i in range(ims.shape[2])] ims = [GifImage.fromarray(im) for im in ims] - img_str = b'' + img_str = b"" for b_data in PIL.GifImagePlugin.getheader(ims[0])[0]: img_str += b_data - img_str += b'\x21\xFF\x0B\x4E\x45\x54\x53\x43\x41\x50' \ - b'\x45\x32\x2E\x30\x03\x01\x00\x00\x00' + img_str += b"\x21\xFF\x0B\x4E\x45\x54\x53\x43\x41\x50" b"\x45\x32\x2E\x30\x03\x01\x00\x00\x00" for i in ims: for b_data in PIL.GifImagePlugin.getdata(i): img_str += b_data - img_str += b'\x3B' - summary_image_str = summary_pb2.Summary.Image( - height=10, width=10, colorspace=1, encoded_image_string=img_str) + img_str += b"\x3B" + summary_image_str = summary_pb2.Summary.Image(height=10, width=10, colorspace=1, encoded_image_string=img_str) image_summary = summary_pb2.Summary.Value(tag=tag, image=summary_image_str) return summary_pb2.Summary(value=[image_summary]) -def make_animated_gif_summary(tag, - tensor, - max_out=3, - animation_axes=(3,), - image_axes=(1, 2), - other_indices=None, - scale_factor=1): +def make_animated_gif_summary( + tag, tensor, max_out=3, animation_axes=(3,), image_axes=(1, 2), other_indices=None, scale_factor=1 +): """Creates an animated gif out of an image tensor and returns Summary. 
Args: @@ -68,9 +59,9 @@ def make_animated_gif_summary(tag, """ if max_out == 1: - suffix = '/image' + suffix = "/image" else: - suffix = '/image/{}' + suffix = "/image/{}" if other_indices is None: other_indices = {} axis_order = [0] + animation_axes + image_axes @@ -85,9 +76,7 @@ def make_animated_gif_summary(tag, tensor = tensor[tuple(slicing)] for it_i in range(min(max_out, list(tensor.shape)[0])): - inp = [ - tag + suffix.format(it_i), tensor[it_i, :, :, :] - ] + inp = [tag + suffix.format(it_i), tensor[it_i, :, :, :]] summary_op = _image3_animated_gif(inp, scale_factor) return summary_op @@ -104,9 +93,12 @@ def add_animated_gif(writer, tag, image_tensor, max_out, scale_factor, global_st scale it to displayable range global_step: Global step value to record """ - writer._get_file_writer().add_summary(make_animated_gif_summary(tag, image_tensor, max_out=max_out, - animation_axes=[1], image_axes=[2, 3], - scale_factor=scale_factor), global_step) + writer._get_file_writer().add_summary( + make_animated_gif_summary( + tag, image_tensor, max_out=max_out, animation_axes=[1], image_axes=[2, 3], scale_factor=scale_factor + ), + global_step, + ) def add_animated_gif_no_channels(writer, tag, image_tensor, max_out, scale_factor, global_step=None): @@ -121,13 +113,20 @@ def add_animated_gif_no_channels(writer, tag, image_tensor, max_out, scale_facto scale it to displayable range global_step: Global step value to record """ - writer._get_file_writer().add_summary(make_animated_gif_summary(tag, image_tensor.unsqueeze(0), - max_out=max_out, animation_axes=[1], - image_axes=[2, 3], scale_factor=scale_factor), - global_step) - - -def plot_2d_or_3d_image(data, step, writer, index=0, max_channels=1, max_frames=64, tag='output'): + writer._get_file_writer().add_summary( + make_animated_gif_summary( + tag, + image_tensor.unsqueeze(0), + max_out=max_out, + animation_axes=[1], + image_axes=[2, 3], + scale_factor=scale_factor, + ), + global_step, + ) + + +def plot_2d_or_3d_image(data, step, writer, index=0, max_channels=1, max_frames=64, tag="output"): """Plot 2D or 3D image on the TensorBoard, 3D image will be converted to GIF image. Note: @@ -143,31 +142,31 @@ def plot_2d_or_3d_image(data, step, writer, index=0, max_channels=1, max_frames= max_frames (int): number of frames for 2D-t plot. tag (str): tag of the plotted image on TensorBoard. """ - assert isinstance(writer, SummaryWriter) is True, 'must provide a TensorBoard SummaryWriter.' + assert isinstance(writer, SummaryWriter) is True, "must provide a TensorBoard SummaryWriter." 
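[reviewer note, not part of the patch] A hypothetical call to the `plot_2d_or_3d_image` helper whose body is reformatted in this hunk. The log directory, tensor shapes, and tag are invented, and the sketch assumes the `torch.utils.tensorboard.SummaryWriter` that the assert above checks for.

# Illustrative sketch (not part of the patch): assumed usage of
# plot_2d_or_3d_image, based on its signature and docstring above.
import torch
from torch.utils.tensorboard import SummaryWriter
from monai.visualize.img2tensorboard import plot_2d_or_3d_image

writer = SummaryWriter(log_dir="./runs/example")  # hypothetical log dir
batch = torch.rand(2, 1, 64, 64, 64)              # N, C, H, W, D: a batch of 3D volumes
# index=0 selects the first item; a 3D volume is written as an animated GIF
plot_2d_or_3d_image(batch, step=0, writer=writer, index=0, max_frames=16, tag="val_image")
writer.close()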
d = data[index] if torch.is_tensor(d): d = d.detach().cpu().numpy() if d.ndim == 2: d = rescale_array(d, 0, 1) - dataformats = 'HW' - writer.add_image('{}_{}'.format(tag, dataformats), d, step, dataformats=dataformats) + dataformats = "HW" + writer.add_image("{}_{}".format(tag, dataformats), d, step, dataformats=dataformats) return if d.ndim == 3: if d.shape[0] == 3 and max_channels == 3: # RGB - dataformats = 'CHW' - writer.add_image('{}_{}'.format(tag, dataformats), d, step, dataformats=dataformats) + dataformats = "CHW" + writer.add_image("{}_{}".format(tag, dataformats), d, step, dataformats=dataformats) return for j, d2 in enumerate(d[:max_channels]): d2 = rescale_array(d2, 0, 1) - dataformats = 'HW' - writer.add_image('{}_{}_{}'.format(tag, dataformats, j), d2, step, dataformats=dataformats) + dataformats = "HW" + writer.add_image("{}_{}_{}".format(tag, dataformats, j), d2, step, dataformats=dataformats) return if d.ndim >= 4: spatial = d.shape[-3:] for j, d3 in enumerate(d.reshape([-1] + list(spatial))[:max_channels]): d3 = rescale_array(d3, 0, 255) - add_animated_gif(writer, '{}_HWD_{}'.format(tag, j), d3[None], max_frames, 1.0, step) + add_animated_gif(writer, "{}_HWD_{}".format(tag, j), d3[None], max_frames, 1.0, step) return diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..7a70bae892 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,23 @@ +[tool.black] +line-length = 120 +target-version = ['py36', 'py37', 'py38'] +include = '\.pyi?$' +exclude = ''' +( + /( + # exclude a few common directories in the root of the project + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + )/ + # also separately exclude a file named versioneer.py + | monai/_version.py +) +''' diff --git a/setup.py b/setup.py index bbd10748d6..abdacffdb9 100644 --- a/setup.py +++ b/setup.py @@ -13,8 +13,10 @@ import versioneer -if __name__ == '__main__': - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), - packages=find_packages(exclude=('docs', 'examples', 'tests')), - zip_safe=True) +if __name__ == "__main__": + setup( + version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), + packages=find_packages(exclude=("docs", "examples", "tests")), + zip_safe=True, + ) diff --git a/tests/test_adaptors.py b/tests/test_adaptors.py index d089290d04..a0281051f7 100644 --- a/tests/test_adaptors.py +++ b/tests/test_adaptors.py @@ -17,9 +17,7 @@ class TestAdaptors(unittest.TestCase): - def test_function_signature(self): - def foo(image, label=None, *a, **kw): pass @@ -29,124 +27,110 @@ def test_single_in_single_out(self): def foo(image): return image * 2 - it = itertools.product( - ['image', ['image']], - [None, 'image', ['image'], {'image': 'image'}] - ) + it = itertools.product(["image", ["image"]], [None, "image", ["image"], {"image": "image"}]) for i in it: - d = {'image': 2} + d = {"image": 2} dres = adaptor(foo, i[0], i[1])(d) - self.assertEqual(dres['image'], 4) + self.assertEqual(dres["image"], 4) - d = {'image': 2} - dres = adaptor(foo, 'image')(d) - self.assertEqual(dres['image'], 4) + d = {"image": 2} + dres = adaptor(foo, "image")(d) + self.assertEqual(dres["image"], 4) - d = {'image': 2} - dres = adaptor(foo, 'image', 'image')(d) - self.assertEqual(dres['image'], 4) + d = {"image": 2} + dres = adaptor(foo, "image", "image")(d) + self.assertEqual(dres["image"], 4) - d = {'image': 2} - dres = adaptor(foo, 'image', {'image': 'image'})(d) - self.assertEqual(dres['image'], 4) + d = 
{"image": 2} + dres = adaptor(foo, "image", {"image": "image"})(d) + self.assertEqual(dres["image"], 4) - d = {'img': 2} - dres = adaptor(foo, 'img', {'img': 'image'})(d) - self.assertEqual(dres['img'], 4) + d = {"img": 2} + dres = adaptor(foo, "img", {"img": "image"})(d) + self.assertEqual(dres["img"], 4) - d = {'img': 2} - dres = adaptor(foo, ['img'], {'img': 'image'})(d) - self.assertEqual(dres['img'], 4) + d = {"img": 2} + dres = adaptor(foo, ["img"], {"img": "image"})(d) + self.assertEqual(dres["img"], 4) def test_multi_in_single_out(self): def foo(image, label): return image * label - it = itertools.product( - ['image', ['image']], - [None, ['image', 'label'], {'image': 'image', 'label': 'label'}] - ) + it = itertools.product(["image", ["image"]], [None, ["image", "label"], {"image": "image", "label": "label"}]) for i in it: - d = {'image': 2, 'label': 3} + d = {"image": 2, "label": 3} dres = adaptor(foo, i[0], i[1])(d) - self.assertEqual(dres['image'], 6) - self.assertEqual(dres['label'], 3) + self.assertEqual(dres["image"], 6) + self.assertEqual(dres["label"], 3) it = itertools.product( - ['newimage', ['newimage']], - [None, ['image', 'label'], {'image': 'image', 'label': 'label'}] + ["newimage", ["newimage"]], [None, ["image", "label"], {"image": "image", "label": "label"}] ) for i in it: - d = {'image': 2, 'label': 3} + d = {"image": 2, "label": 3} dres = adaptor(foo, i[0], i[1])(d) - self.assertEqual(dres['image'], 2) - self.assertEqual(dres['label'], 3) - self.assertEqual(dres['newimage'], 6) + self.assertEqual(dres["image"], 2) + self.assertEqual(dres["label"], 3) + self.assertEqual(dres["newimage"], 6) - it = itertools.product( - ['img', ['img']], - [{'img': 'image', 'lbl': 'label'}] - ) + it = itertools.product(["img", ["img"]], [{"img": "image", "lbl": "label"}]) for i in it: - d = {'img': 2, 'lbl': 3} + d = {"img": 2, "lbl": 3} dres = adaptor(foo, i[0], i[1])(d) - self.assertEqual(dres['img'], 6) - self.assertEqual(dres['lbl'], 3) + self.assertEqual(dres["img"], 6) + self.assertEqual(dres["lbl"], 3) def test_default_arg_single_out(self): def foo(a, b=2): return a * b - d = {'a': 5} - dres = adaptor(foo, 'c')(d) - self.assertEqual(dres['c'], 10) + d = {"a": 5} + dres = adaptor(foo, "c")(d) + self.assertEqual(dres["c"], 10) - d = {'b': 5} + d = {"b": 5} with self.assertRaises(TypeError): - dres = adaptor(foo, 'c')(d) + dres = adaptor(foo, "c")(d) def test_multi_out(self): def foo(a, b): return a * b, a / b - d = {'a': 3, 'b': 4} - dres = adaptor(foo, ['c', 'd'])(d) - self.assertEqual(dres['c'], 12) - self.assertEqual(dres['d'], 3 / 4) + d = {"a": 3, "b": 4} + dres = adaptor(foo, ["c", "d"])(d) + self.assertEqual(dres["c"], 12) + self.assertEqual(dres["d"], 3 / 4) def test_dict_out(self): def foo(a): - return {'a': a * 2} + return {"a": a * 2} - d = {'a': 2} - dres = adaptor(foo, {'a': 'a'})(d) - self.assertEqual(dres['a'], 4) + d = {"a": 2} + dres = adaptor(foo, {"a": "a"})(d) + self.assertEqual(dres["a"], 4) - d = {'b': 2} - dres = adaptor(foo, {'a': 'b'}, {'b': 'a'})(d) - self.assertEqual(dres['b'], 4) + d = {"b": 2} + dres = adaptor(foo, {"a": "b"}, {"b": "a"})(d) + self.assertEqual(dres["b"], 4) class TestApplyAlias(unittest.TestCase): - def test_apply_alias(self): - def foo(d): - d['x'] *= 2 + d["x"] *= 2 return d - d = {'a': 1, 'b': 3} - result = apply_alias(foo, {'b': 'x'})(d) - self.assertDictEqual({'a': 1, 'b': 6}, result) + d = {"a": 1, "b": 3} + result = apply_alias(foo, {"b": "x"})(d) + self.assertDictEqual({"a": 1, "b": 6}, result) class 
TestToKwargs(unittest.TestCase): - def test_to_kwargs(self): - def foo(**kwargs): results = {k: v * 2 for k, v in kwargs.items()} return results @@ -155,10 +139,10 @@ def compose_like(fn, data): data = fn(data) return data - d = {'a': 1, 'b': 2} + d = {"a": 1, "b": 2} actual = compose_like(to_kwargs(foo), d) - self.assertDictEqual(actual, {'a': 2, 'b': 4}) + self.assertDictEqual(actual, {"a": 2, "b": 4}) with self.assertRaises(TypeError): actual = compose_like(foo, d) diff --git a/tests/test_add_channeld.py b/tests/test_add_channeld.py index 9b7b37c451..12363d7e9c 100644 --- a/tests/test_add_channeld.py +++ b/tests/test_add_channeld.py @@ -15,23 +15,19 @@ from monai.transforms import AddChanneld TEST_CASE_1 = [ - {'keys': ['img', 'seg']}, - { - 'img': np.array([[0, 1], [1, 2]]), - 'seg': np.array([[0, 1], [1, 2]]) - }, + {"keys": ["img", "seg"]}, + {"img": np.array([[0, 1], [1, 2]]), "seg": np.array([[0, 1], [1, 2]])}, (1, 2, 2), ] class TestAddChanneld(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, input_data, expected_shape): result = AddChanneld(**input_param)(input_data) - self.assertEqual(result['img'].shape, expected_shape) - self.assertEqual(result['seg'].shape, expected_shape) + self.assertEqual(result["img"].shape, expected_shape) + self.assertEqual(result["seg"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_adjust_contrast.py b/tests/test_adjust_contrast.py index 47be3c56c9..287aeca34a 100644 --- a/tests/test_adjust_contrast.py +++ b/tests/test_adjust_contrast.py @@ -15,21 +15,14 @@ from monai.transforms import AdjustContrast from tests.utils import NumpyImageTestCase2D -TEST_CASE_1 = [ - 1.0 -] +TEST_CASE_1 = [1.0] -TEST_CASE_2 = [ - 0.5 -] +TEST_CASE_2 = [0.5] -TEST_CASE_3 = [ - 4.5 -] +TEST_CASE_3 = [4.5] class TestAdjustContrast(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_correct_results(self, gamma): adjuster = AdjustContrast(gamma=gamma) @@ -44,5 +37,5 @@ def test_correct_results(self, gamma): np.testing.assert_allclose(expected, result, rtol=1e-05) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_adjust_contrastd.py b/tests/test_adjust_contrastd.py index bd8d23180b..3111ab51ea 100644 --- a/tests/test_adjust_contrastd.py +++ b/tests/test_adjust_contrastd.py @@ -15,25 +15,18 @@ from monai.transforms import AdjustContrastd from tests.utils import NumpyImageTestCase2D -TEST_CASE_1 = [ - 1.0 -] +TEST_CASE_1 = [1.0] -TEST_CASE_2 = [ - 0.5 -] +TEST_CASE_2 = [0.5] -TEST_CASE_3 = [ - 4.5 -] +TEST_CASE_3 = [4.5] class TestAdjustContrastd(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_correct_results(self, gamma): - adjuster = AdjustContrastd('img', gamma=gamma) - result = adjuster({'img': self.imt}) + adjuster = AdjustContrastd("img", gamma=gamma) + result = adjuster({"img": self.imt}) if gamma == 1.0: expected = self.imt else: @@ -41,8 +34,8 @@ def test_correct_results(self, gamma): img_min = self.imt.min() img_range = self.imt.max() - img_min expected = np.power(((self.imt - img_min) / float(img_range + epsilon)), gamma) * img_range + img_min - np.testing.assert_allclose(expected, result['img'], rtol=1e-05) + np.testing.assert_allclose(expected, result["img"], rtol=1e-05) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_affine.py b/tests/test_affine.py index 
22939dd863..188450d1c1 100644 --- a/tests/test_affine.py +++ b/tests/test_affine.py @@ -19,36 +19,47 @@ TEST_CASES = [ [ - dict(padding_mode='zeros', as_tensor_output=False, device=None), - {'img': np.arange(4).reshape((1, 2, 2)), 'spatial_size': (4, 4)}, - np.array([[[0., 0., 0., 0.], [0., 0., 0.25, 0.], [0., 0.5, 0.75, 0.], [0., 0., 0., 0.]]]) + dict(padding_mode="zeros", as_tensor_output=False, device=None), + {"img": np.arange(4).reshape((1, 2, 2)), "spatial_size": (4, 4)}, + np.array([[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.25, 0.0], [0.0, 0.5, 0.75, 0.0], [0.0, 0.0, 0.0, 0.0]]]), ], [ - dict(rotate_params=[np.pi / 2], padding_mode='zeros', as_tensor_output=False, device=None), - {'img': np.arange(4).reshape((1, 2, 2)), 'spatial_size': (4, 4)}, - np.array([[[0., 0., 0., 0.], [0., 0.5, 0., 0.], [0., 0.75, 0.25, 0.], [0., 0., 0., 0.]]]) + dict(rotate_params=[np.pi / 2], padding_mode="zeros", as_tensor_output=False, device=None), + {"img": np.arange(4).reshape((1, 2, 2)), "spatial_size": (4, 4)}, + np.array([[[0.0, 0.0, 0.0, 0.0], [0.0, 0.5, 0.0, 0.0], [0.0, 0.75, 0.25, 0.0], [0.0, 0.0, 0.0, 0.0]]]), ], [ - dict(padding_mode='zeros', as_tensor_output=False, device=None), - {'img': np.arange(8).reshape((1, 2, 2, 2)), 'spatial_size': (4, 4, 4)}, - np.array([[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0., 0.125, 0.], [0., 0.25, 0.375, 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0.5, 0.625, 0.], [0., 0.75, 0.875, 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]]) + dict(padding_mode="zeros", as_tensor_output=False, device=None), + {"img": np.arange(8).reshape((1, 2, 2, 2)), "spatial_size": (4, 4, 4)}, + np.array( + [ + [ + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.125, 0.0], [0.0, 0.25, 0.375, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.5, 0.625, 0.0], [0.0, 0.75, 0.875, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + ] + ] + ), ], [ - dict(rotate_params=[np.pi / 2], padding_mode='zeros', as_tensor_output=False, device=None), - {'img': np.arange(8).reshape((1, 2, 2, 2)), 'spatial_size': (4, 4, 4)}, - np.array([[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0.25, 0., 0.], [0., 0.375, 0.125, 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0.75, 0.5, 0.], [0., 0.875, 0.625, 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]]) + dict(rotate_params=[np.pi / 2], padding_mode="zeros", as_tensor_output=False, device=None), + {"img": np.arange(8).reshape((1, 2, 2, 2)), "spatial_size": (4, 4, 4)}, + np.array( + [ + [ + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.25, 0.0, 0.0], [0.0, 0.375, 0.125, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.75, 0.5, 0.0], [0.0, 0.875, 0.625, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + ] + ] + ), ], ] class TestAffine(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_affine(self, input_param, input_data, expected_val): g = Affine(**input_param) @@ -60,5 +71,5 @@ def test_affine(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if 
__name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_affine_grid.py b/tests/test_affine_grid.py index 6af10604b3..8a24501f22 100644 --- a/tests/test_affine_grid.py +++ b/tests/test_affine_grid.py @@ -18,48 +18,77 @@ from monai.transforms import AffineGrid TEST_CASES = [ - [{'as_tensor_output': False, 'device': torch.device('cpu:0')}, {'spatial_size': (2, 2)}, - np.array([[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]], [[1., 1.], [1., 1.]]])], - [{'as_tensor_output': True, 'device': None}, {'spatial_size': (2, 2)}, - torch.tensor([[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]], [[1., 1.], [1., 1.]]])], - [{'as_tensor_output': False, 'device': None}, {'grid': np.ones((3, 3, 3))}, - np.ones((3, 3, 3))], - [{'as_tensor_output': True, 'device': torch.device('cpu:0')}, {'grid': np.ones((3, 3, 3))}, - torch.ones((3, 3, 3))], - [{'as_tensor_output': False, 'device': None}, {'grid': torch.ones((3, 3, 3))}, - np.ones((3, 3, 3))], - [{'as_tensor_output': True, 'device': torch.device('cpu:0')}, {'grid': torch.ones((3, 3, 3))}, - torch.ones((3, 3, 3))], - [{'rotate_params': (1., 1.), 'scale_params': (-20, 10), 'as_tensor_output': True, 'device': torch.device('cpu:0')}, - {'grid': torch.ones((3, 3, 3))}, - torch.tensor([[[-19.2208, -19.2208, -19.2208], [-19.2208, -19.2208, -19.2208], [-19.2208, -19.2208, -19.2208]], - [[-11.4264, -11.4264, -11.4264], [-11.4264, -11.4264, -11.4264], [-11.4264, -11.4264, -11.4264]], - [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]])], + [ + {"as_tensor_output": False, "device": torch.device("cpu:0")}, + {"spatial_size": (2, 2)}, + np.array([[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]], [[1.0, 1.0], [1.0, 1.0]]]), + ], + [ + {"as_tensor_output": True, "device": None}, + {"spatial_size": (2, 2)}, + torch.tensor([[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]], [[1.0, 1.0], [1.0, 1.0]]]), + ], + [{"as_tensor_output": False, "device": None}, {"grid": np.ones((3, 3, 3))}, np.ones((3, 3, 3))], + [{"as_tensor_output": True, "device": torch.device("cpu:0")}, {"grid": np.ones((3, 3, 3))}, torch.ones((3, 3, 3))], + [{"as_tensor_output": False, "device": None}, {"grid": torch.ones((3, 3, 3))}, np.ones((3, 3, 3))], + [ + {"as_tensor_output": True, "device": torch.device("cpu:0")}, + {"grid": torch.ones((3, 3, 3))}, + torch.ones((3, 3, 3)), + ], [ { - 'rotate_params': (1., 1., 1.), 'scale_params': (-20, 10), 'as_tensor_output': True, 'device': - torch.device('cpu:0') + "rotate_params": (1.0, 1.0), + "scale_params": (-20, 10), + "as_tensor_output": True, + "device": torch.device("cpu:0"), }, - {'grid': torch.ones((4, 3, 3, 3))}, - torch.tensor([[[[-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435]], - [[-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435]], - [[-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435]]], - [[[-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381]], - [[-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381]], - [[-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, - -20.2381]]], - [[[-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844]], - [[-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844]], - [[-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844]]], - [[[1.0000, 1.0000, 1.0000], [1.0000, 
1.0000, 1.0000], [1.0000, 1.0000, 1.0000]], - [[1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000]], - [[1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000]]]]), + {"grid": torch.ones((3, 3, 3))}, + torch.tensor( + [ + [[-19.2208, -19.2208, -19.2208], [-19.2208, -19.2208, -19.2208], [-19.2208, -19.2208, -19.2208]], + [[-11.4264, -11.4264, -11.4264], [-11.4264, -11.4264, -11.4264], [-11.4264, -11.4264, -11.4264]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ), + ], + [ + { + "rotate_params": (1.0, 1.0, 1.0), + "scale_params": (-20, 10), + "as_tensor_output": True, + "device": torch.device("cpu:0"), + }, + {"grid": torch.ones((4, 3, 3, 3))}, + torch.tensor( + [ + [ + [[-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435]], + [[-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435]], + [[-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435], [-9.5435, -9.5435, -9.5435]], + ], + [ + [[-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381]], + [[-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381]], + [[-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381], [-20.2381, -20.2381, -20.2381]], + ], + [ + [[-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844]], + [[-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844]], + [[-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844], [-0.5844, -0.5844, -0.5844]], + ], + [ + [[1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000]], + [[1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000]], + [[1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000], [1.0000, 1.0000, 1.0000]], + ], + ] + ), ], ] class TestAffineGrid(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_affine_grid(self, input_param, input_data, expected_val): g = AffineGrid(**input_param) @@ -71,5 +100,5 @@ def test_affine_grid(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_as_channel_first.py b/tests/test_as_channel_first.py index f194a6d5d4..ac66aa9518 100644 --- a/tests/test_as_channel_first.py +++ b/tests/test_as_channel_first.py @@ -14,30 +14,14 @@ from parameterized import parameterized from monai.transforms import AsChannelFirst -TEST_CASE_1 = [ - { - 'channel_dim': -1 - }, - (4, 1, 2, 3) -] - -TEST_CASE_2 = [ - { - 'channel_dim': 3 - }, - (4, 1, 2, 3) -] - -TEST_CASE_3 = [ - { - 'channel_dim': 2 - }, - (3, 1, 2, 4) -] +TEST_CASE_1 = [{"channel_dim": -1}, (4, 1, 2, 3)] +TEST_CASE_2 = [{"channel_dim": 3}, (4, 1, 2, 3)] + +TEST_CASE_3 = [{"channel_dim": 2}, (3, 1, 2, 4)] -class TestAsChannelFirst(unittest.TestCase): +class TestAsChannelFirst(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_param, expected_shape): test_data = np.random.randint(0, 2, size=[1, 2, 3, 4]) @@ -45,5 +29,5 @@ def test_shape(self, input_param, expected_shape): self.assertTupleEqual(result.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_as_channel_firstd.py b/tests/test_as_channel_firstd.py index 79dc1d2e8c..748969a75a 100644 --- a/tests/test_as_channel_firstd.py +++ 
b/tests/test_as_channel_firstd.py @@ -14,45 +14,26 @@ from parameterized import parameterized from monai.transforms import AsChannelFirstd -TEST_CASE_1 = [ - { - 'keys': ['image', 'label', 'extra'], - 'channel_dim': -1 - }, - (4, 1, 2, 3) -] - -TEST_CASE_2 = [ - { - 'keys': ['image', 'label', 'extra'], - 'channel_dim': 3 - }, - (4, 1, 2, 3) -] - -TEST_CASE_3 = [ - { - 'keys': ['image', 'label', 'extra'], - 'channel_dim': 2 - }, - (3, 1, 2, 4) -] +TEST_CASE_1 = [{"keys": ["image", "label", "extra"], "channel_dim": -1}, (4, 1, 2, 3)] +TEST_CASE_2 = [{"keys": ["image", "label", "extra"], "channel_dim": 3}, (4, 1, 2, 3)] + +TEST_CASE_3 = [{"keys": ["image", "label", "extra"], "channel_dim": 2}, (3, 1, 2, 4)] -class TestAsChannelFirstd(unittest.TestCase): +class TestAsChannelFirstd(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_param, expected_shape): test_data = { - 'image': np.random.randint(0, 2, size=[1, 2, 3, 4]), - 'label': np.random.randint(0, 2, size=[1, 2, 3, 4]), - 'extra': np.random.randint(0, 2, size=[1, 2, 3, 4]) + "image": np.random.randint(0, 2, size=[1, 2, 3, 4]), + "label": np.random.randint(0, 2, size=[1, 2, 3, 4]), + "extra": np.random.randint(0, 2, size=[1, 2, 3, 4]), } result = AsChannelFirstd(**input_param)(test_data) - self.assertTupleEqual(result['image'].shape, expected_shape) - self.assertTupleEqual(result['label'].shape, expected_shape) - self.assertTupleEqual(result['extra'].shape, expected_shape) + self.assertTupleEqual(result["image"].shape, expected_shape) + self.assertTupleEqual(result["label"].shape, expected_shape) + self.assertTupleEqual(result["extra"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_as_channel_last.py b/tests/test_as_channel_last.py index b15175eadf..02386e702d 100644 --- a/tests/test_as_channel_last.py +++ b/tests/test_as_channel_last.py @@ -14,30 +14,14 @@ from parameterized import parameterized from monai.transforms import AsChannelLast -TEST_CASE_1 = [ - { - 'channel_dim': 0 - }, - (2, 3, 4, 1) -] - -TEST_CASE_2 = [ - { - 'channel_dim': 1 - }, - (1, 3, 4, 2) -] - -TEST_CASE_3 = [ - { - 'channel_dim': 3 - }, - (1, 2, 3, 4) -] +TEST_CASE_1 = [{"channel_dim": 0}, (2, 3, 4, 1)] +TEST_CASE_2 = [{"channel_dim": 1}, (1, 3, 4, 2)] + +TEST_CASE_3 = [{"channel_dim": 3}, (1, 2, 3, 4)] -class TestAsChannelLast(unittest.TestCase): +class TestAsChannelLast(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_param, expected_shape): test_data = np.random.randint(0, 2, size=[1, 2, 3, 4]) @@ -45,5 +29,5 @@ def test_shape(self, input_param, expected_shape): self.assertTupleEqual(result.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_as_channel_lastd.py b/tests/test_as_channel_lastd.py index 2ff4bda945..d30aca5a24 100644 --- a/tests/test_as_channel_lastd.py +++ b/tests/test_as_channel_lastd.py @@ -14,45 +14,26 @@ from parameterized import parameterized from monai.transforms import AsChannelLastd -TEST_CASE_1 = [ - { - 'keys': ['image', 'label', 'extra'], - 'channel_dim': 0 - }, - (2, 3, 4, 1) -] - -TEST_CASE_2 = [ - { - 'keys': ['image', 'label', 'extra'], - 'channel_dim': 1 - }, - (1, 3, 4, 2) -] - -TEST_CASE_3 = [ - { - 'keys': ['image', 'label', 'extra'], - 'channel_dim': 3 - }, - (1, 2, 3, 4) -] +TEST_CASE_1 = [{"keys": ["image", "label", "extra"], "channel_dim": 0}, (2, 3, 4, 1)] +TEST_CASE_2 = 
[{"keys": ["image", "label", "extra"], "channel_dim": 1}, (1, 3, 4, 2)] + +TEST_CASE_3 = [{"keys": ["image", "label", "extra"], "channel_dim": 3}, (1, 2, 3, 4)] -class TestAsChannelLastd(unittest.TestCase): +class TestAsChannelLastd(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_param, expected_shape): test_data = { - 'image': np.random.randint(0, 2, size=[1, 2, 3, 4]), - 'label': np.random.randint(0, 2, size=[1, 2, 3, 4]), - 'extra': np.random.randint(0, 2, size=[1, 2, 3, 4]) + "image": np.random.randint(0, 2, size=[1, 2, 3, 4]), + "label": np.random.randint(0, 2, size=[1, 2, 3, 4]), + "extra": np.random.randint(0, 2, size=[1, 2, 3, 4]), } result = AsChannelLastd(**input_param)(test_data) - self.assertTupleEqual(result['image'].shape, expected_shape) - self.assertTupleEqual(result['label'].shape, expected_shape) - self.assertTupleEqual(result['extra'].shape, expected_shape) + self.assertTupleEqual(result["image"].shape, expected_shape) + self.assertTupleEqual(result["label"].shape, expected_shape) + self.assertTupleEqual(result["extra"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_cachedataset.py b/tests/test_cachedataset.py index 14e32e1234..18c67e0ef6 100644 --- a/tests/test_cachedataset.py +++ b/tests/test_cachedataset.py @@ -19,49 +19,45 @@ from monai.data import CacheDataset from monai.transforms import Compose, LoadNiftid -TEST_CASE_1 = [ - (128, 128, 128) -] +TEST_CASE_1 = [(128, 128, 128)] -class TestCacheDataset(unittest.TestCase): +class TestCacheDataset(unittest.TestCase): @parameterized.expand([TEST_CASE_1]) def test_shape(self, expected_shape): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4)) tempdir = tempfile.mkdtemp() - nib.save(test_image, os.path.join(tempdir, 'test_image1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_label1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_image2.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_label2.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra2.nii.gz')) + nib.save(test_image, os.path.join(tempdir, "test_image1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_image2.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label2.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra2.nii.gz")) test_data = [ { - 'image': os.path.join(tempdir, 'test_image1.nii.gz'), - 'label': os.path.join(tempdir, 'test_label1.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra1.nii.gz') + "image": os.path.join(tempdir, "test_image1.nii.gz"), + "label": os.path.join(tempdir, "test_label1.nii.gz"), + "extra": os.path.join(tempdir, "test_extra1.nii.gz"), }, { - 'image': os.path.join(tempdir, 'test_image2.nii.gz'), - 'label': os.path.join(tempdir, 'test_label2.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra2.nii.gz') - } + "image": os.path.join(tempdir, "test_image2.nii.gz"), + "label": os.path.join(tempdir, "test_label2.nii.gz"), + "extra": os.path.join(tempdir, "test_extra2.nii.gz"), + }, ] dataset = CacheDataset( - data=test_data, - transform=Compose([LoadNiftid(keys=['image', 'label', 'extra'])]), - cache_rate=0.5 + data=test_data, transform=Compose([LoadNiftid(keys=["image", 
"label", "extra"])]), cache_rate=0.5 ) data1 = dataset[0] data2 = dataset[1] shutil.rmtree(tempdir) - self.assertTupleEqual(data1['image'].shape, expected_shape) - self.assertTupleEqual(data1['label'].shape, expected_shape) - self.assertTupleEqual(data1['extra'].shape, expected_shape) - self.assertTupleEqual(data2['image'].shape, expected_shape) - self.assertTupleEqual(data2['label'].shape, expected_shape) - self.assertTupleEqual(data2['extra'].shape, expected_shape) + self.assertTupleEqual(data1["image"].shape, expected_shape) + self.assertTupleEqual(data1["label"].shape, expected_shape) + self.assertTupleEqual(data1["extra"].shape, expected_shape) + self.assertTupleEqual(data2["image"].shape, expected_shape) + self.assertTupleEqual(data2["label"].shape, expected_shape) + self.assertTupleEqual(data2["extra"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_cachedataset_parallel.py b/tests/test_cachedataset_parallel.py index aa0b4a7a9c..10e15e8163 100644 --- a/tests/test_cachedataset_parallel.py +++ b/tests/test_cachedataset_parallel.py @@ -19,41 +19,37 @@ from monai.data import CacheDataset from monai.transforms import Compose, LoadNiftid -TEST_CASE_1 = [ - 0, 100 -] +TEST_CASE_1 = [0, 100] -TEST_CASE_2 = [ - 4, 100 -] +TEST_CASE_2 = [4, 100] class TestCacheDatasetParallel(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_shape(self, num_workers, dataset_size): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4)) tempdir = tempfile.mkdtemp() - nib.save(test_image, os.path.join(tempdir, 'test_image1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_label1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra1.nii.gz')) + nib.save(test_image, os.path.join(tempdir, "test_image1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra1.nii.gz")) test_data = [ { - 'image': os.path.join(tempdir, 'test_image1.nii.gz'), - 'label': os.path.join(tempdir, 'test_label1.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra1.nii.gz') + "image": os.path.join(tempdir, "test_image1.nii.gz"), + "label": os.path.join(tempdir, "test_label1.nii.gz"), + "extra": os.path.join(tempdir, "test_extra1.nii.gz"), } ] * dataset_size dataset = CacheDataset( data=test_data, - transform=Compose([LoadNiftid(keys=['image', 'label', 'extra'])]), + transform=Compose([LoadNiftid(keys=["image", "label", "extra"])]), cache_rate=1, - num_workers=num_workers + num_workers=num_workers, ) shutil.rmtree(tempdir) self.assertEqual(len(dataset._cache), dataset.cache_num) for i in range(dataset.cache_num): self.assertIsNotNone(dataset._cache[i]) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests/test_cast_to_type.py b/tests/test_cast_to_type.py index b564f72d3b..00e5823866 100644 --- a/tests/test_cast_to_type.py +++ b/tests/test_cast_to_type.py @@ -14,22 +14,15 @@ from parameterized import parameterized from monai.transforms import CastToType -TEST_CASE_1 = [ - { - 'dtype': np.float64 - }, - np.array([[0, 1], [1, 2]], dtype=np.float32), - np.float64 -] +TEST_CASE_1 = [{"dtype": np.float64}, np.array([[0, 1], [1, 2]], dtype=np.float32), np.float64] class TestCastToType(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_type(self, input_param, input_data, expected_type): result = CastToType(**input_param)(input_data) self.assertEqual(result.dtype, 
expected_type) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_cast_to_typed.py b/tests/test_cast_to_typed.py index 983a84ba7d..b374379ad2 100644 --- a/tests/test_cast_to_typed.py +++ b/tests/test_cast_to_typed.py @@ -15,23 +15,13 @@ from monai.transforms import CastToTyped TEST_CASE_1 = [ - { - 'keys': ['img'], - 'dtype': np.float64 - }, - { - 'img': np.array([[0, 1], [1, 2]], dtype=np.float32), - 'seg': np.array([[0, 1], [1, 2]], dtype=np.int8) - }, - { - 'img': np.float64, - 'seg': np.int8 - } + {"keys": ["img"], "dtype": np.float64}, + {"img": np.array([[0, 1], [1, 2]], dtype=np.float32), "seg": np.array([[0, 1], [1, 2]], dtype=np.int8)}, + {"img": np.float64, "seg": np.int8}, ] class TestCastToTyped(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_type(self, input_param, input_data, expected_type): result = CastToTyped(**input_param)(input_data) @@ -39,5 +29,5 @@ def test_type(self, input_param, input_data, expected_type): self.assertEqual(v.dtype, expected_type[k]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_center_spatial_crop.py b/tests/test_center_spatial_crop.py index b744a41328..f82ca34b3d 100644 --- a/tests/test_center_spatial_crop.py +++ b/tests/test_center_spatial_crop.py @@ -14,38 +14,16 @@ from parameterized import parameterized from monai.transforms import CenterSpatialCrop -TEST_CASE_1 = [ - { - 'roi_size': [2, 2, 2] - }, - np.random.randint(0, 2, size=[3, 3, 3, 3]), - (3, 2, 2, 2) -] +TEST_CASE_1 = [{"roi_size": [2, 2, 2]}, np.random.randint(0, 2, size=[3, 3, 3, 3]), (3, 2, 2, 2)] TEST_CASE_2 = [ - { - 'roi_size': [2, 2] - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - np.array([ - [ - [1, 2], - [2, 3] - ] - ]), + {"roi_size": [2, 2]}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]), + np.array([[[1, 2], [2, 3]]]), ] class TestCenterSpatialCrop(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, input_data, expected_shape): result = CenterSpatialCrop(**input_param)(input_data) @@ -57,5 +35,5 @@ def test_value(self, input_param, input_data, expected_value): np.testing.assert_allclose(result, expected_value) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_center_spatial_cropd.py b/tests/test_center_spatial_cropd.py index 393f0a59c5..f27e1f6329 100644 --- a/tests/test_center_spatial_cropd.py +++ b/tests/test_center_spatial_cropd.py @@ -15,51 +15,29 @@ from monai.transforms import CenterSpatialCropd TEST_CASE_1 = [ - { - 'keys': 'img', - 'roi_size': [2, 2, 2] - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, - (3, 2, 2, 2) + {"keys": "img", "roi_size": [2, 2, 2]}, + {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])}, + (3, 2, 2, 2), ] TEST_CASE_2 = [ - { - 'keys': 'img', - 'roi_size': [2, 2] - }, - { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]) - }, - np.array([ - [ - [1, 2], - [2, 3] - ] - ]), + {"keys": "img", "roi_size": [2, 2]}, + {"img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])}, + np.array([[[1, 2], [2, 3]]]), ] class TestCenterSpatialCropd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, input_data, expected_shape): result = 
CenterSpatialCropd(**input_param)(input_data) - self.assertTupleEqual(result['img'].shape, expected_shape) + self.assertTupleEqual(result["img"].shape, expected_shape) @parameterized.expand([TEST_CASE_2]) def test_value(self, input_param, input_data, expected_value): result = CenterSpatialCropd(**input_param)(input_data) - np.testing.assert_allclose(result['img'], expected_value) + np.testing.assert_allclose(result["img"], expected_value) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_compose.py b/tests/test_compose.py index b4a56da0a8..af05869b18 100644 --- a/tests/test_compose.py +++ b/tests/test_compose.py @@ -15,7 +15,6 @@ class TestCompose(unittest.TestCase): - def test_empty_compose(self): c = Compose() i = 1 @@ -23,52 +22,51 @@ def test_empty_compose(self): def test_non_dict_compose(self): def a(i): - return i + 'a' + return i + "a" def b(i): - return i + 'b' + return i + "b" c = Compose([a, b, a, b]) - self.assertEqual(c(''), 'abab') + self.assertEqual(c(""), "abab") def test_dict_compose(self): def a(d): d = dict(d) - d['a'] += 1 + d["a"] += 1 return d def b(d): d = dict(d) - d['b'] += 1 + d["b"] += 1 return d c = Compose([a, b, a, b, a]) - self.assertDictEqual(c({'a': 0, 'b': 0}), {'a': 3, 'b': 2}) + self.assertDictEqual(c({"a": 0, "b": 0}), {"a": 3, "b": 2}) def test_list_dict_compose(self): def a(d): # transform to handle dict data d = dict(d) - d['a'] += 1 + d["a"] += 1 return d def b(d): # transform to generate a batch list of data d = dict(d) - d['b'] += 1 + d["b"] += 1 d = [d] * 5 return d def c(d): # transform to handle dict data d = dict(d) - d['c'] += 1 + d["c"] += 1 return d transforms = Compose([a, a, b, c, c]) - value = transforms({'a': 0, 'b': 0, 'c': 0}) + value = transforms({"a": 0, "b": 0, "c": 0}) for item in value: - self.assertDictEqual(item, {'a': 2, 'b': 1, 'c': 2}) + self.assertDictEqual(item, {"a": 2, "b": 1, "c": 2}) def test_random_compose(self): - class _Acc(Randomizable): self.rand = 0.0 @@ -88,9 +86,7 @@ def __call__(self, data): self.assertAlmostEqual(c(1), 2.57673391) def test_randomize_warn(self): - class _RandomClass(Randomizable): - def randomize(self, foo): pass @@ -99,5 +95,5 @@ def randomize(self, foo): c.randomize() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_compute_meandice.py b/tests/test_compute_meandice.py index 0ee28456b7..2adb477e8f 100644 --- a/tests/test_compute_meandice.py +++ b/tests/test_compute_meandice.py @@ -20,13 +20,13 @@ # keep background TEST_CASE_1 = [ # y (1, 1, 2, 2), y_pred (1, 1, 2, 2), expected out (1, 1) { - 'y_pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'y': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'include_background': True, - 'to_onehot_y': False, - 'mutually_exclusive': False, - 'logit_thresh': 0.5, - 'add_sigmoid': True, + "y_pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "y": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "include_background": True, + "to_onehot_y": False, + "mutually_exclusive": False, + "logit_thresh": 0.5, + "add_sigmoid": True, }, [[0.8]], ] @@ -34,17 +34,16 @@ # remove background and not One-Hot target TEST_CASE_2 = [ # y (2, 1, 2, 2), y_pred (2, 3, 2, 2), expected out (2, 2) (no background) { - 'y_pred': - torch.tensor([[[[-1., 3.], [2., -4.]], [[0., -1.], [3., 2.]], [[0., 1.], [2., -1.]]], - [[[-2., 0.], [3., 1.]], [[0., 2.], [1., -2.]], [[-1., 2.], [4., 0.]]]]), - 'y': - torch.tensor([[[[1., 2.], [1., 0.]]], [[[1., 1.], [2., 0.]]]]), - 'include_background': - False, - 
'to_onehot_y': - True, - 'mutually_exclusive': - True, + "y_pred": torch.tensor( + [ + [[[-1.0, 3.0], [2.0, -4.0]], [[0.0, -1.0], [3.0, 2.0]], [[0.0, 1.0], [2.0, -1.0]]], + [[[-2.0, 0.0], [3.0, 1.0]], [[0.0, 2.0], [1.0, -2.0]], [[-1.0, 2.0], [4.0, 0.0]]], + ] + ), + "y": torch.tensor([[[[1.0, 2.0], [1.0, 0.0]]], [[[1.0, 1.0], [2.0, 0.0]]]]), + "include_background": False, + "to_onehot_y": True, + "mutually_exclusive": True, }, [[0.5000, 0.0000], [0.6666, 0.6666]], ] @@ -52,23 +51,17 @@ # should return Nan for all labels=0 case and skip for MeanDice TEST_CASE_3 = [ { - 'y_pred': - torch.zeros(2, 3, 2, 2), - 'y': - torch.tensor([[[[0., 0.], [0., 0.]]], [[[1., 0.], [0., 1.]]]]), - 'include_background': - True, - 'to_onehot_y': - True, - 'mutually_exclusive': - True, + "y_pred": torch.zeros(2, 3, 2, 2), + "y": torch.tensor([[[[0.0, 0.0], [0.0, 0.0]]], [[[1.0, 0.0], [0.0, 1.0]]]]), + "include_background": True, + "to_onehot_y": True, + "mutually_exclusive": True, }, [[False, True, True], [False, False, True]], ] class TestComputeMeanDice(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_value(self, input_data, expected_value): result = compute_meandice(**input_data) @@ -80,5 +73,5 @@ def test_nans(self, input_data, expected_value): self.assertTrue(np.allclose(np.isnan(result.cpu().numpy()), expected_value)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_compute_roc_auc.py b/tests/test_compute_roc_auc.py index c71b558ccd..88d21f03aa 100644 --- a/tests/test_compute_roc_auc.py +++ b/tests/test_compute_roc_auc.py @@ -19,77 +19,58 @@ TEST_CASE_1 = [ { - 'y_pred': torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), - 'y': torch.tensor([[0], [1], [0], [1]]), - 'to_onehot_y': True, - 'add_softmax': True, + "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + "y": torch.tensor([[0], [1], [0], [1]]), + "to_onehot_y": True, + "add_softmax": True, }, - 0.75 + 0.75, ] -TEST_CASE_2 = [ - { - 'y_pred': torch.tensor([[0.5], [0.5], [0.2], [8.3]]), - 'y': torch.tensor([[0], [1], [0], [1]]) - }, - 0.875 -] +TEST_CASE_2 = [{"y_pred": torch.tensor([[0.5], [0.5], [0.2], [8.3]]), "y": torch.tensor([[0], [1], [0], [1]])}, 0.875] -TEST_CASE_3 = [ - { - 'y_pred': torch.tensor([[0.5], [0.5], [0.2], [8.3]]), - 'y': torch.tensor([0, 1, 0, 1]) - }, - 0.875 -] +TEST_CASE_3 = [{"y_pred": torch.tensor([[0.5], [0.5], [0.2], [8.3]]), "y": torch.tensor([0, 1, 0, 1])}, 0.875] -TEST_CASE_4 = [ - { - 'y_pred': torch.tensor([0.5, 0.5, 0.2, 8.3]), - 'y': torch.tensor([0, 1, 0, 1]) - }, - 0.875 -] +TEST_CASE_4 = [{"y_pred": torch.tensor([0.5, 0.5, 0.2, 8.3]), "y": torch.tensor([0, 1, 0, 1])}, 0.875] TEST_CASE_5 = [ { - 'y_pred': torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), - 'y': torch.tensor([[0], [1], [0], [1]]), - 'to_onehot_y': True, - 'add_softmax': True, - 'average': None + "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + "y": torch.tensor([[0], [1], [0], [1]]), + "to_onehot_y": True, + "add_softmax": True, + "average": None, }, - [0.75, 0.75] + [0.75, 0.75], ] TEST_CASE_6 = [ { - 'y_pred': torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), - 'y': torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), - 'add_softmax': True, - 'average': 'weighted' + "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), + "y": torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), + "add_softmax": True, + "average": "weighted", }, 
- 0.56667 + 0.56667, ] TEST_CASE_7 = [ { - 'y_pred': torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), - 'y': torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), - 'add_softmax': True, - 'average': 'micro' + "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), + "y": torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), + "add_softmax": True, + "average": "micro", }, - 0.62 + 0.62, ] class TestComputeROCAUC(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) def test_value(self, input_data, expected_value): result = compute_roc_auc(**input_data) np.testing.assert_allclose(expected_value, result, rtol=1e-5) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_create_grid_and_affine.py b/tests/test_create_grid_and_affine.py index d858f1fb0e..930558042d 100644 --- a/tests/test_create_grid_and_affine.py +++ b/tests/test_create_grid_and_affine.py @@ -13,104 +13,136 @@ import numpy as np -from monai.transforms import (create_control_grid, create_grid, create_rotate, create_scale, create_shear, - create_translate) +from monai.transforms import ( + create_control_grid, + create_grid, + create_rotate, + create_scale, + create_shear, + create_translate, +) class TestCreateGrid(unittest.TestCase): - def test_create_grid(self): - with self.assertRaisesRegex(TypeError, ''): + with self.assertRaisesRegex(TypeError, ""): create_grid(None) - with self.assertRaisesRegex(TypeError, ''): - create_grid((1, 1), spacing=2.) - with self.assertRaisesRegex(TypeError, ''): - create_grid((1, 1), spacing=2.) + with self.assertRaisesRegex(TypeError, ""): + create_grid((1, 1), spacing=2.0) + with self.assertRaisesRegex(TypeError, ""): + create_grid((1, 1), spacing=2.0) g = create_grid((1, 1)) - expected = np.array([[[0.]], [[0.]], [[1.]]]) + expected = np.array([[[0.0]], [[0.0]], [[1.0]]]) np.testing.assert_allclose(g, expected) g = create_grid((1, 1), homogeneous=False) - expected = np.array([[[0.]], [[0.]]]) + expected = np.array([[[0.0]], [[0.0]]]) np.testing.assert_allclose(g, expected) g = create_grid((1, 1), spacing=(1.2, 1.3)) - expected = np.array([[[0.]], [[0.]], [[1.]]]) + expected = np.array([[[0.0]], [[0.0]], [[1.0]]]) np.testing.assert_allclose(g, expected) g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0)) - expected = np.array([[[[0.]]], [[[0.]]], [[[0.]]], [[[1.]]]]) + expected = np.array([[[[0.0]]], [[[0.0]]], [[[0.0]]], [[[1.0]]]]) np.testing.assert_allclose(g, expected) g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0), homogeneous=False) - expected = np.array([[[[0.]]], [[[0.]]], [[[0.]]]]) + expected = np.array([[[[0.0]]], [[[0.0]]], [[[0.0]]]]) np.testing.assert_allclose(g, expected) g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0), dtype=np.int32) np.testing.assert_equal(g.dtype, np.int32) g = create_grid((2, 2, 2)) - expected = np.array([[[[-0.5, -0.5], [-0.5, -0.5]], [[0.5, 0.5], [0.5, 0.5]]], - [[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, -0.5], [0.5, 0.5]]], - [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], - [[[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]]]]) + expected = np.array( + [ + [[[-0.5, -0.5], [-0.5, -0.5]], [[0.5, 0.5], [0.5, 0.5]]], + [[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, -0.5], [0.5, 0.5]]], + [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], + [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]], + ] + ) np.testing.assert_allclose(g, expected) g = create_grid((2, 2, 2), 
spacing=(1.2, 1.3, 1.0)) - expected = np.array([[[[-0.6, -0.6], [-0.6, -0.6]], [[0.6, 0.6], [0.6, 0.6]]], - [[[-0.65, -0.65], [0.65, 0.65]], [[-0.65, -0.65], [0.65, 0.65]]], - [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], - [[[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]]]]) + expected = np.array( + [ + [[[-0.6, -0.6], [-0.6, -0.6]], [[0.6, 0.6], [0.6, 0.6]]], + [[[-0.65, -0.65], [0.65, 0.65]], [[-0.65, -0.65], [0.65, 0.65]]], + [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], + [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]], + ] + ) np.testing.assert_allclose(g, expected) def test_create_control_grid(self): - with self.assertRaisesRegex(TypeError, ''): + with self.assertRaisesRegex(TypeError, ""): create_control_grid(None, None) - with self.assertRaisesRegex(TypeError, ''): - create_control_grid((1, 1), 2.) - - g = create_control_grid((1., 1.), (1., 1.)) - expected = np.array([ - [[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]], - [[-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.]], - [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], - ]) + with self.assertRaisesRegex(TypeError, ""): + create_control_grid((1, 1), 2.0) + + g = create_control_grid((1.0, 1.0), (1.0, 1.0)) + expected = np.array( + [ + [[-1.0, -1.0, -1.0], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], + [[-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ) np.testing.assert_allclose(g, expected) - g = create_control_grid((1., 1.), (2., 2.)) - expected = np.array([ - [[-2., -2., -2.], [0., 0., 0.], [2., 2., 2.]], - [[-2., 0., 2.], [-2., 0., 2.], [-2., 0., 2.]], - [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], - ]) + g = create_control_grid((1.0, 1.0), (2.0, 2.0)) + expected = np.array( + [ + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ) np.testing.assert_allclose(g, expected) - g = create_control_grid((2., 2.), (1., 1.)) - expected = np.array([ - [[-1.5, -1.5, -1.5, -1.5], [-0.5, -0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5], [1.5, 1.5, 1.5, 1.5]], - [[-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5]], - [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], - ]) + g = create_control_grid((2.0, 2.0), (1.0, 1.0)) + expected = np.array( + [ + [[-1.5, -1.5, -1.5, -1.5], [-0.5, -0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5], [1.5, 1.5, 1.5, 1.5]], + [[-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5]], + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + ] + ) np.testing.assert_allclose(g, expected) - g = create_control_grid((2., 2.), (2., 2.)) - expected = np.array([ - [[-3., -3., -3., -3.], [-1., -1., -1., -1.], [1., 1., 1., 1.], [3., 3., 3., 3.]], - [[-3., -1., 1., 3.], [-3., -1., 1., 3.], [-3., -1., 1., 3.], [-3., -1., 1., 3.]], - [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], - ]) + g = create_control_grid((2.0, 2.0), (2.0, 2.0)) + expected = np.array( + [ + [[-3.0, -3.0, -3.0, -3.0], [-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0], [3.0, 3.0, 3.0, 3.0]], + [[-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0]], + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + ] + ) np.testing.assert_allclose(g, expected) - g = create_control_grid((1., 1., 1.), (2., 2., 2.), homogeneous=False) - expected = 
np.array([[[[-2., -2., -2.], [-2., -2., -2.], [-2., -2., -2.]], - [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], [[2., 2., 2.], [2., 2., 2.], [2., 2., 2.]]], - [[[-2., -2., -2.], [0., 0., 0.], [2., 2., 2.]], - [[-2., -2., -2.], [0., 0., 0.], [2., 2., 2.]], - [[-2., -2., -2.], [0., 0., 0.], [2., 2., 2.]]], - [[[-2., 0., 2.], [-2., 0., 2.], [-2., 0., 2.]], - [[-2., 0., 2.], [-2., 0., 2.], [-2., 0., 2.]], - [[-2., 0., 2.], [-2., 0., 2.], [-2., 0., 2.]]]]) + g = create_control_grid((1.0, 1.0, 1.0), (2.0, 2.0, 2.0), homogeneous=False) + expected = np.array( + [ + [ + [[-2.0, -2.0, -2.0], [-2.0, -2.0, -2.0], [-2.0, -2.0, -2.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], + ], + [ + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + ], + [ + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + ], + ] + ) np.testing.assert_allclose(g, expected) @@ -120,57 +152,107 @@ def test_assert(func, params, expected): class TestCreateAffine(unittest.TestCase): - def test_create_rotate(self): - with self.assertRaisesRegex(TypeError, ''): + with self.assertRaisesRegex(TypeError, ""): create_rotate(2, None) - with self.assertRaisesRegex(ValueError, ''): + with self.assertRaisesRegex(ValueError, ""): create_rotate(5, 1) - test_assert(create_rotate, (2, 1.1), - np.array([[0.45359612, -0.89120736, 0.], [0.89120736, 0.45359612, 0.], [0., 0., 1.]])) test_assert( - create_rotate, (3, 1.1), - np.array([[1., 0., 0., 0.], [0., 0.45359612, -0.89120736, 0.], [0., 0.89120736, 0.45359612, 0.], - [0., 0., 0., 1.]])) + create_rotate, + (2, 1.1), + np.array([[0.45359612, -0.89120736, 0.0], [0.89120736, 0.45359612, 0.0], [0.0, 0.0, 1.0]]), + ) + test_assert( + create_rotate, + (3, 1.1), + np.array( + [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 0.45359612, -0.89120736, 0.0], + [0.0, 0.89120736, 0.45359612, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ), + ) test_assert( - create_rotate, (3, (1.1, 1)), - np.array([[0.54030231, 0., 0.84147098, 0.], [0.74992513, 0.45359612, -0.48152139, 0.], - [-0.38168798, 0.89120736, 0.24507903, 0.], [0., 0., 0., 1.]])) + create_rotate, + (3, (1.1, 1)), + np.array( + [ + [0.54030231, 0.0, 0.84147098, 0.0], + [0.74992513, 0.45359612, -0.48152139, 0.0], + [-0.38168798, 0.89120736, 0.24507903, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ), + ) test_assert( - create_rotate, (3, (1, 1, 1.1)), - np.array([[0.24507903, -0.48152139, 0.84147098, 0.], [0.80270075, -0.38596121, -0.45464871, 0.], - [0.54369824, 0.78687425, 0.29192658, 0.], [0., 0., 0., 1.]])) - test_assert(create_rotate, (3, (0, 0, np.pi / 2)), - np.array([[0., -1., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])) + create_rotate, + (3, (1, 1, 1.1)), + np.array( + [ + [0.24507903, -0.48152139, 0.84147098, 0.0], + [0.80270075, -0.38596121, -0.45464871, 0.0], + [0.54369824, 0.78687425, 0.29192658, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ), + ) + test_assert( + create_rotate, + (3, (0, 0, np.pi / 2)), + np.array([[0.0, -1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) def test_create_shear(self): - test_assert(create_shear, (2, 1.), np.array([[1., 1., 0.], [0., 1., 0.], [0., 0., 1.]])) - test_assert(create_shear, (2, (2., 3.)), np.array([[1., 2., 0.], [3., 1., 0.], [0., 0., 1.]])) - test_assert(create_shear, (3, 1.), - 
np.array([[1., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])) + test_assert(create_shear, (2, 1.0), np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) + test_assert(create_shear, (2, (2.0, 3.0)), np.array([[1.0, 2.0, 0.0], [3.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) + test_assert( + create_shear, + (3, 1.0), + np.array([[1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) def test_create_scale(self): - test_assert(create_scale, (2, 2), np.array([[2., 0., 0.], [0., 1., 0.], [0., 0., 1.]])) - test_assert(create_scale, (2, [2, 2, 2]), np.array([[2., 0., 0.], [0., 2., 0.], [0., 0., 1.]])) - test_assert(create_scale, (3, [1.5, 2.4]), - np.array([[1.5, 0., 0., 0.], [0., 2.4, 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])) - test_assert(create_scale, (3, 1.5), - np.array([[1.5, 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])) - test_assert(create_scale, (3, [1, 2, 3, 4, 5]), - np.array([[1., 0., 0., 0.], [0., 2., 0., 0.], [0., 0., 3., 0.], [0., 0., 0., 1.]])) + test_assert(create_scale, (2, 2), np.array([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) + test_assert(create_scale, (2, [2, 2, 2]), np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 1.0]])) + test_assert( + create_scale, + (3, [1.5, 2.4]), + np.array([[1.5, 0.0, 0.0, 0.0], [0.0, 2.4, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) + test_assert( + create_scale, + (3, 1.5), + np.array([[1.5, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) + test_assert( + create_scale, + (3, [1, 2, 3, 4, 5]), + np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) def test_create_translate(self): - test_assert(create_translate, (2, 2), np.array([[1., 0., 2.], [0., 1., 0.], [0., 0., 1.]])) - test_assert(create_translate, (2, [2, 2, 2]), np.array([[1., 0., 2.], [0., 1., 2.], [0., 0., 1.]])) - test_assert(create_translate, (3, [1.5, 2.4]), - np.array([[1., 0., 0., 1.5], [0., 1., 0., 2.4], [0., 0., 1., 0.], [0., 0., 0., 1.]])) - test_assert(create_translate, (3, 1.5), - np.array([[1., 0., 0., 1.5], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])) - test_assert(create_translate, (3, [1, 2, 3, 4, 5]), - np.array([[1., 0., 0., 1.], [0., 1., 0., 2.], [0., 0., 1., 3.], [0., 0., 0., 1.]])) + test_assert(create_translate, (2, 2), np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) + test_assert(create_translate, (2, [2, 2, 2]), np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])) + test_assert( + create_translate, + (3, [1.5, 2.4]), + np.array([[1.0, 0.0, 0.0, 1.5], [0.0, 1.0, 0.0, 2.4], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) + test_assert( + create_translate, + (3, 1.5), + np.array([[1.0, 0.0, 0.0, 1.5], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) + test_assert( + create_translate, + (3, [1, 2, 3, 4, 5]), + np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 3.0], [0.0, 0.0, 0.0, 1.0]]), + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_crop_foreground.py b/tests/test_crop_foreground.py index 1f89ef6037..5b759dbc0e 100644 --- a/tests/test_crop_foreground.py +++ b/tests/test_crop_foreground.py @@ -16,108 +16,36 @@ from monai.transforms import CropForeground TEST_CASE_1 = [ - { - 'select_fn': lambda x: x > 0, - 'channel_indexes': None, - 'margin': 0 - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 
2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - np.array([ - [ - [1, 2, 1], - [2, 3, 2], - [1, 2, 1], - ] - ]) + {"select_fn": lambda x: x > 0, "channel_indexes": None, "margin": 0}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]), + np.array([[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]), ] TEST_CASE_2 = [ - { - 'select_fn': lambda x: x > 1, - 'channel_indexes': None, - 'margin': 0 - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 3, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - np.array([ - [ - [3] - ] - ]) + {"select_fn": lambda x: x > 1, "channel_indexes": None, "margin": 0}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 3, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]]), + np.array([[[3]]]), ] TEST_CASE_3 = [ - { - 'select_fn': lambda x: x > 0, - 'channel_indexes': 0, - 'margin': 0 - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - np.array([ - [ - [1, 2, 1], - [2, 3, 2], - [1, 2, 1], - ] - ]) + {"select_fn": lambda x: x > 0, "channel_indexes": 0, "margin": 0}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]), + np.array([[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]), ] TEST_CASE_4 = [ - { - 'select_fn': lambda x: x > 0, - 'channel_indexes': None, - 'margin': 1 - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0] - ] - ]), - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 0, 0, 0, 0] - ] - ]) + {"select_fn": lambda x: x > 0, "channel_indexes": None, "margin": 1}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]), + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0]]]), ] class TestCropForeground(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_value(self, argments, image, expected_data): result = CropForeground(**argments)(image) np.testing.assert_allclose(result, expected_data) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_crop_foregroundd.py b/tests/test_crop_foregroundd.py index 3bad09c45f..9d52ec462e 100644 --- a/tests/test_crop_foregroundd.py +++ b/tests/test_crop_foregroundd.py @@ -17,132 +17,44 @@ TEST_CASE_1 = [ { - 'keys': ['img', 'label'], - 'source_key': 'label', - 'select_fn': lambda x: x > 0, - 'channel_indexes': None, - 'margin': 0 + "keys": ["img", "label"], + "source_key": "label", + "select_fn": lambda x: x > 0, + "channel_indexes": None, + "margin": 0, }, { - 'img': np.array([ - [ - [1, 0, 2, 0, 1], - [0, 1, 2, 1, 0], - [2, 2, 3, 2, 2], - [0, 1, 2, 1, 0], - [1, 0, 2, 0, 1] - ] - ]), - 'label': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 0, 1, 0], - [0, 0, 1, 0, 0], - [0, 1, 0, 1, 0], - [0, 0, 0, 0, 0] - ] - ]) + "img": np.array([[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]]]), + "label": np.array([[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]]]), }, - np.array([ - [ - [1, 2, 1], - [2, 3, 2], - [1, 2, 1], - ] - ]) + np.array([[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]), ] TEST_CASE_2 = [ - { - 'keys': ['img'], - 'source_key': 'img', - 'select_fn': lambda x: x > 1, - 'channel_indexes': None, - 'margin': 0 - }, - { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 3, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0] - ] - ]) - }, - 
np.array([ - [ - [3] - ] - ]) + {"keys": ["img"], "source_key": "img", "select_fn": lambda x: x > 1, "channel_indexes": None, "margin": 0}, + {"img": np.array([[[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 3, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]])}, + np.array([[[3]]]), ] TEST_CASE_3 = [ - { - 'keys': ['img'], - 'source_key': 'img', - 'select_fn': lambda x: x > 0, - 'channel_indexes': 0, - 'margin': 0 - }, - { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]) - }, - np.array([ - [ - [1, 2, 1], - [2, 3, 2], - [1, 2, 1], - ] - ]) + {"keys": ["img"], "source_key": "img", "select_fn": lambda x: x > 0, "channel_indexes": 0, "margin": 0}, + {"img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])}, + np.array([[[1, 2, 1], [2, 3, 2], [1, 2, 1]]]), ] TEST_CASE_4 = [ - { - 'keys': ['img'], - 'source_key': 'img', - 'select_fn': lambda x: x > 0, - 'channel_indexes': None, - 'margin': 1 - }, - { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0] - ] - ]) - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 0, 0, 0, 0] - ] - ]) + {"keys": ["img"], "source_key": "img", "select_fn": lambda x: x > 0, "channel_indexes": None, "margin": 1}, + {"img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]])}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0]]]), ] class TestCropForegroundd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_value(self, argments, image, expected_data): result = CropForegroundd(**argments)(image) - np.testing.assert_allclose(result['img'], expected_data) + np.testing.assert_allclose(result["img"], expected_data) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_csv_saver.py b/tests/test_csv_saver.py index 120bd9e650..fa21fcac3c 100644 --- a/tests/test_csv_saver.py +++ b/tests/test_csv_saver.py @@ -21,28 +21,27 @@ class TestCSVSaver(unittest.TestCase): - def test_saved_content(self): - default_dir = os.path.join('.', 'tempdir') + default_dir = os.path.join(".", "tempdir") shutil.rmtree(default_dir, ignore_errors=True) - saver = CSVSaver(output_dir=default_dir, filename='predictions.csv') + saver = CSVSaver(output_dir=default_dir, filename="predictions.csv") - meta_data = {'filename_or_obj': ['testfile' + str(i) for i in range(8)]} + meta_data = {"filename_or_obj": ["testfile" + str(i) for i in range(8)]} saver.save_batch(torch.zeros(8), meta_data) saver.finalize() - filepath = os.path.join(default_dir, 'predictions.csv') + filepath = os.path.join(default_dir, "predictions.csv") self.assertTrue(os.path.exists(filepath)) - with open(filepath, 'r') as f: + with open(filepath, "r") as f: reader = csv.reader(f) i = 0 for row in reader: - self.assertEqual(row[0], 'testfile' + str(i)) + self.assertEqual(row[0], "testfile" + str(i)) self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0) i += 1 self.assertEqual(i, 8) shutil.rmtree(default_dir) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_dataset.py b/tests/test_dataset.py index a0f9bc3172..791fcf122f 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -18,59 +18,60 @@ from monai.data import Dataset from monai.transforms import Compose, LoadNiftid, SimulateDelayd -TEST_CASE_1 = [ 
- (128, 128, 128) -] +TEST_CASE_1 = [(128, 128, 128)] -class TestDataset(unittest.TestCase): +class TestDataset(unittest.TestCase): @parameterized.expand([TEST_CASE_1]) def test_shape(self, expected_shape): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4)) tempdir = tempfile.mkdtemp() - nib.save(test_image, os.path.join(tempdir, 'test_image1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_label1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_image2.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_label2.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra2.nii.gz')) + nib.save(test_image, os.path.join(tempdir, "test_image1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_image2.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label2.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra2.nii.gz")) test_data = [ { - 'image': os.path.join(tempdir, 'test_image1.nii.gz'), - 'label': os.path.join(tempdir, 'test_label1.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra1.nii.gz') + "image": os.path.join(tempdir, "test_image1.nii.gz"), + "label": os.path.join(tempdir, "test_label1.nii.gz"), + "extra": os.path.join(tempdir, "test_extra1.nii.gz"), }, { - 'image': os.path.join(tempdir, 'test_image2.nii.gz'), - 'label': os.path.join(tempdir, 'test_label2.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra2.nii.gz') - } + "image": os.path.join(tempdir, "test_image2.nii.gz"), + "label": os.path.join(tempdir, "test_label2.nii.gz"), + "extra": os.path.join(tempdir, "test_extra2.nii.gz"), + }, ] - test_transform = Compose([LoadNiftid(keys=['image', 'label', 'extra']), - SimulateDelayd(keys=['image', 'label', 'extra'], - delay_time=[1e-7, 1e-6, 1e-5])]) + test_transform = Compose( + [ + LoadNiftid(keys=["image", "label", "extra"]), + SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), + ] + ) dataset = Dataset(data=test_data, transform=test_transform) data1 = dataset[0] data2 = dataset[1] - self.assertTupleEqual(data1['image'].shape, expected_shape) - self.assertTupleEqual(data1['label'].shape, expected_shape) - self.assertTupleEqual(data1['extra'].shape, expected_shape) - self.assertTupleEqual(data2['image'].shape, expected_shape) - self.assertTupleEqual(data2['label'].shape, expected_shape) - self.assertTupleEqual(data2['extra'].shape, expected_shape) + self.assertTupleEqual(data1["image"].shape, expected_shape) + self.assertTupleEqual(data1["label"].shape, expected_shape) + self.assertTupleEqual(data1["extra"].shape, expected_shape) + self.assertTupleEqual(data2["image"].shape, expected_shape) + self.assertTupleEqual(data2["label"].shape, expected_shape) + self.assertTupleEqual(data2["extra"].shape, expected_shape) - dataset = Dataset(data=test_data, transform=LoadNiftid(keys=['image', 'label', 'extra'])) + dataset = Dataset(data=test_data, transform=LoadNiftid(keys=["image", "label", "extra"])) data1_simple = dataset[0] data2_simple = dataset[1] - self.assertTupleEqual(data1_simple['image'].shape, expected_shape) - self.assertTupleEqual(data1_simple['label'].shape, expected_shape) - self.assertTupleEqual(data1_simple['extra'].shape, expected_shape) - self.assertTupleEqual(data2_simple['image'].shape, expected_shape) - 
self.assertTupleEqual(data2_simple['label'].shape, expected_shape) - self.assertTupleEqual(data2_simple['extra'].shape, expected_shape) + self.assertTupleEqual(data1_simple["image"].shape, expected_shape) + self.assertTupleEqual(data1_simple["label"].shape, expected_shape) + self.assertTupleEqual(data1_simple["extra"].shape, expected_shape) + self.assertTupleEqual(data2_simple["image"].shape, expected_shape) + self.assertTupleEqual(data2_simple["label"].shape, expected_shape) + self.assertTupleEqual(data2_simple["extra"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_delete_keys.py b/tests/test_delete_keys.py index 5d9f28dce7..13a0a1d4ab 100644 --- a/tests/test_delete_keys.py +++ b/tests/test_delete_keys.py @@ -16,13 +16,12 @@ from monai.transforms import DeleteKeysd TEST_CASE_1 = [ - {'keys': [str(i) for i in range(30)]}, + {"keys": [str(i) for i in range(30)]}, 20, ] class TestDeleteKeysd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_memory(self, input_param, expected_key_size): input_data = dict() @@ -31,9 +30,9 @@ def test_memory(self, input_param, expected_key_size): result = DeleteKeysd(**input_param)(input_data) self.assertEqual(len(result.keys()), expected_key_size) self.assertGreaterEqual( - sys.getsizeof(input_data) * float(expected_key_size) / len(input_data), - sys.getsizeof(result)) + sys.getsizeof(input_data) * float(expected_key_size) / len(input_data), sys.getsizeof(result) + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_densenet.py b/tests/test_densenet.py index 0fef9d0d2b..13b8778ba0 100644 --- a/tests/test_densenet.py +++ b/tests/test_densenet.py @@ -18,18 +18,13 @@ TEST_CASE_1 = [ # 4-channel 3D, batch 16 - { - 'spatial_dims': 3, - 'in_channels': 2, - 'out_channels': 3 - }, + {"spatial_dims": 3, "in_channels": 2, "out_channels": 3}, torch.randn(16, 2, 32, 64, 48), - (16, 3) + (16, 3), ] class TestDENSENET(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_121_shape(self, input_param, input_data, expected_shape): net = densenet121(**input_param) diff --git a/tests/test_dice_loss.py b/tests/test_dice_loss.py index 1320cb32f2..0733db1bf0 100644 --- a/tests/test_dice_loss.py +++ b/tests/test_dice_loss.py @@ -17,122 +17,94 @@ from monai.losses import DiceLoss TEST_CASE_1 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.307576, ] TEST_CASE_2 = [ # shape: (2, 1, 2, 2), (2, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]], [[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 1.], [1., 1.]]], [[[1., 0.], [1., 0.]]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]), + "smooth": 1e-4, }, 0.416657, ] TEST_CASE_3 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": False, "to_onehot_y": True}, { - 'include_background': False, - 'to_onehot_y': True, - }, - { - 'pred': torch.tensor([[[1., 1., 
0.], [0., 0., 1.]], [[1., 0., 1.], [0., 1., 0.]]]), - 'ground': torch.tensor([[[0., 0., 1.]], [[0., 1., 0.]]]), - 'smooth': 0.0, + "pred": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]), + "ground": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]), + "smooth": 0.0, }, 0.0, ] TEST_CASE_4 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": True, "to_onehot_y": True, "do_sigmoid": True}, { - 'include_background': True, - 'to_onehot_y': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[-1., 0., 1.], [1., 0., -1.]], [[0., 0., 0.], [0., 0., 0.]]]), - 'ground': torch.tensor([[[1., 0., 0.]], [[1., 1., 0.]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]), + "ground": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]), + "smooth": 1e-4, }, 0.435050, ] TEST_CASE_5 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": True, "to_onehot_y": True, "do_softmax": True}, { - 'include_background': True, - 'to_onehot_y': True, - 'do_softmax': True, - }, - { - 'pred': torch.tensor([[[-1., 0., 1.], [1., 0., -1.]], [[0., 0., 0.], [0., 0., 0.]]]), - 'ground': torch.tensor([[[1., 0., 0.]], [[1., 1., 0.]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]), + "ground": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]), + "smooth": 1e-4, }, 0.383713, ] TEST_CASE_6 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.307576, ] TEST_CASE_7 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True, "squared_pred": True}, { - 'include_background': True, - 'do_sigmoid': True, - 'squared_pred': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-5, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-5, }, 0.178337, ] TEST_CASE_8 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True, "jaccard": True}, { - 'include_background': True, - 'do_sigmoid': True, - 'jaccard': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-5, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-5, }, -0.059094, ] class TestDiceLoss(unittest.TestCase): - - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, - TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8]) + @parameterized.expand( + [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8] + ) def test_shape(self, input_param, input_data, expected_val): result = DiceLoss(**input_param).forward(**input_data) self.assertAlmostEqual(result.item(), expected_val, places=5) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_flip.py b/tests/test_flip.py index 050d66d8db..1f17c36e7a 100644 --- a/tests/test_flip.py +++ b/tests/test_flip.py @@ -17,16 
+17,12 @@ from monai.transforms import Flip from tests.utils import NumpyImageTestCase2D -INVALID_CASES = [("wrong_axis", ['s', 1], TypeError), - ("not_numbers", 's', TypeError)] +INVALID_CASES = [("wrong_axis", ["s", 1], TypeError), ("not_numbers", "s", TypeError)] -VALID_CASES = [("no_axis", None), - ("one_axis", 1), - ("many_axis", [0, 1])] +VALID_CASES = [("no_axis", None), ("one_axis", 1), ("many_axis", [0, 1])] class TestFlip(NumpyImageTestCase2D): - @parameterized.expand(INVALID_CASES) def test_invalid_inputs(self, _, spatial_axis, raises): with self.assertRaises(raises): @@ -43,5 +39,5 @@ def test_correct_results(self, _, spatial_axis): self.assertTrue(np.allclose(expected, flip(self.imt[0]))) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_flipd.py b/tests/test_flipd.py index e2fcb6b915..ec81b78fcc 100644 --- a/tests/test_flipd.py +++ b/tests/test_flipd.py @@ -17,32 +17,28 @@ from monai.transforms import Flipd from tests.utils import NumpyImageTestCase2D -INVALID_CASES = [("wrong_axis", ['s', 1], TypeError), - ("not_numbers", 's', TypeError)] +INVALID_CASES = [("wrong_axis", ["s", 1], TypeError), ("not_numbers", "s", TypeError)] -VALID_CASES = [("no_axis", None), - ("one_axis", 1), - ("many_axis", [0, 1])] +VALID_CASES = [("no_axis", None), ("one_axis", 1), ("many_axis", [0, 1])] class TestFlipd(NumpyImageTestCase2D): - @parameterized.expand(INVALID_CASES) def test_invalid_cases(self, _, spatial_axis, raises): with self.assertRaises(raises): - flip = Flipd(keys='img', spatial_axis=spatial_axis) - flip({'img': self.imt[0]}) + flip = Flipd(keys="img", spatial_axis=spatial_axis) + flip({"img": self.imt[0]}) @parameterized.expand(VALID_CASES) def test_correct_results(self, _, spatial_axis): - flip = Flipd(keys='img', spatial_axis=spatial_axis) + flip = Flipd(keys="img", spatial_axis=spatial_axis) expected = list() for channel in self.imt[0]: expected.append(np.flip(channel, spatial_axis)) expected = np.stack(expected) - res = flip({'img': self.imt[0]}) - assert np.allclose(expected, res['img']) + res = flip({"img": self.imt[0]}) + assert np.allclose(expected, res["img"]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_gaussian_filter.py b/tests/test_gaussian_filter.py index ac1ad2f88c..857f68321f 100644 --- a/tests/test_gaussian_filter.py +++ b/tests/test_gaussian_filter.py @@ -18,41 +18,80 @@ class GaussianFilterTestCase(unittest.TestCase): - def test_1d(self): a = torch.ones(1, 8, 10) - g = GaussianFilter(1, 3, 3, torch.device('cpu:0')) - expected = np.array([[ + g = GaussianFilter(1, 3, 3, torch.device("cpu:0")) + expected = np.array( [ - 0.56658804, 0.69108766, 0.79392236, 0.86594427, 0.90267116, 0.9026711, 0.8659443, 0.7939224, 0.6910876, - 0.56658804 - ], - ]]) + [ + [ + 0.56658804, + 0.69108766, + 0.79392236, + 0.86594427, + 0.90267116, + 0.9026711, + 0.8659443, + 0.7939224, + 0.6910876, + 0.56658804, + ], + ] + ] + ) expected = np.tile(expected, (1, 8, 1)) np.testing.assert_allclose(g(a).cpu().numpy(), expected, rtol=1e-5) def test_2d(self): a = torch.ones(1, 1, 3, 3) - g = GaussianFilter(2, 3, 3, torch.device('cpu:0')) - expected = np.array([[[[0.13380532, 0.14087981, 0.13380532], [0.14087981, 0.14832835, 0.14087981], - [0.13380532, 0.14087981, 0.13380532]]]]) + g = GaussianFilter(2, 3, 3, torch.device("cpu:0")) + expected = np.array( + [ + [ + [ + [0.13380532, 0.14087981, 0.13380532], + [0.14087981, 0.14832835, 0.14087981], + [0.13380532, 0.14087981, 0.13380532], + ] + ] + ] + ) 
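# --- illustrative aside, not lines from the patch: a minimal sketch of the
# --- 2-D case exercised just above. The import path is an assumption; this
# --- hunk does not show the test module's imports.
import torch
from monai.networks.layers import GaussianFilter  # assumed import location

img = torch.ones(1, 1, 3, 3)  # (batch, channel, H, W), as in test_2d above
gauss = GaussianFilter(2, 3, 3, torch.device("cpu:0"))  # 2 spatial dims, sigma 3
out = gauss(img)  # Gaussian-smoothed tensor with the same shape as the input
assert out.shape == (1, 1, 3, 3)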
np.testing.assert_allclose(g(a).cpu().numpy(), expected, rtol=1e-5) def test_3d(self): a = torch.ones(1, 1, 4, 3, 4) - g = GaussianFilter(3, 3, 3, torch.device('cpu:0')) + g = GaussianFilter(3, 3, 3, torch.device("cpu:0")) expected = np.array( - [[[[[0.07294822, 0.08033235, 0.08033235, 0.07294822], [0.07680509, 0.08457965, 0.08457965, 0.07680509], - [0.07294822, 0.08033235, 0.08033235, 0.07294822]], - [[0.08033235, 0.08846395, 0.08846395, 0.08033235], [0.08457965, 0.09314119, 0.09314119, 0.08457966], - [0.08033235, 0.08846396, 0.08846396, 0.08033236]], - [[0.08033235, 0.08846395, 0.08846395, 0.08033235], [0.08457965, 0.09314119, 0.09314119, 0.08457966], - [0.08033235, 0.08846396, 0.08846396, 0.08033236]], - [[0.07294822, 0.08033235, 0.08033235, 0.07294822], [0.07680509, 0.08457965, 0.08457965, 0.07680509], - [0.07294822, 0.08033235, 0.08033235, 0.07294822]]]]],) + [ + [ + [ + [ + [0.07294822, 0.08033235, 0.08033235, 0.07294822], + [0.07680509, 0.08457965, 0.08457965, 0.07680509], + [0.07294822, 0.08033235, 0.08033235, 0.07294822], + ], + [ + [0.08033235, 0.08846395, 0.08846395, 0.08033235], + [0.08457965, 0.09314119, 0.09314119, 0.08457966], + [0.08033235, 0.08846396, 0.08846396, 0.08033236], + ], + [ + [0.08033235, 0.08846395, 0.08846395, 0.08033235], + [0.08457965, 0.09314119, 0.09314119, 0.08457966], + [0.08033235, 0.08846396, 0.08846396, 0.08033236], + ], + [ + [0.07294822, 0.08033235, 0.08033235, 0.07294822], + [0.07680509, 0.08457965, 0.08457965, 0.07680509], + [0.07294822, 0.08033235, 0.08033235, 0.07294822], + ], + ] + ] + ], + ) np.testing.assert_allclose(g(a).cpu().numpy(), expected, rtol=1e-5) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_generalized_dice_loss.py b/tests/test_generalized_dice_loss.py index a263f6383f..ae0b2e295d 100644 --- a/tests/test_generalized_dice_loss.py +++ b/tests/test_generalized_dice_loss.py @@ -17,106 +17,82 @@ from monai.losses import GeneralizedDiceLoss TEST_CASE_0 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.307576, ] TEST_CASE_1 = [ # shape: (2, 1, 2, 2), (2, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]], [[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 1.], [1., 1.]]], [[[1., 0.], [1., 0.]]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]), + "smooth": 1e-4, }, 0.416597, ] TEST_CASE_2 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": False, "to_onehot_y": True}, { - 'include_background': False, - 'to_onehot_y': True, - }, - { - 'pred': torch.tensor([[[1., 1., 0.], [0., 0., 1.]], [[1., 0., 1.], [0., 1., 0.]]]), - 'ground': torch.tensor([[[0., 0., 1.]], [[0., 1., 0.]]]), - 'smooth': 0.0, + "pred": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]), + "ground": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]), + "smooth": 0.0, }, 0.0, ] TEST_CASE_3 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": True, 
"to_onehot_y": True, "do_sigmoid": True}, { - 'include_background': True, - 'to_onehot_y': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[-1., 0., 1.], [1., 0., -1.]], [[0., 0., 0.], [0., 0., 0.]]]), - 'ground': torch.tensor([[[1., 0., 0.]], [[1., 1., 0.]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]), + "ground": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]), + "smooth": 1e-4, }, 0.435034, ] TEST_CASE_4 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": True, "to_onehot_y": True, "do_softmax": True}, { - 'include_background': True, - 'to_onehot_y': True, - 'do_softmax': True, - }, - { - 'pred': torch.tensor([[[-1., 0., 1.], [1., 0., -1.]], [[0., 0., 0.], [0., 0., 0.]]]), - 'ground': torch.tensor([[[1., 0., 0.]], [[1., 1., 0.]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]), + "ground": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]), + "smooth": 1e-4, }, 0.383699, ] TEST_CASE_5 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": False, "to_onehot_y": True}, { - 'include_background': False, - 'to_onehot_y': True, - }, - { - 'pred': torch.tensor([[[1., 1., 0.], [0., 0., 1.]], [[1., 0., 1.], [0., 1., 0.]]]), - 'ground': torch.tensor([[[0., 0., 0.]], [[0., 0., 0.]]]), - 'smooth': 1e-8, + "pred": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]), + "ground": torch.tensor([[[0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0]]]), + "smooth": 1e-8, }, 0.0, ] TEST_CASE_6 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.307576, ] class TestGeneralizedDiceLoss(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) def test_shape(self, input_param, input_data, expected_val): result = GeneralizedDiceLoss(**input_param).forward(**input_data) self.assertAlmostEqual(result.item(), expected_val, places=5) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_generate_pos_neg_label_crop_centers.py b/tests/test_generate_pos_neg_label_crop_centers.py index c0ab6143f3..0ecec6accf 100644 --- a/tests/test_generate_pos_neg_label_crop_centers.py +++ b/tests/test_generate_pos_neg_label_crop_centers.py @@ -17,22 +17,21 @@ TEST_CASE_1 = [ { - 'label': np.random.randint(0, 2, size=[3, 3, 3, 3]), - 'size': [2, 2, 2], - 'num_samples': 2, - 'pos_ratio': 1.0, - 'image': None, - 'image_threshold': 0, - 'rand_state': np.random.RandomState() + "label": np.random.randint(0, 2, size=[3, 3, 3, 3]), + "size": [2, 2, 2], + "num_samples": 2, + "pos_ratio": 1.0, + "image": None, + "image_threshold": 0, + "rand_state": np.random.RandomState(), }, list, 2, - 3 + 3, ] class TestGeneratePosNegLabelCropCenters(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_type_shape(self, input_data, expected_type, expected_count, expected_shape): result = generate_pos_neg_label_crop_centers(**input_data) @@ -41,5 +40,5 @@ def test_type_shape(self, input_data, expected_type, expected_count, expected_sh self.assertEqual(len(result[0]), 
expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_generate_spatial_bounding_box.py b/tests/test_generate_spatial_bounding_box.py index c09b13240a..2afe1ba95f 100644 --- a/tests/test_generate_spatial_bounding_box.py +++ b/tests/test_generate_spatial_bounding_box.py @@ -17,84 +17,51 @@ TEST_CASE_1 = [ { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - 'select_fn': lambda x: x > 0, - 'channel_indexes': None, - 'margin': 0 + "img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]), + "select_fn": lambda x: x > 0, + "channel_indexes": None, + "margin": 0, }, - ([1, 1], [4, 4]) + ([1, 1], [4, 4]), ] TEST_CASE_2 = [ { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 1, 3, 1, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - 'select_fn': lambda x: x > 1, - 'channel_indexes': None, - 'margin': 0 + "img": np.array([[[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 3, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]]), + "select_fn": lambda x: x > 1, + "channel_indexes": None, + "margin": 0, }, - ([2, 2], [3, 3]) + ([2, 2], [3, 3]), ] TEST_CASE_3 = [ { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]), - 'select_fn': lambda x: x > 0, - 'channel_indexes': 0, - 'margin': 0 + "img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]), + "select_fn": lambda x: x > 0, + "channel_indexes": 0, + "margin": 0, }, - ([1, 1], [4, 4]) + ([1, 1], [4, 4]), ] TEST_CASE_4 = [ { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0] - ] - ]), - 'select_fn': lambda x: x > 0, - 'channel_indexes': None, - 'margin': 1 + "img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]), + "select_fn": lambda x: x > 0, + "channel_indexes": None, + "margin": 1, }, - ([0, 0], [4, 5]) + ([0, 0], [4, 5]), ] class TestGenerateSpatialBoundingBox(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_value(self, input_data, expected_box): result = generate_spatial_bounding_box(**input_data) self.assertTupleEqual(result, expected_box) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_classification_saver.py b/tests/test_handler_classification_saver.py index fcd789c8a2..42c8be6f3e 100644 --- a/tests/test_handler_classification_saver.py +++ b/tests/test_handler_classification_saver.py @@ -21,9 +21,8 @@ class TestHandlerClassificationSaver(unittest.TestCase): - def test_saved_content(self): - default_dir = os.path.join('.', 'tempdir') + default_dir = os.path.join(".", "tempdir") shutil.rmtree(default_dir, ignore_errors=True) # set up engine @@ -33,23 +32,23 @@ def _train_func(engine, batch): engine = Engine(_train_func) # set up testing handler - saver = ClassificationSaver(output_dir=default_dir, filename='predictions.csv') + saver = ClassificationSaver(output_dir=default_dir, filename="predictions.csv") saver.attach(engine) - data = [{'filename_or_obj': ['testfile' + str(i) for i in range(8)]}] + data = [{"filename_or_obj": ["testfile" + str(i) for i in range(8)]}] engine.run(data, max_epochs=1) - filepath = os.path.join(default_dir, 'predictions.csv') + filepath = os.path.join(default_dir, "predictions.csv") 
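# --- illustrative aside, not lines from the patch: the standalone CSVSaver
# --- pattern this handler builds on, mirroring test_csv_saver.py earlier in
# --- this patch. The import path is an assumption; the hunk does not show
# --- the test module's imports.
import torch
from monai.data import CSVSaver  # assumed import location

saver = CSVSaver(output_dir="./tempdir", filename="predictions.csv")
meta_data = {"filename_or_obj": ["testfile" + str(i) for i in range(8)]}
saver.save_batch(torch.zeros(8), meta_data)  # buffers one row per batch item
saver.finalize()  # writes ./tempdir/predictions.csv with rows like "testfile0,0.0"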
self.assertTrue(os.path.exists(filepath)) - with open(filepath, 'r') as f: + with open(filepath, "r") as f: reader = csv.reader(f) i = 0 for row in reader: - self.assertEqual(row[0], 'testfile' + str(i)) + self.assertEqual(row[0], "testfile" + str(i)) self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0) i += 1 self.assertEqual(i, 8) shutil.rmtree(default_dir) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_mean_dice.py b/tests/test_handler_mean_dice.py index a74a9b74be..4d4a3060d6 100644 --- a/tests/test_handler_mean_dice.py +++ b/tests/test_handler_mean_dice.py @@ -16,9 +16,9 @@ from monai.handlers import MeanDice -TEST_CASE_1 = [{'to_onehot_y': True, 'mutually_exclusive': True}, 0.75] -TEST_CASE_2 = [{'include_background': False, 'to_onehot_y': True, 'mutually_exclusive': False}, 0.66666] -TEST_CASE_3 = [{'mutually_exclusive': True, 'add_sigmoid': True}] +TEST_CASE_1 = [{"to_onehot_y": True, "mutually_exclusive": True}, 0.75] +TEST_CASE_2 = [{"include_background": False, "to_onehot_y": True, "mutually_exclusive": False}, 0.66666] +TEST_CASE_3 = [{"mutually_exclusive": True, "add_sigmoid": True}] class TestHandlerMeanDice(unittest.TestCase): @@ -41,7 +41,7 @@ def test_compute(self, input_params, expected_avg): @parameterized.expand([TEST_CASE_3]) def test_misconfig(self, input_params): - with self.assertRaisesRegex(ValueError, 'compatib'): + with self.assertRaisesRegex(ValueError, "compatib"): dice_metric = MeanDice(**input_params) y_pred = torch.Tensor([[0, 1], [1, 0]]) @@ -62,5 +62,5 @@ def test_shape_mismatch(self, input_params, _expected): dice_metric.update([y_pred, y]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_rocauc.py b/tests/test_handler_rocauc.py index c36af7209f..73e11ffba3 100644 --- a/tests/test_handler_rocauc.py +++ b/tests/test_handler_rocauc.py @@ -17,7 +17,6 @@ class TestHandlerROCAUC(unittest.TestCase): - def test_compute(self): auc_metric = ROCAUC(to_onehot_y=True, add_softmax=True) @@ -33,5 +32,5 @@ def test_compute(self): np.testing.assert_allclose(0.75, auc) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_segmentation_saver.py b/tests/test_handler_segmentation_saver.py index e1fb047799..1fc11a5c21 100644 --- a/tests/test_handler_segmentation_saver.py +++ b/tests/test_handler_segmentation_saver.py @@ -19,9 +19,8 @@ class TestHandlerSegmentationSaver(unittest.TestCase): - def test_saved_content(self): - default_dir = os.path.join('.', 'tempdir') + default_dir = os.path.join(".", "tempdir") shutil.rmtree(default_dir, ignore_errors=True) # set up engine @@ -31,16 +30,16 @@ def _train_func(engine, batch): engine = Engine(_train_func) # set up testing handler - saver = SegmentationSaver(output_dir=default_dir, output_postfix='seg', output_ext='.nii.gz') + saver = SegmentationSaver(output_dir=default_dir, output_postfix="seg", output_ext=".nii.gz") saver.attach(engine) - data = [{'filename_or_obj': ['testfile' + str(i) for i in range(8)]}] + data = [{"filename_or_obj": ["testfile" + str(i) for i in range(8)]}] engine.run(data, max_epochs=1) for i in range(8): - filepath = os.path.join('testfile' + str(i), 'testfile' + str(i) + '_seg.nii.gz') + filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg.nii.gz") self.assertTrue(os.path.exists(os.path.join(default_dir, filepath))) shutil.rmtree(default_dir) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff 
--git a/tests/test_handler_stats.py b/tests/test_handler_stats.py index 475d3fbf6f..b7ec601a20 100644 --- a/tests/test_handler_stats.py +++ b/tests/test_handler_stats.py @@ -21,12 +21,11 @@ class TestHandlerStats(unittest.TestCase): - def test_metrics_print(self): log_stream = StringIO() logging.basicConfig(stream=log_stream, level=logging.INFO) - key_to_handler = 'test_logging' - key_to_print = 'testing_metric' + key_to_handler = "test_logging" + key_to_print = "testing_metric" # set up engine def _train_func(engine, batch): @@ -48,9 +47,9 @@ def _update_metric(engine): # check logging output output_str = log_stream.getvalue() - grep = re.compile('.*{}.*'.format(key_to_handler)) - has_key_word = re.compile('.*{}.*'.format(key_to_print)) - for idx, line in enumerate(output_str.split('\n')): + grep = re.compile(".*{}.*".format(key_to_handler)) + has_key_word = re.compile(".*{}.*".format(key_to_print)) + for idx, line in enumerate(output_str.split("\n")): if grep.match(line): if idx in [5, 10]: self.assertTrue(has_key_word.match(line)) @@ -58,8 +57,8 @@ def _update_metric(engine): def test_loss_print(self): log_stream = StringIO() logging.basicConfig(stream=log_stream, level=logging.INFO) - key_to_handler = 'test_logging' - key_to_print = 'myLoss' + key_to_handler = "test_logging" + key_to_print = "myLoss" # set up engine def _train_func(engine, batch): @@ -75,9 +74,9 @@ def _train_func(engine, batch): # check logging output output_str = log_stream.getvalue() - grep = re.compile('.*{}.*'.format(key_to_handler)) - has_key_word = re.compile('.*{}.*'.format(key_to_print)) - for idx, line in enumerate(output_str.split('\n')): + grep = re.compile(".*{}.*".format(key_to_handler)) + has_key_word = re.compile(".*{}.*".format(key_to_print)) + for idx, line in enumerate(output_str.split("\n")): if grep.match(line): if idx in [1, 2, 3, 6, 7, 8]: self.assertTrue(has_key_word.match(line)) @@ -85,8 +84,8 @@ def _train_func(engine, batch): def test_loss_dict(self): log_stream = StringIO() logging.basicConfig(stream=log_stream, level=logging.INFO) - key_to_handler = 'test_logging' - key_to_print = 'myLoss1' + key_to_handler = "test_logging" + key_to_print = "myLoss1" # set up engine def _train_func(engine, batch): @@ -95,21 +94,20 @@ def _train_func(engine, batch): engine = Engine(_train_func) # set up testing handler - stats_handler = StatsHandler(name=key_to_handler, - output_transform=lambda x: {key_to_print: x}) + stats_handler = StatsHandler(name=key_to_handler, output_transform=lambda x: {key_to_print: x}) stats_handler.attach(engine) engine.run(range(3), max_epochs=2) # check logging output output_str = log_stream.getvalue() - grep = re.compile('.*{}.*'.format(key_to_handler)) - has_key_word = re.compile('.*{}.*'.format(key_to_print)) - for idx, line in enumerate(output_str.split('\n')): + grep = re.compile(".*{}.*".format(key_to_handler)) + has_key_word = re.compile(".*{}.*".format(key_to_print)) + for idx, line in enumerate(output_str.split("\n")): if grep.match(line): if idx in [1, 2, 3, 6, 7, 8]: self.assertTrue(has_key_word.match(line)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_tb_image.py b/tests/test_handler_tb_image.py index 03fa0b37a4..6af7b9e184 100644 --- a/tests/test_handler_tb_image.py +++ b/tests/test_handler_tb_image.py @@ -32,10 +32,9 @@ class TestHandlerTBImage(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_tb_image_shape(self, shape): - default_dir = os.path.join('.', 'runs') + default_dir = 
os.path.join(".", "runs") shutil.rmtree(default_dir, ignore_errors=True) # set up engine @@ -56,5 +55,5 @@ def _train_func(engine, batch): shutil.rmtree(default_dir) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_tb_stats.py b/tests/test_handler_tb_stats.py index 49fb68e797..31c6a94454 100644 --- a/tests/test_handler_tb_stats.py +++ b/tests/test_handler_tb_stats.py @@ -22,9 +22,8 @@ class TestHandlerTBStats(unittest.TestCase): - def test_metrics_print(self): - default_dir = os.path.join('.', 'runs') + default_dir = os.path.join(".", "runs") shutil.rmtree(default_dir, ignore_errors=True) # set up engine @@ -36,8 +35,8 @@ def _train_func(engine, batch): # set up dummy metric @engine.on(Events.EPOCH_COMPLETED) def _update_metric(engine): - current_metric = engine.state.metrics.get('acc', 0.1) - engine.state.metrics['acc'] = current_metric + 0.1 + current_metric = engine.state.metrics.get("acc", 0.1) + engine.state.metrics["acc"] = current_metric + 0.1 # set up testing handler stats_handler = TensorBoardStatsHandler() @@ -49,7 +48,7 @@ def _update_metric(engine): shutil.rmtree(default_dir) def test_metrics_writer(self): - default_dir = os.path.join('.', 'runs') + default_dir = os.path.join(".", "runs") shutil.rmtree(default_dir, ignore_errors=True) with tempfile.TemporaryDirectory() as temp_dir: @@ -62,14 +61,14 @@ def _train_func(engine, batch): # set up dummy metric @engine.on(Events.EPOCH_COMPLETED) def _update_metric(engine): - current_metric = engine.state.metrics.get('acc', 0.1) - engine.state.metrics['acc'] = current_metric + 0.1 + current_metric = engine.state.metrics.get("acc", 0.1) + engine.state.metrics["acc"] = current_metric + 0.1 # set up testing handler writer = SummaryWriter(log_dir=temp_dir) stats_handler = TensorBoardStatsHandler( - writer, output_transform=lambda x: {'loss': x * 2.0}, - global_epoch_transform=lambda x: x * 3.0) + writer, output_transform=lambda x: {"loss": x * 2.0}, global_epoch_transform=lambda x: x * 3.0 + ) stats_handler.attach(engine) engine.run(range(3), max_epochs=2) # check logging output @@ -77,5 +76,5 @@ def _update_metric(engine): self.assertTrue(not os.path.exists(default_dir)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_header_correct.py b/tests/test_header_correct.py index c7c0344913..e9b2a8821b 100644 --- a/tests/test_header_correct.py +++ b/tests/test_header_correct.py @@ -18,19 +18,23 @@ class TestCorrection(unittest.TestCase): - def test_correct(self): test_img = nib.Nifti1Image(np.zeros((1, 2, 3)), np.eye(4)) test_img.header.set_zooms((100, 100, 100)) test_img = correct_nifti_header_if_necessary(test_img) np.testing.assert_allclose( - test_img.affine, np.array([[100., 0., 0., 0.], [0., 100., 0., 0.], [0., 0., 100., 0.], [0., 0., 0., 1.]])) + test_img.affine, + np.array([[100.0, 0.0, 0.0, 0.0], [0.0, 100.0, 0.0, 0.0], [0.0, 0.0, 100.0, 0.0], [0.0, 0.0, 0.0, 1.0]]), + ) def test_affine(self): - test_img = nib.Nifti1Image(np.zeros((1, 2, 3)), np.eye(4) * 20.) 
+ test_img = nib.Nifti1Image(np.zeros((1, 2, 3)), np.eye(4) * 20.0) test_img = correct_nifti_header_if_necessary(test_img) np.testing.assert_allclose( - test_img.affine, np.array([[20., 0., 0., 0.], [0., 20., 0., 0.], [0., 0., 20., 0.], [0., 0., 0., 20.]])) + test_img.affine, + np.array([[20.0, 0.0, 0.0, 0.0], [0.0, 20.0, 0.0, 0.0], [0.0, 0.0, 20.0, 0.0], [0.0, 0.0, 0.0, 20.0]]), + ) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_highresnet.py b/tests/test_highresnet.py index 209266a301..a9471bb5ed 100644 --- a/tests/test_highresnet.py +++ b/tests/test_highresnet.py @@ -17,51 +17,31 @@ from monai.networks.nets import HighResNet TEST_CASE_1 = [ # single channel 3D, batch 16 - { - 'spatial_dims': 3, - 'in_channels': 1, - 'out_channels': 3, - 'norm_type': 'instance', - }, + {"spatial_dims": 3, "in_channels": 1, "out_channels": 3, "norm_type": "instance"}, torch.randn(16, 1, 32, 24, 48), (16, 3, 32, 24, 48), ] TEST_CASE_2 = [ # 4-channel 3D, batch 1 - { - 'spatial_dims': 3, - 'in_channels': 4, - 'out_channels': 3, - 'acti_type': 'relu6', - }, + {"spatial_dims": 3, "in_channels": 4, "out_channels": 3, "acti_type": "relu6"}, torch.randn(1, 4, 17, 64, 48), (1, 3, 17, 64, 48), ] TEST_CASE_3 = [ # 4-channel 2D, batch 7 - { - 'spatial_dims': 2, - 'in_channels': 4, - 'out_channels': 3, - }, + {"spatial_dims": 2, "in_channels": 4, "out_channels": 3}, torch.randn(7, 4, 64, 48), (7, 3, 64, 48), ] TEST_CASE_4 = [ # 4-channel 1D, batch 16 - { - 'spatial_dims': 1, - 'in_channels': 4, - 'out_channels': 3, - 'dropout_prob': 0.1, - }, + {"spatial_dims": 1, "in_channels": 4, "out_channels": 3, "dropout_prob": 0.1}, torch.randn(16, 4, 63), (16, 3, 63), ] class TestHighResNet(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, input_data, expected_shape): net = HighResNet(**input_param) @@ -71,5 +51,5 @@ def test_shape(self, input_param, input_data, expected_shape): self.assertEqual(result.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index bc8e86bcc0..9e30cbacd8 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -23,15 +23,23 @@ import monai from monai.metrics import compute_roc_auc from monai.networks.nets import densenet121 -from monai.transforms import (AddChannel, Compose, LoadPNG, RandFlip, RandRotate, RandZoom, Resize, ScaleIntensity, - ToTensor) +from monai.transforms import ( + AddChannel, + Compose, + LoadPNG, + RandFlip, + RandRotate, + RandZoom, + Resize, + ScaleIntensity, + ToTensor, +) from tests.utils import skip_if_quick -TEST_DATA_URL = 'https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz' +TEST_DATA_URL = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz" class MedNISTDataset(torch.utils.data.Dataset): - def __init__(self, image_files, labels, transforms): self.image_files = image_files self.labels = labels @@ -48,16 +56,18 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device=torch.dev monai.config.print_config() # define transforms for image and classification - train_transforms = Compose([ - LoadPNG(image_only=True), - AddChannel(), - ScaleIntensity(), - RandRotate(degrees=15, prob=0.5), - RandFlip(spatial_axis=0, prob=0.5), - RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5), - Resize(spatial_size=(64, 64), mode='constant'), 
- ToTensor() - ]) + train_transforms = Compose( + [ + LoadPNG(image_only=True), + AddChannel(), + ScaleIntensity(), + RandRotate(degrees=15, prob=0.5), + RandFlip(spatial_axis=0, prob=0.5), + RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5), + Resize(spatial_size=(64, 64), mode="constant"), + ToTensor(), + ] + ) train_transforms.set_random_state(1234) val_transforms = Compose([LoadPNG(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()]) @@ -68,11 +78,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device=torch.dev val_ds = MedNISTDataset(val_x, val_y, val_transforms) val_loader = DataLoader(val_ds, batch_size=300, num_workers=10) - model = densenet121( - spatial_dims=2, - in_channels=1, - out_channels=len(np.unique(train_y)), - ).to(device) + model = densenet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y)),).to(device) loss_function = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), 1e-5) epoch_num = 4 @@ -83,10 +89,10 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device=torch.dev best_metric_epoch = -1 epoch_loss_values = list() metric_values = list() - model_filename = os.path.join(root_dir, 'best_metric_model.pth') + model_filename = os.path.join(root_dir, "best_metric_model.pth") for epoch in range(epoch_num): - print('-' * 10) - print('Epoch {}/{}'.format(epoch + 1, epoch_num)) + print("-" * 10) + print("Epoch {}/{}".format(epoch + 1, epoch_num)) model.train() epoch_loss = 0 step = 0 @@ -120,10 +126,12 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device=torch.dev best_metric = auc_metric best_metric_epoch = epoch + 1 torch.save(model.state_dict(), model_filename) - print('saved new best metric model') - print("current epoch %d current AUC: %0.4f current accuracy: %0.4f best AUC: %0.4f at epoch %d" % - (epoch + 1, auc_metric, acc_metric, best_metric, best_metric_epoch)) - print('train completed, best_metric: %0.4f at epoch: %d' % (best_metric, best_metric_epoch)) + print("saved new best metric model") + print( + "current epoch %d current AUC: %0.4f current accuracy: %0.4f best AUC: %0.4f at epoch %d" + % (epoch + 1, auc_metric, acc_metric, best_metric, best_metric_epoch) + ) + print("train completed, best_metric: %0.4f at epoch: %d" % (best_metric, best_metric_epoch)) return epoch_loss_values, best_metric, best_metric_epoch @@ -133,13 +141,9 @@ def run_inference_test(root_dir, test_x, test_y, device=torch.device("cuda:0")): val_ds = MedNISTDataset(test_x, test_y, val_transforms) val_loader = DataLoader(val_ds, batch_size=300, num_workers=10) - model = densenet121( - spatial_dims=2, - in_channels=1, - out_channels=len(np.unique(test_y)), - ).to(device) + model = densenet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y)),).to(device) - model_filename = os.path.join(root_dir, 'best_metric_model.pth') + model_filename = os.path.join(root_dir, "best_metric_model.pth") model.load_state_dict(torch.load(model_filename)) model.eval() y_true = list() @@ -156,7 +160,6 @@ def run_inference_test(root_dir, test_x, test_y, device=torch.device("cuda:0")): class IntegrationClassification2D(unittest.TestCase): - def setUp(self): torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False @@ -164,8 +167,8 @@ def setUp(self): self.data_dir = tempfile.mkdtemp() # download - subprocess.call(['wget', '-nv', '-P', self.data_dir, TEST_DATA_URL]) - dataset_file = os.path.join(self.data_dir, 'MedNIST.tar.gz') + subprocess.call(["wget", "-nv", "-P", 
self.data_dir, TEST_DATA_URL]) + dataset_file = os.path.join(self.data_dir, "MedNIST.tar.gz") assert os.path.exists(dataset_file) # extract tarfile @@ -174,11 +177,12 @@ def setUp(self): datafile.close() # find image files and labels - data_dir = os.path.join(self.data_dir, 'MedNIST') + data_dir = os.path.join(self.data_dir, "MedNIST") class_names = sorted([x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))]) - image_files = [[ - os.path.join(data_dir, class_name, x) for x in sorted(os.listdir(os.path.join(data_dir, class_name))) - ] for class_name in class_names] + image_files = [ + [os.path.join(data_dir, class_name, x) for x in sorted(os.listdir(os.path.join(data_dir, class_name)))] + for class_name in class_names + ] image_file_list, image_classes = [], [] for i, class_name in enumerate(class_names): image_file_list.extend(image_files[i]) @@ -202,7 +206,7 @@ def setUp(self): self.train_y.append(image_classes[i]) np.random.seed(seed=None) - self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0') + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0") def tearDown(self): shutil.rmtree(self.data_dir) @@ -214,18 +218,20 @@ def test_training(self): torch.manual_seed(0) repeated.append([]) - losses, best_metric, best_metric_epoch = \ - run_training_test(self.data_dir, self.train_x, self.train_y, self.val_x, self.val_y, device=self.device) + losses, best_metric, best_metric_epoch = run_training_test( + self.data_dir, self.train_x, self.train_y, self.val_x, self.val_y, device=self.device + ) # check training properties np.testing.assert_allclose( - losses, [0.8501208358129878, 0.18469145818121113, 0.08108749352158255, 0.04965383692342005], rtol=1e-3) + losses, [0.8501208358129878, 0.18469145818121113, 0.08108749352158255, 0.04965383692342005], rtol=1e-3 + ) repeated[i].extend(losses) - print('best metric', best_metric) + print("best metric", best_metric) np.testing.assert_allclose(best_metric, 0.9999480167572079, rtol=1e-4) repeated[i].append(best_metric) np.testing.assert_allclose(best_metric_epoch, 4) - model_file = os.path.join(self.data_dir, 'best_metric_model.pth') + model_file = os.path.join(self.data_dir, "best_metric_model.pth") self.assertTrue(os.path.exists(model_file)) infer_metric = run_inference_test(self.data_dir, self.test_x, self.test_y, device=self.device) @@ -237,5 +243,5 @@ def test_training(self): np.testing.assert_allclose(repeated[0], repeated[1]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_integration_determinism.py b/tests/test_integration_determinism.py index 3f125c4a17..0a49e5aeda 100644 --- a/tests/test_integration_determinism.py +++ b/tests/test_integration_determinism.py @@ -22,9 +22,7 @@ def run_test(batch_size=64, train_steps=200, device=torch.device("cuda:0")): - class _TestBatch(Dataset): - def __init__(self, transforms): self.transforms = transforms @@ -41,23 +39,14 @@ def __len__(self): return train_steps net = UNet( - dimensions=2, - in_channels=1, - out_channels=1, - channels=(4, 8, 16, 32), - strides=(2, 2, 2), - num_res_units=2, + dimensions=2, in_channels=1, out_channels=1, channels=(4, 8, 16, 32), strides=(2, 2, 2), num_res_units=2, ).to(device) loss = DiceLoss(do_sigmoid=True) opt = torch.optim.Adam(net.parameters(), 1e-2) - train_transforms = Compose([ - AddChannel(), - ScaleIntensity(), - RandSpatialCrop((96, 96), random_size=False), - RandRotate90(), - ToTensor() - ]) + train_transforms = Compose( + [AddChannel(), 
ScaleIntensity(), RandSpatialCrop((96, 96), random_size=False), RandRotate90(), ToTensor()] + ) src = DataLoader(_TestBatch(train_transforms), batch_size=batch_size) @@ -78,17 +67,16 @@ def __len__(self): class TestDeterminism(unittest.TestCase): - def setUp(self): np.random.seed(0) torch.manual_seed(0) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False - self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0') + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0") def test_training(self): loss, step = run_test(device=self.device) - print('Deterministic loss {} at training step {}'.format(loss, step)) + print("Deterministic loss {} at training step {}".format(loss, step)) np.testing.assert_allclose(step, 4) np.testing.assert_allclose(loss, 0.5346279, rtol=1e-6) diff --git a/tests/test_integration_segmentation_3d.py b/tests/test_integration_segmentation_3d.py index 3128a16145..73cc856290 100644 --- a/tests/test_integration_segmentation_3d.py +++ b/tests/test_integration_segmentation_3d.py @@ -25,8 +25,15 @@ from monai.data import create_test_image_3d, sliding_window_inference, NiftiSaver, list_data_collate from monai.metrics import compute_meandice from monai.networks.nets import UNet -from monai.transforms import \ - Compose, AsChannelFirstd, LoadNiftid, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, ToTensord +from monai.transforms import ( + Compose, + AsChannelFirstd, + LoadNiftid, + RandCropByPosNegLabeld, + RandRotate90d, + ScaleIntensityd, + ToTensord, +) from monai.visualize import plot_2d_or_3d_image from tests.utils import skip_if_quick @@ -34,27 +41,33 @@ def run_training_test(root_dir, device=torch.device("cuda:0"), cachedataset=False): monai.config.print_config() - images = sorted(glob(os.path.join(root_dir, 'img*.nii.gz'))) - segs = sorted(glob(os.path.join(root_dir, 'seg*.nii.gz'))) - train_files = [{'img': img, 'seg': seg} for img, seg in zip(images[:20], segs[:20])] - val_files = [{'img': img, 'seg': seg} for img, seg in zip(images[-20:], segs[-20:])] + images = sorted(glob(os.path.join(root_dir, "img*.nii.gz"))) + segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz"))) + train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])] + val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])] # define transforms for image and segmentation - train_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - RandCropByPosNegLabeld(keys=['img', 'seg'], label_key='seg', size=[96, 96, 96], pos=1, neg=1, num_samples=4), - RandRotate90d(keys=['img', 'seg'], prob=0.8, spatial_axes=[0, 2]), - ToTensord(keys=['img', 'seg']) - ]) + train_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + RandCropByPosNegLabeld( + keys=["img", "seg"], label_key="seg", size=[96, 96, 96], pos=1, neg=1, num_samples=4 + ), + RandRotate90d(keys=["img", "seg"], prob=0.8, spatial_axes=[0, 2]), + ToTensord(keys=["img", "seg"]), + ] + ) train_transforms.set_random_state(1234) - val_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - ToTensord(keys=['img', 'seg']) - ]) + val_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], 
channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + ToTensord(keys=["img", "seg"]), + ] + ) # create a training data loader if cachedataset: @@ -62,19 +75,19 @@ def run_training_test(root_dir, device=torch.device("cuda:0"), cachedataset=Fals else: train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training - train_loader = DataLoader(train_ds, - batch_size=2, - shuffle=True, - num_workers=4, - collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + train_loader = DataLoader( + train_ds, + batch_size=2, + shuffle=True, + num_workers=4, + collate_fn=list_data_collate, + pin_memory=torch.cuda.is_available(), + ) # create a validation data loader val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) - val_loader = DataLoader(val_ds, - batch_size=1, - num_workers=4, - collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + val_loader = DataLoader( + val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available() + ) # create UNet, DiceLoss and Adam optimizer model = monai.networks.nets.UNet( @@ -93,17 +106,17 @@ def run_training_test(root_dir, device=torch.device("cuda:0"), cachedataset=Fals best_metric, best_metric_epoch = -1, -1 epoch_loss_values = list() metric_values = list() - writer = SummaryWriter(log_dir=os.path.join(root_dir, 'runs')) - model_filename = os.path.join(root_dir, 'best_metric_model.pth') + writer = SummaryWriter(log_dir=os.path.join(root_dir, "runs")) + model_filename = os.path.join(root_dir, "best_metric_model.pth") for epoch in range(6): - print('-' * 10) - print('Epoch {}/{}'.format(epoch + 1, 6)) + print("-" * 10) + print("Epoch {}/{}".format(epoch + 1, 6)) model.train() epoch_loss = 0 step = 0 for batch_data in train_loader: step += 1 - inputs, labels = batch_data['img'].to(device), batch_data['seg'].to(device) + inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device) optimizer.zero_grad() outputs = model(inputs) loss = loss_function(outputs, labels) @@ -112,7 +125,7 @@ def run_training_test(root_dir, device=torch.device("cuda:0"), cachedataset=Fals epoch_loss += loss.item() epoch_len = len(train_ds) // train_loader.batch_size print("%d/%d, train_loss:%0.4f" % (step, epoch_len, loss.item())) - writer.add_scalar('train_loss', loss.item(), epoch_len * epoch + step) + writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step) epoch_loss /= step epoch_loss_values.append(epoch_loss) print("epoch %d average loss:%0.4f" % (epoch + 1, epoch_loss)) @@ -120,17 +133,18 @@ def run_training_test(root_dir, device=torch.device("cuda:0"), cachedataset=Fals if (epoch + 1) % val_interval == 0: model.eval() with torch.no_grad(): - metric_sum = 0. 
+ metric_sum = 0.0 metric_count = 0 val_images = None val_labels = None val_outputs = None for val_data in val_loader: - val_images, val_labels = val_data['img'].to(device), val_data['seg'].to(device) + val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device) sw_batch_size, roi_size = 4, (96, 96, 96) val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) - value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=True, - to_onehot_y=False, add_sigmoid=True) + value = compute_meandice( + y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True + ) metric_count += len(value) metric_sum += value.sum().item() metric = metric_sum / metric_count @@ -139,38 +153,40 @@ def run_training_test(root_dir, device=torch.device("cuda:0"), cachedataset=Fals best_metric = metric best_metric_epoch = epoch + 1 torch.save(model.state_dict(), model_filename) - print('saved new best metric model') - print("current epoch %d current mean dice: %0.4f best mean dice: %0.4f at epoch %d" % - (epoch + 1, metric, best_metric, best_metric_epoch)) - writer.add_scalar('val_mean_dice', metric, epoch + 1) + print("saved new best metric model") + print( + "current epoch %d current mean dice: %0.4f best mean dice: %0.4f at epoch %d" + % (epoch + 1, metric, best_metric, best_metric_epoch) + ) + writer.add_scalar("val_mean_dice", metric, epoch + 1) # plot the last model output as GIF image in TensorBoard with the corresponding image and label - plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag='image') - plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag='label') - plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag='output') - print('train completed, best_metric: %0.4f at epoch: %d' % (best_metric, best_metric_epoch)) + plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image") + plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label") + plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output") + print("train completed, best_metric: %0.4f at epoch: %d" % (best_metric, best_metric_epoch)) writer.close() return epoch_loss_values, best_metric, best_metric_epoch def run_inference_test(root_dir, device=torch.device("cuda:0")): - images = sorted(glob(os.path.join(root_dir, 'im*.nii.gz'))) - segs = sorted(glob(os.path.join(root_dir, 'seg*.nii.gz'))) - val_files = [{'img': img, 'seg': seg} for img, seg in zip(images, segs)] + images = sorted(glob(os.path.join(root_dir, "im*.nii.gz"))) + segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz"))) + val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)] # define transforms for image and segmentation - val_transforms = Compose([ - LoadNiftid(keys=['img', 'seg']), - AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1), - ScaleIntensityd(keys=['img', 'seg']), - ToTensord(keys=['img', 'seg']) - ]) + val_transforms = Compose( + [ + LoadNiftid(keys=["img", "seg"]), + AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), + ScaleIntensityd(keys=["img", "seg"]), + ToTensord(keys=["img", "seg"]), + ] + ) val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) # sliding window inference needs to input 1 image in every iteration - val_loader = DataLoader(val_ds, - batch_size=1, - num_workers=4, - collate_fn=list_data_collate, - pin_memory=torch.cuda.is_available()) + val_loader = DataLoader( + val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate,
pin_memory=torch.cuda.is_available() + ) model = UNet( dimensions=3, @@ -181,32 +197,32 @@ def run_inference_test(root_dir, device=torch.device("cuda:0")): num_res_units=2, ).to(device) - model_filename = os.path.join(root_dir, 'best_metric_model.pth') + model_filename = os.path.join(root_dir, "best_metric_model.pth") model.load_state_dict(torch.load(model_filename)) model.eval() with torch.no_grad(): - metric_sum = 0. + metric_sum = 0.0 metric_count = 0 - saver = NiftiSaver(output_dir=os.path.join(root_dir, 'output'), dtype=int) + saver = NiftiSaver(output_dir=os.path.join(root_dir, "output"), dtype=int) for val_data in val_loader: - val_images, val_labels = val_data['img'].to(device), val_data['seg'].to(device) + val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device) # define sliding window size and batch size for window inference sw_batch_size, roi_size = 4, (96, 96, 96) val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) - value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=True, - to_onehot_y=False, add_sigmoid=True) + value = compute_meandice( + y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True + ) metric_count += len(value) metric_sum += value.sum().item() val_outputs = (val_outputs.sigmoid() >= 0.5).float() saver.save_batch( - val_outputs, - {'filename_or_obj': val_data['img.filename_or_obj'], 'affine': val_data['img.affine']}) + val_outputs, {"filename_or_obj": val_data["img.filename_or_obj"], "affine": val_data["img.affine"]} + ) metric = metric_sum / metric_count return metric class IntegrationSegmentation3D(unittest.TestCase): - def setUp(self): torch.manual_seed(0) torch.backends.cudnn.deterministic = True @@ -217,12 +233,12 @@ def setUp(self): for i in range(40): im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1) n = nib.Nifti1Image(im, np.eye(4)) - nib.save(n, os.path.join(self.data_dir, 'img%i.nii.gz' % i)) + nib.save(n, os.path.join(self.data_dir, "img%i.nii.gz" % i)) n = nib.Nifti1Image(seg, np.eye(4)) - nib.save(n, os.path.join(self.data_dir, 'seg%i.nii.gz' % i)) + nib.save(n, os.path.join(self.data_dir, "seg%i.nii.gz" % i)) np.random.seed(seed=None) - self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0') + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0") def tearDown(self): shutil.rmtree(self.data_dir) @@ -234,21 +250,30 @@ def test_training(self): torch.manual_seed(0) repeated.append([]) - losses, best_metric, best_metric_epoch = \ - run_training_test(self.data_dir, device=self.device, cachedataset=(i == 2)) + losses, best_metric, best_metric_epoch = run_training_test( + self.data_dir, device=self.device, cachedataset=(i == 2) + ) # check training properties - np.testing.assert_allclose(losses, [ - 0.5241468191146851, 0.4485286593437195, 0.42851402163505553, 0.4130884766578674, 0.39990419149398804, - 0.38985557556152345 - ], rtol=1e-4) + np.testing.assert_allclose( + losses, + [ + 0.5241468191146851, + 0.4485286593437195, + 0.42851402163505553, + 0.4130884766578674, + 0.39990419149398804, + 0.38985557556152345, + ], + rtol=1e-4, + ) repeated[i].extend(losses) - print('best metric', best_metric) + print("best metric", best_metric) np.testing.assert_allclose(best_metric, 0.936915835738182, rtol=1e-4) repeated[i].append(best_metric) np.testing.assert_allclose(best_metric_epoch, 6) - self.assertTrue(len(glob(os.path.join(self.data_dir, 'runs'))) > 0) - model_file
= os.path.join(self.data_dir, 'best_metric_model.pth') + self.assertTrue(len(glob(os.path.join(self.data_dir, "runs"))) > 0) + model_file = os.path.join(self.data_dir, "best_metric_model.pth") self.assertTrue(os.path.exists(model_file)) infer_metric = run_inference_test(self.data_dir, device=self.device) @@ -256,15 +281,49 @@ def test_training(self): # check inference properties np.testing.assert_allclose(infer_metric, 0.9382847994565964, rtol=1e-4) repeated[i].append(infer_metric) - output_files = sorted(glob(os.path.join(self.data_dir, 'output', 'img*', '*.nii.gz'))) - sums = [0.14089012145996094, 0.15014171600341797, 0.14881277084350586, 0.1385650634765625, 0.1845254898071289, - 0.16743040084838867, 0.14531803131103516, 0.16558170318603516, 0.15594959259033203, 0.17697954177856445, - 0.1602783203125, 0.16418695449829102, 0.14412164688110352, 0.11254501342773438, 0.1596541404724121, - 0.19611215591430664, 0.17372655868530273, 0.09818077087402344, 0.19010257720947266, 0.19887447357177734, - 0.19475173950195312, 0.2032027244567871, 0.15918874740600586, 0.1304488182067871, 0.1496739387512207, - 0.1408066749572754, 0.22757959365844727, 0.1601700782775879, 0.14635848999023438, 0.10335826873779297, - 0.11824846267700195, 0.12940073013305664, 0.11342906951904297, 0.15047359466552734, 0.16041946411132812, - 0.18996095657348633, 0.21734333038330078, 0.17714214324951172, 0.1853632926940918, 0.079422] + output_files = sorted(glob(os.path.join(self.data_dir, "output", "img*", "*.nii.gz"))) + sums = [ + 0.14089012145996094, + 0.15014171600341797, + 0.14881277084350586, + 0.1385650634765625, + 0.1845254898071289, + 0.16743040084838867, + 0.14531803131103516, + 0.16558170318603516, + 0.15594959259033203, + 0.17697954177856445, + 0.1602783203125, + 0.16418695449829102, + 0.14412164688110352, + 0.11254501342773438, + 0.1596541404724121, + 0.19611215591430664, + 0.17372655868530273, + 0.09818077087402344, + 0.19010257720947266, + 0.19887447357177734, + 0.19475173950195312, + 0.2032027244567871, + 0.15918874740600586, + 0.1304488182067871, + 0.1496739387512207, + 0.1408066749572754, + 0.22757959365844727, + 0.1601700782775879, + 0.14635848999023438, + 0.10335826873779297, + 0.11824846267700195, + 0.12940073013305664, + 0.11342906951904297, + 0.15047359466552734, + 0.16041946411132812, + 0.18996095657348633, + 0.21734333038330078, + 0.17714214324951172, + 0.1853632926940918, + 0.079422, + ] for (output, s) in zip(output_files, sums): ave = np.mean(nib.load(output).get_fdata()) np.testing.assert_allclose(ave, s, rtol=1e-3) @@ -273,5 +332,5 @@ def test_training(self): np.testing.assert_allclose(repeated[0], repeated[2]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_integration_sliding_window.py b/tests/test_integration_sliding_window.py index 54c13703a4..5b9b6ab5f5 100644 --- a/tests/test_integration_sliding_window.py +++ b/tests/test_integration_sliding_window.py @@ -32,12 +32,7 @@ def run_test(batch_size, img_name, seg_name, output_dir, device=torch.device("cu loader = DataLoader(ds, batch_size=1, pin_memory=torch.cuda.is_available()) net = UNet( - dimensions=3, - in_channels=1, - out_channels=1, - channels=(4, 8, 16, 32), - strides=(2, 2, 2), - num_res_units=2, + dimensions=3, in_channels=1, out_channels=1, channels=(4, 8, 16, 32), strides=(2, 2, 2), num_res_units=2, ).to(device) roi_size = (16, 32, 48) sw_batch_size = batch_size @@ -51,18 +46,18 @@ def _sliding_window_processor(_engine, batch): infer_engine = Engine(_sliding_window_processor) - 
SegmentationSaver(output_dir=output_dir, output_ext='.nii.gz', output_postfix='seg', - batch_transform=lambda x: x[2]).attach(infer_engine) + SegmentationSaver( + output_dir=output_dir, output_ext=".nii.gz", output_postfix="seg", batch_transform=lambda x: x[2] + ).attach(infer_engine) infer_engine.run(loader) - basename = os.path.basename(img_name)[:-len('.nii.gz')] - saved_name = os.path.join(output_dir, basename, '{}_seg.nii.gz'.format(basename)) + basename = os.path.basename(img_name)[: -len(".nii.gz")] + saved_name = os.path.join(output_dir, basename, "{}_seg.nii.gz".format(basename)) return saved_name class TestIntegrationSlidingWindow(unittest.TestCase): - def setUp(self): np.random.seed(0) torch.manual_seed(0) @@ -72,7 +67,7 @@ def setUp(self): im, seg = create_test_image_3d(25, 28, 63, rad_max=10, noise_max=1, num_objs=4, num_seg_classes=1) self.img_name = make_nifti_image(im) self.seg_name = make_nifti_image(seg) - self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0') + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0") def tearDown(self): if os.path.exists(self.img_name): @@ -82,11 +77,9 @@ def tearDown(self): def test_training(self): with tempfile.TemporaryDirectory() as temp_dir: - output_file = run_test(batch_size=2, - img_name=self.img_name, - seg_name=self.seg_name, - output_dir=temp_dir, - device=self.device) + output_file = run_test( + batch_size=2, img_name=self.img_name, seg_name=self.seg_name, output_dir=temp_dir, device=self.device + ) output_image = nib.load(output_file).get_fdata() np.testing.assert_allclose(np.sum(output_image), 34070) np.testing.assert_allclose(output_image.shape, (28, 25, 63, 1)) diff --git a/tests/test_integration_unet_2d.py b/tests/test_integration_unet_2d.py index e05f8c80af..9eded1b4df 100644 --- a/tests/test_integration_unet_2d.py +++ b/tests/test_integration_unet_2d.py @@ -22,9 +22,7 @@ def run_test(batch_size=64, train_steps=100, device=torch.device("cuda:0")): - class _TestBatch(Dataset): - def __getitem__(self, _unused_id): im, seg = create_test_image_2d(128, 128, noise_max=1, num_objs=4, num_seg_classes=1) return im[None], seg[None].astype(np.float32) @@ -33,12 +31,7 @@ def __len__(self): return train_steps net = UNet( - dimensions=2, - in_channels=1, - out_channels=1, - channels=(4, 8, 16, 32), - strides=(2, 2, 2), - num_res_units=2, + dimensions=2, in_channels=1, out_channels=1, channels=(4, 8, 16, 32), strides=(2, 2, 2), num_res_units=2, ).to(device) loss = DiceLoss(do_sigmoid=True) @@ -53,12 +46,11 @@ def __len__(self): class TestIntegrationUnet2D(unittest.TestCase): - def test_unet_training(self): - loss = run_test(device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')) + loss = run_test(device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")) print(loss) self.assertGreaterEqual(0.85, loss) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_list_data_collate.py b/tests/test_list_data_collate.py index 9e73eaa9af..d23817a141 100644 --- a/tests/test_list_data_collate.py +++ b/tests/test_list_data_collate.py @@ -16,39 +16,30 @@ from monai.data import list_data_collate -a = {'image': np.array([1, 2, 3]), 'label': np.array([4, 5, 6])} -b = {'image': np.array([7, 8, 9]), 'label': np.array([10, 11, 12])} -c = {'image': np.array([13, 14, 15]), 'label': np.array([16, 7, 18])} -d = {'image': np.array([19, 20, 21]), 'label': np.array([22, 23, 24])} -TEST_CASE_1 = [ - [[a, b], [c, d]], # dataset returns a list of 
dictionary data - dict, - torch.Size([4, 3]) -] +a = {"image": np.array([1, 2, 3]), "label": np.array([4, 5, 6])} +b = {"image": np.array([7, 8, 9]), "label": np.array([10, 11, 12])} +c = {"image": np.array([13, 14, 15]), "label": np.array([16, 7, 18])} +d = {"image": np.array([19, 20, 21]), "label": np.array([22, 23, 24])} +TEST_CASE_1 = [[[a, b], [c, d]], dict, torch.Size([4, 3])] # dataset returns a list of dictionary data e = (np.array([1, 2, 3]), np.array([4, 5, 6])) f = (np.array([7, 8, 9]), np.array([10, 11, 12])) g = (np.array([13, 14, 15]), np.array([16, 7, 18])) h = (np.array([19, 20, 21]), np.array([22, 23, 24])) -TEST_CASE_2 = [ - [[e, f], [g, h]], # dataset returns a list of tuple data - list, - torch.Size([4, 3]) -] +TEST_CASE_2 = [[[e, f], [g, h]], list, torch.Size([4, 3])] # dataset returns a list of tuple data class TestListDataCollate(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_type_shape(self, input_data, expected_type, expected_shape): result = list_data_collate(input_data) self.assertIsInstance(result, expected_type) if isinstance(result, dict): - data = result['image'] + data = result["image"] else: data = result[0] self.assertEqual(data.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_load_nifti.py b/tests/test_load_nifti.py index 62db906f7d..e6627adb82 100644 --- a/tests/test_load_nifti.py +++ b/tests/test_load_nifti.py @@ -17,45 +17,24 @@ from parameterized import parameterized from monai.transforms import LoadNifti -TEST_CASE_1 = [ - { - 'as_closest_canonical': False, - 'image_only': True - }, - ['test_image.nii.gz'], - (128, 128, 128) -] +TEST_CASE_1 = [{"as_closest_canonical": False, "image_only": True}, ["test_image.nii.gz"], (128, 128, 128)] -TEST_CASE_2 = [ - { - 'as_closest_canonical': False, - 'image_only': False - }, - ['test_image.nii.gz'], - (128, 128, 128) -] +TEST_CASE_2 = [{"as_closest_canonical": False, "image_only": False}, ["test_image.nii.gz"], (128, 128, 128)] TEST_CASE_3 = [ - { - 'as_closest_canonical': False, - 'image_only': True - }, - ['test_image1.nii.gz', 'test_image2.nii.gz', 'test_image3.nii.gz'], - (3, 128, 128, 128) + {"as_closest_canonical": False, "image_only": True}, + ["test_image1.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], + (3, 128, 128, 128), ] TEST_CASE_4 = [ - { - 'as_closest_canonical': False, - 'image_only': False - }, - ['test_image1.nii.gz', 'test_image2.nii.gz', 'test_image3.nii.gz'], - (3, 128, 128, 128) + {"as_closest_canonical": False, "image_only": False}, + ["test_image1.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], + (3, 128, 128, 128), ] class TestLoadNifti(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, filenames, expected_shape): test_image = np.random.randint(0, 2, size=[128, 128, 128]) @@ -67,12 +46,12 @@ def test_shape(self, input_param, filenames, expected_shape): result = LoadNifti(**input_param)(filenames) if isinstance(result, tuple): result, header = result - self.assertTrue('affine' in header) - np.testing.assert_allclose(header['affine'], np.eye(4)) - if input_param['as_closest_canonical']: - np.testing.asesrt_allclose(header['original_affine'], np.eye(4)) + self.assertTrue("affine" in header) + np.testing.assert_allclose(header["affine"], np.eye(4)) + if input_param["as_closest_canonical"]: + np.testing.assert_allclose(header["original_affine"], np.eye(4)) self.assertTupleEqual(result.shape,
expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_load_niftid.py b/tests/test_load_niftid.py index 32f739a27b..429d6d6db3 100644 --- a/tests/test_load_niftid.py +++ b/tests/test_load_niftid.py @@ -17,19 +17,12 @@ from parameterized import parameterized from monai.transforms import LoadNiftid -KEYS = ['image', 'label', 'extra'] +KEYS = ["image", "label", "extra"] -TEST_CASE_1 = [ - { - 'keys': KEYS, - 'as_closest_canonical': False - }, - (128, 128, 128) -] +TEST_CASE_1 = [{"keys": KEYS, "as_closest_canonical": False}, (128, 128, 128)] class TestLoadNiftid(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, expected_shape): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4)) @@ -37,12 +30,12 @@ def test_shape(self, input_param, expected_shape): test_data = dict() with tempfile.TemporaryDirectory() as tempdir: for key in KEYS: - nib.save(test_image, os.path.join(tempdir, key + '.nii.gz')) - test_data.update({key: os.path.join(tempdir, key + '.nii.gz')}) + nib.save(test_image, os.path.join(tempdir, key + ".nii.gz")) + test_data.update({key: os.path.join(tempdir, key + ".nii.gz")}) result = LoadNiftid(**input_param)(test_data) for key in KEYS: self.assertTupleEqual(result[key].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_load_png.py b/tests/test_load_png.py index 8cd77c7d01..218555c795 100644 --- a/tests/test_load_png.py +++ b/tests/test_load_png.py @@ -17,30 +17,14 @@ from parameterized import parameterized from monai.transforms import LoadPNG -TEST_CASE_1 = [ - (128, 128), - ['test_image.png'], - (128, 128), - (128, 128) -] - -TEST_CASE_2 = [ - (128, 128, 3), - ['test_image.png'], - (128, 128, 3), - (128, 128) -] - -TEST_CASE_3 = [ - (128, 128), - ['test_image1.png', 'test_image2.png', 'test_image3.png'], - (3, 128, 128), - (128, 128) -] +TEST_CASE_1 = [(128, 128), ["test_image.png"], (128, 128), (128, 128)] +TEST_CASE_2 = [(128, 128, 3), ["test_image.png"], (128, 128, 3), (128, 128)] + +TEST_CASE_3 = [(128, 128), ["test_image1.png", "test_image2.png", "test_image3.png"], (3, 128, 128), (128, 128)] -class TestLoadPNG(unittest.TestCase): +class TestLoadPNG(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, data_shape, filenames, expected_shape, meta_shape): test_image = np.random.randint(0, 256, size=data_shape) @@ -48,11 +32,11 @@ def test_shape(self, data_shape, filenames, expected_shape, meta_shape): with tempfile.TemporaryDirectory() as tempdir: for i, name in enumerate(filenames): filenames[i] = os.path.join(tempdir, name) - Image.fromarray(test_image.astype('uint8')).save(filenames[i]) + Image.fromarray(test_image.astype("uint8")).save(filenames[i]) result = LoadPNG()(filenames) - self.assertTupleEqual(result[1]['spatial_shape'], meta_shape) + self.assertTupleEqual(result[1]["spatial_shape"], meta_shape) self.assertTupleEqual(result[0].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_load_pngd.py b/tests/test_load_pngd.py index bb5387e0dd..2891c95687 100644 --- a/tests/test_load_pngd.py +++ b/tests/test_load_pngd.py @@ -17,18 +17,12 @@ from parameterized import parameterized from monai.transforms import LoadPNGd -KEYS = ['image', 'label', 'extra'] +KEYS = ["image", "label", "extra"] -TEST_CASE_1 = [ - { - 'keys': KEYS - }, - (128, 128, 3) -] +TEST_CASE_1 = 
[{"keys": KEYS}, (128, 128, 3)] class TestLoadPNGd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, expected_shape): test_image = np.random.randint(0, 256, size=[128, 128, 3]) @@ -36,12 +30,12 @@ def test_shape(self, input_param, expected_shape): test_data = dict() with tempfile.TemporaryDirectory() as tempdir: for key in KEYS: - Image.fromarray(test_image.astype('uint8')).save(os.path.join(tempdir, key + '.png')) - test_data.update({key: os.path.join(tempdir, key + '.png')}) + Image.fromarray(test_image.astype("uint8")).save(os.path.join(tempdir, key + ".png")) + test_data.update({key: os.path.join(tempdir, key + ".png")}) result = LoadPNGd(**input_param)(test_data) for key in KEYS: self.assertTupleEqual(result[key].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_load_spacing_orientation.py b/tests/test_load_spacing_orientation.py index dddd245ca0..dce10abe20 100644 --- a/tests/test_load_spacing_orientation.py +++ b/tests/test_load_spacing_orientation.py @@ -17,98 +17,118 @@ from nibabel.processing import resample_to_output from parameterized import parameterized -from monai.transforms import (AddChanneld, LoadNiftid, Orientationd, Spacingd) +from monai.transforms import AddChanneld, LoadNiftid, Orientationd, Spacingd FILES = tuple( - os.path.join(os.path.dirname(__file__), 'testing_data', filename) - for filename in ('anatomical.nii', 'reoriented_anat_moved.nii')) + os.path.join(os.path.dirname(__file__), "testing_data", filename) + for filename in ("anatomical.nii", "reoriented_anat_moved.nii") +) class TestLoadSpacingOrientation(unittest.TestCase): - @parameterized.expand(FILES) def test_load_spacingd(self, filename): - data = {'image': filename} - data_dict = LoadNiftid(keys='image')(data) - data_dict = AddChanneld(keys='image')(data_dict) - res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=True, mode='constant')(data_dict) - np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine']) - anat = nibabel.Nifti1Image(data_dict['image'][0], data_dict['image.affine']) + data = {"image": filename} + data_dict = LoadNiftid(keys="image")(data) + data_dict = AddChanneld(keys="image")(data_dict) + res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=True, mode="constant")(data_dict) + np.testing.assert_allclose(data_dict["image.affine"], res_dict["image.original_affine"]) + anat = nibabel.Nifti1Image(data_dict["image"][0], data_dict["image.affine"]) ref = resample_to_output(anat, (1, 2, 3)) - np.testing.assert_allclose(res_dict['image.affine'], ref.affine) - np.testing.assert_allclose(res_dict['image'].shape[1:], ref.shape) - np.testing.assert_allclose(ref.get_fdata(), res_dict['image'][0]) + np.testing.assert_allclose(res_dict["image.affine"], ref.affine) + np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape) + np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0]) @parameterized.expand(FILES) def test_load_spacingd_rotate(self, filename): - data = {'image': filename} - data_dict = LoadNiftid(keys='image')(data) - data_dict = AddChanneld(keys='image')(data_dict) - affine = data_dict['image.affine'] - data_dict['image.original_affine'] = data_dict['image.affine'] = \ + data = {"image": filename} + data_dict = LoadNiftid(keys="image")(data) + data_dict = AddChanneld(keys="image")(data_dict) + affine = data_dict["image.affine"] + data_dict["image.original_affine"] = data_dict["image.affine"] = ( np.array([[0, 0, 
1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine - res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=True, mode='constant')(data_dict) - np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine']) - anat = nibabel.Nifti1Image(data_dict['image'][0], data_dict['image.affine']) + ) + res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=True, mode="constant")(data_dict) + np.testing.assert_allclose(data_dict["image.affine"], res_dict["image.original_affine"]) + anat = nibabel.Nifti1Image(data_dict["image"][0], data_dict["image.affine"]) ref = resample_to_output(anat, (1, 2, 3)) - np.testing.assert_allclose(res_dict['image.affine'], ref.affine) - if 'anatomical' not in filename: - np.testing.assert_allclose(res_dict['image'].shape[1:], ref.shape) - np.testing.assert_allclose(ref.get_fdata(), res_dict['image'][0]) + np.testing.assert_allclose(res_dict["image.affine"], ref.affine) + if "anatomical" not in filename: + np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape) + np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0]) else: # different from the ref implementation (shape computed by round # instead of ceil) - np.testing.assert_allclose(ref.get_fdata()[..., :-1], res_dict['image'][0]) + np.testing.assert_allclose(ref.get_fdata()[..., :-1], res_dict["image"][0]) def test_load_spacingd_non_diag(self): - data = {'image': FILES[1]} - data_dict = LoadNiftid(keys='image')(data) - data_dict = AddChanneld(keys='image')(data_dict) - affine = data_dict['image.affine'] - data_dict['image.original_affine'] = data_dict['image.affine'] = \ + data = {"image": FILES[1]} + data_dict = LoadNiftid(keys="image")(data) + data_dict = AddChanneld(keys="image")(data_dict) + affine = data_dict["image.affine"] + data_dict["image.original_affine"] = data_dict["image.affine"] = ( np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine - res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='constant')(data_dict) - np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine']) + ) + res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="constant")(data_dict) + np.testing.assert_allclose(data_dict["image.affine"], res_dict["image.original_affine"]) np.testing.assert_allclose( - res_dict['image.affine'], - np.array([[0., 0., 3., -27.599409], [0., 2., 0., -47.977585], [-1., 0., 0., 35.297897], [0., 0., 0., 1.]])) + res_dict["image.affine"], + np.array( + [ + [0.0, 0.0, 3.0, -27.599409], + [0.0, 2.0, 0.0, -47.977585], + [-1.0, 0.0, 0.0, 35.297897], + [0.0, 0.0, 0.0, 1.0], + ] + ), + ) def test_load_spacingd_rotate_non_diag(self): - data = {'image': FILES[0]} - data_dict = LoadNiftid(keys='image')(data) - data_dict = AddChanneld(keys='image')(data_dict) - res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='nearest')(data_dict) - np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine']) + data = {"image": FILES[0]} + data_dict = LoadNiftid(keys="image")(data) + data_dict = AddChanneld(keys="image")(data_dict) + res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="nearest")(data_dict) + np.testing.assert_allclose(data_dict["image.affine"], res_dict["image.original_affine"]) np.testing.assert_allclose( - res_dict['image.affine'], - np.array([[-1., 0., 0., 32.], [0., 2., 0., -40.], [0., 0., 3., -16.], [0., 0., 0., 1.]])) + res_dict["image.affine"], + np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, 
2.0, 0.0, -40.0], [0.0, 0.0, 3.0, -16.0], [0.0, 0.0, 0.0, 1.0]]), + ) def test_load_spacingd_rotate_non_diag_ornt(self): - data = {'image': FILES[0]} - data_dict = LoadNiftid(keys='image')(data) - data_dict = AddChanneld(keys='image')(data_dict) - res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='nearest')(data_dict) - res_dict = Orientationd(keys='image', axcodes='LPI')(res_dict) - np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine']) + data = {"image": FILES[0]} + data_dict = LoadNiftid(keys="image")(data) + data_dict = AddChanneld(keys="image")(data_dict) + res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="nearest")(data_dict) + res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict) + np.testing.assert_allclose(data_dict["image.affine"], res_dict["image.original_affine"]) np.testing.assert_allclose( - res_dict['image.affine'], - np.array([[-1., 0., 0., 32.], [0., -2., 0., 40.], [0., 0., -3., 32.], [0., 0., 0., 1.]])) + res_dict["image.affine"], + np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, -2.0, 0.0, 40.0], [0.0, 0.0, -3.0, 32.0], [0.0, 0.0, 0.0, 1.0]]), + ) def test_load_spacingd_non_diag_ornt(self): - data = {'image': FILES[1]} - data_dict = LoadNiftid(keys='image')(data) - data_dict = AddChanneld(keys='image')(data_dict) - affine = data_dict['image.affine'] - data_dict['image.original_affine'] = data_dict['image.affine'] = \ + data = {"image": FILES[1]} + data_dict = LoadNiftid(keys="image")(data) + data_dict = AddChanneld(keys="image")(data_dict) + affine = data_dict["image.affine"] + data_dict["image.original_affine"] = data_dict["image.affine"] = ( np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine - res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='constant')(data_dict) - res_dict = Orientationd(keys='image', axcodes='LPI')(res_dict) - np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine']) + ) + res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="constant")(data_dict) + res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict) + np.testing.assert_allclose(data_dict["image.affine"], res_dict["image.original_affine"]) np.testing.assert_allclose( - res_dict['image.affine'], - np.array([[-3., 0., 0., 56.4005909], [0., -2., 0., 52.02241516], [0., 0., -1., 35.29789734], - [0., 0., 0., 1.]])) + res_dict["image.affine"], + np.array( + [ + [-3.0, 0.0, 0.0, 56.4005909], + [0.0, -2.0, 0.0, 52.02241516], + [0.0, 0.0, -1.0, 35.29789734], + [0.0, 0.0, 0.0, 1.0], + ] + ), + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_map_transform.py b/tests/test_map_transform.py index 0809741e3b..a8aad712f5 100644 --- a/tests/test_map_transform.py +++ b/tests/test_map_transform.py @@ -16,9 +16,9 @@ from monai.transforms import MapTransform TEST_CASES = [ - ['item', ('item',)], + ["item", ("item",)], [None, (None,)], - [['item1', 'item2'], ('item1', 'item2')], + [["item1", "item2"], ("item1", "item2")], ] TEST_ILL_CASES = [ @@ -34,7 +34,6 @@ def __call__(self, data): class TestRandomizable(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_keys(self, keys, expected): transform = MapTest(keys=keys) @@ -42,9 +41,9 @@ def test_keys(self, keys, expected): @parameterized.expand(TEST_ILL_CASES) def test_wrong_keys(self, keys): - with self.assertRaisesRegex(ValueError, ''): + with self.assertRaisesRegex(ValueError, ""): MapTest(keys=keys) -if __name__ == 
'__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_nifti_header_revise.py b/tests/test_nifti_header_revise.py index 232bce992e..4bb09286c0 100644 --- a/tests/test_nifti_header_revise.py +++ b/tests/test_nifti_header_revise.py @@ -18,12 +18,11 @@ class TestRectifyHeaderSformQform(unittest.TestCase): - def test_revise_q(self): img = nib.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4)) img.header.set_zooms((0.1, 0.2, 0.3)) output = rectify_header_sform_qform(img) - expected = np.diag([0.1, 0.2, 0.3, 1.]) + expected = np.diag([0.1, 0.2, 0.3, 1.0]) np.testing.assert_allclose(output.affine, expected) def test_revise_both(self): @@ -32,9 +31,9 @@ def test_revise_both(self): img.header.set_qform(np.diag([2, 3, 4, 1])) img.header.set_zooms((0.1, 0.2, 0.3)) output = rectify_header_sform_qform(img) - expected = np.diag([0.1, 0.2, 0.3, 1.]) + expected = np.diag([0.1, 0.2, 0.3, 1.0]) np.testing.assert_allclose(output.affine, expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_nifti_rw.py b/tests/test_nifti_rw.py index 08bdf133ee..63e6773067 100644 --- a/tests/test_nifti_rw.py +++ b/tests/test_nifti_rw.py @@ -22,32 +22,30 @@ from tests.utils import make_nifti_image TEST_IMAGE = np.arange(24).reshape((2, 4, 3)) -TEST_AFFINE = np.array([[-5.3, 0., 0., 102.01], [0., 0.52, 2.17, -7.50], [-0., 1.98, -0.26, -23.12], [0., 0., 0., 1.]]) +TEST_AFFINE = np.array( + [[-5.3, 0.0, 0.0, 102.01], [0.0, 0.52, 2.17, -7.50], [-0.0, 1.98, -0.26, -23.12], [0.0, 0.0, 0.0, 1.0]] +) TEST_CASES = [ - [TEST_IMAGE, TEST_AFFINE, - dict(as_closest_canonical=True, image_only=False), - np.arange(24).reshape((2, 4, 3))], + [TEST_IMAGE, TEST_AFFINE, dict(as_closest_canonical=True, image_only=False), np.arange(24).reshape((2, 4, 3))], [ - TEST_IMAGE, TEST_AFFINE, + TEST_IMAGE, + TEST_AFFINE, dict(as_closest_canonical=True, image_only=True), - np.array([[[12., 15., 18., 21.], [13., 16., 19., 22.], [14., 17., 20., 23.]], - [[0., 3., 6., 9.], [1., 4., 7., 10.], [2., 5., 8., 11.]]]) + np.array( + [ + [[12.0, 15.0, 18.0, 21.0], [13.0, 16.0, 19.0, 22.0], [14.0, 17.0, 20.0, 23.0]], + [[0.0, 3.0, 6.0, 9.0], [1.0, 4.0, 7.0, 10.0], [2.0, 5.0, 8.0, 11.0]], + ] + ), ], - [TEST_IMAGE, TEST_AFFINE, - dict(as_closest_canonical=False, image_only=True), - np.arange(24).reshape((2, 4, 3))], - [TEST_IMAGE, TEST_AFFINE, - dict(as_closest_canonical=False, image_only=False), - np.arange(24).reshape((2, 4, 3))], - [TEST_IMAGE, None, - dict(as_closest_canonical=False, image_only=False), - np.arange(24).reshape((2, 4, 3))], + [TEST_IMAGE, TEST_AFFINE, dict(as_closest_canonical=False, image_only=True), np.arange(24).reshape((2, 4, 3))], + [TEST_IMAGE, TEST_AFFINE, dict(as_closest_canonical=False, image_only=False), np.arange(24).reshape((2, 4, 3))], + [TEST_IMAGE, None, dict(as_closest_canonical=False, image_only=False), np.arange(24).reshape((2, 4, 3))], ] class TestNiftiLoadRead(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_orientation(self, array, affine, reader_param, expected): test_image = make_nifti_image(array, affine) @@ -65,7 +63,7 @@ def test_orientation(self, array, affine, reader_param, expected): # write test cases if header is not None: - write_nifti(data_array, test_image, header['affine'], header.get('original_affine', None)) + write_nifti(data_array, test_image, header["affine"], header.get("original_affine", None)) elif affine is not None: write_nifti(data_array, test_image, affine) else: @@ -84,17 +82,19 @@ def test_consistency(self): 
np.set_printoptions(suppress=True, precision=3) test_image = make_nifti_image(np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1])) data, header = LoadNifti(as_closest_canonical=False)(test_image) - data, original_affine, new_affine = Spacing([0.8, 0.8, 0.8])(data[None], header['affine'], interp_order=0) - data, _, new_affine = Orientation('ILP')(data, new_affine) + data, original_affine, new_affine = Spacing([0.8, 0.8, 0.8])(data[None], header["affine"], interp_order=0) + data, _, new_affine = Orientation("ILP")(data, new_affine) if os.path.exists(test_image): os.remove(test_image) - write_nifti(data[0], test_image, new_affine, original_affine, interp_order=0, mode='reflect') + write_nifti(data[0], test_image, new_affine, original_affine, interp_order=0, mode="reflect") saved = nib.load(test_image) saved_data = saved.get_fdata() np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7) if os.path.exists(test_image): os.remove(test_image) - write_nifti(data[0], test_image, new_affine, original_affine, interp_order=0, mode='reflect', output_shape=(1, 8, 8)) + write_nifti( + data[0], test_image, new_affine, original_affine, interp_order=0, mode="reflect", output_shape=(1, 8, 8) + ) saved = nib.load(test_image) saved_data = saved.get_fdata() np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7) @@ -103,21 +103,21 @@ def test_consistency(self): def test_write_1d(self): with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(5).reshape(-1) - write_nifti(img, image_name, affine=np.diag([1, 1, 1]), target_affine=np.diag([1.4, 2., 1])) + write_nifti(img, image_name, affine=np.diag([1, 1, 1]), target_affine=np.diag([1.4, 2.0, 1])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [0, 1, 3, 0]) np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1])) with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(5).reshape(-1) write_nifti(img, image_name, affine=[[1]], target_affine=[[1.4]]) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [0, 1, 3, 0]) np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1])) with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(5).reshape(-1) write_nifti(img, image_name, affine=np.diag([1.5, 1.5, 1.5]), target_affine=np.diag([1.5, 1.5, 1.5])) out = nib.load(image_name) @@ -126,69 +126,71 @@ def test_write_1d(self): def test_write_2d(self): with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(6).reshape((2, 3)) write_nifti(img, image_name, affine=np.diag([1]), target_affine=np.diag([1.4])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [[0, 1, 2], [0, 0, 0]]) np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1])) with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(5).reshape((1, 5)) - write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2., 1, 3, 5])) + write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), 
target_affine=np.diag([1.4, 2.0, 1, 3, 5])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [[0, 2, 4]]) np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 1, 1])) def test_write_3d(self): with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(6).reshape((1, 2, 3)) write_nifti(img, image_name, affine=np.diag([1]), target_affine=np.diag([1.4])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [[[0, 1, 2], [3, 4, 5]]]) np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1])) with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(5).reshape((1, 1, 5)) - write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2., 2, 3, 5])) + write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 2, 3, 5])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [[[0, 2, 4]]]) np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 2, 1])) def test_write_4d(self): with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(6).reshape((1, 1, 3, 2)) write_nifti(img, image_name, affine=np.diag([1.4, 1]), target_affine=np.diag([1, 1.4, 1])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [[[[0, 1], [2, 3], [4, 5]]]]) np.testing.assert_allclose(out.affine, np.diag([1, 1.4, 1, 1])) with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(5).reshape((1, 1, 5, 1)) - write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2., 2, 3, 5])) + write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 2, 3, 5])) out = nib.load(image_name) np.testing.assert_allclose(out.get_fdata(), [[[[0], [2], [4]]]]) np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 2, 1])) def test_write_5d(self): with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(12).reshape((1, 1, 3, 2, 2)) write_nifti(img, image_name, affine=np.diag([1]), target_affine=np.diag([1.4])) out = nib.load(image_name) np.testing.assert_allclose( - out.get_fdata(), np.array([[[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]], [[8., 9.], [10., 11.]]]]])) + out.get_fdata(), + np.array([[[[[0.0, 1.0], [2.0, 3.0]], [[4.0, 5.0], [6.0, 7.0]], [[8.0, 9.0], [10.0, 11.0]]]]]), + ) np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1])) with tempfile.TemporaryDirectory() as out_dir: - image_name = os.path.join(out_dir, 'test.nii.gz') + image_name = os.path.join(out_dir, "test.nii.gz") img = np.arange(10).reshape((1, 1, 5, 1, 2)) - write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2., 2, 3, 5])) + write_nifti(img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 2, 3, 5])) out = nib.load(image_name) - np.testing.assert_allclose(out.get_fdata(), np.array([[[[[0., 1.]], [[4., 5.]], [[8., 9.]]]]])) + np.testing.assert_allclose(out.get_fdata(), np.array([[[[[0.0, 1.0]], [[4.0, 5.0]], [[8.0, 9.0]]]]])) 
np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 2, 1])) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_nifti_saver.py b/tests/test_nifti_saver.py index 13dab73dda..406dbc632e 100644 --- a/tests/test_nifti_saver.py +++ b/tests/test_nifti_saver.py @@ -19,20 +19,19 @@ class TestNiftiSaver(unittest.TestCase): - def test_saved_content(self): - default_dir = os.path.join('.', 'tempdir') + default_dir = os.path.join(".", "tempdir") shutil.rmtree(default_dir, ignore_errors=True) - saver = NiftiSaver(output_dir=default_dir, output_postfix='seg', output_ext='.nii.gz') + saver = NiftiSaver(output_dir=default_dir, output_postfix="seg", output_ext=".nii.gz") - meta_data = {'filename_or_obj': ['testfile' + str(i) for i in range(8)]} + meta_data = {"filename_or_obj": ["testfile" + str(i) for i in range(8)]} saver.save_batch(torch.zeros(8, 1, 2, 2), meta_data) for i in range(8): - filepath = os.path.join('testfile' + str(i), 'testfile' + str(i) + '_seg.nii.gz') + filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg.nii.gz") self.assertTrue(os.path.exists(os.path.join(default_dir, filepath))) shutil.rmtree(default_dir) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_normalize_intensity.py b/tests/test_normalize_intensity.py index 677e672903..7d256175de 100644 --- a/tests/test_normalize_intensity.py +++ b/tests/test_normalize_intensity.py @@ -16,35 +16,18 @@ from monai.transforms import NormalizeIntensity from tests.utils import NumpyImageTestCase2D -TEST_CASE_1 = [ - { - 'nonzero': True - }, - np.array([0., 3., 0., 4.]), - np.array([0., -1., 0., 1.]) -] +TEST_CASE_1 = [{"nonzero": True}, np.array([0.0, 3.0, 0.0, 4.0]), np.array([0.0, -1.0, 0.0, 1.0])] TEST_CASE_2 = [ - { - 'subtrahend': np.array([3.5, 3.5, 3.5, 3.5]), - 'divisor': np.array([0.5, 0.5, 0.5, 0.5]), - 'nonzero': True - }, - np.array([0., 3., 0., 4.]), - np.array([0., -1., 0., 1.]) + {"subtrahend": np.array([3.5, 3.5, 3.5, 3.5]), "divisor": np.array([0.5, 0.5, 0.5, 0.5]), "nonzero": True}, + np.array([0.0, 3.0, 0.0, 4.0]), + np.array([0.0, -1.0, 0.0, 1.0]), ] -TEST_CASE_3 = [ - { - 'nonzero': True - }, - np.array([0., 0., 0., 0.]), - np.array([0., 0., 0., 0.]) -] +TEST_CASE_3 = [{"nonzero": True}, np.array([0.0, 0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 0.0])] class TestNormalizeIntensity(NumpyImageTestCase2D): - def test_default(self): normalizer = NormalizeIntensity() normalized = normalizer(self.imt) @@ -58,10 +41,10 @@ def test_nonzero(self, input_param, input_data, expected_data): def test_channel_wise(self): normalizer = NormalizeIntensity(nonzero=True, channel_wise=True) - input_data = np.array([[0., 3., 0., 4.], [0., 4., 0., 5.]]) - expected = np.array([[0., -1., 0., 1.], [0., -1., 0., 1.]]) + input_data = np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]]) + expected = np.array([[0.0, -1.0, 0.0, 1.0], [0.0, -1.0, 0.0, 1.0]]) np.testing.assert_allclose(expected, normalizer(input_data)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_normalize_intensityd.py b/tests/test_normalize_intensityd.py index aa3fbcca5c..f3f2489425 100644 --- a/tests/test_normalize_intensityd.py +++ b/tests/test_normalize_intensityd.py @@ -17,39 +17,32 @@ from tests.utils import NumpyImageTestCase2D TEST_CASE_1 = [ - { - 'keys': ['img'], - 'nonzero': True - }, - {'img': np.array([0., 3., 0., 4.])}, - np.array([0., -1., 0., 1.]) + {"keys": ["img"], "nonzero": True}, + {"img": np.array([0.0, 
3.0, 0.0, 4.0])}, + np.array([0.0, -1.0, 0.0, 1.0]), ] TEST_CASE_2 = [ { - 'keys': ['img'], - 'subtrahend': np.array([3.5, 3.5, 3.5, 3.5]), - 'divisor': np.array([0.5, 0.5, 0.5, 0.5]), - 'nonzero': True + "keys": ["img"], + "subtrahend": np.array([3.5, 3.5, 3.5, 3.5]), + "divisor": np.array([0.5, 0.5, 0.5, 0.5]), + "nonzero": True, }, - {'img': np.array([0., 3., 0., 4.])}, - np.array([0., -1., 0., 1.]) + {"img": np.array([0.0, 3.0, 0.0, 4.0])}, + np.array([0.0, -1.0, 0.0, 1.0]), ] TEST_CASE_3 = [ - { - 'keys': ['img'], - 'nonzero': True - }, - {'img': np.array([0., 0., 0., 0.])}, - np.array([0., 0., 0., 0.]) + {"keys": ["img"], "nonzero": True}, + {"img": np.array([0.0, 0.0, 0.0, 0.0])}, + np.array([0.0, 0.0, 0.0, 0.0]), ] class TestNormalizeIntensityd(NumpyImageTestCase2D): - def test_image_normalize_intensityd(self): - key = 'img' + key = "img" normalizer = NormalizeIntensityd(keys=[key]) normalized = normalizer({key: self.imt}) expected = (self.imt - np.mean(self.imt)) / np.std(self.imt) @@ -58,15 +51,15 @@ def test_image_normalize_intensityd(self): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_nonzero(self, input_param, input_data, expected_data): normalizer = NormalizeIntensityd(**input_param) - np.testing.assert_allclose(expected_data, normalizer(input_data)['img']) + np.testing.assert_allclose(expected_data, normalizer(input_data)["img"]) def test_channel_wise(self): - key = 'img' + key = "img" normalizer = NormalizeIntensityd(keys=key, nonzero=True, channel_wise=True) - input_data = {key: np.array([[0., 3., 0., 4.], [0., 4., 0., 5.]])} - expected = np.array([[0., -1., 0., 1.], [0., -1., 0., 1.]]) + input_data = {key: np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]])} + expected = np.array([[0.0, -1.0, 0.0, 1.0], [0.0, -1.0, 0.0, 1.0]]) np.testing.assert_allclose(expected, normalizer(input_data)[key]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_orientation.py b/tests/test_orientation.py index 305e680e6b..4f64d1678b 100644 --- a/tests/test_orientation.py +++ b/tests/test_orientation.py @@ -18,74 +18,107 @@ from monai.transforms import Orientation, create_rotate, create_translate TEST_CASES = [ - [{'axcodes': 'RAS'}, - np.arange(12).reshape((2, 1, 2, 3)), {'affine': np.eye(4)}, - np.arange(12).reshape((2, 1, 2, 3)), 'RAS'], - [{'axcodes': 'ALS'}, - np.arange(12).reshape((2, 1, 2, 3)), {'affine': np.diag([-1, -1, 1, 1])}, - np.array([[[[3, 4, 5]], [[0, 1, 2]]], [[[9, 10, 11]], [[6, 7, 8]]]]), 'ALS'], - [{'axcodes': 'RAS'}, - np.arange(12).reshape((2, 1, 2, 3)), {'affine': np.diag([-1, -1, 1, 1])}, - np.array([[[[3, 4, 5], [0, 1, 2]]], [[[9, 10, 11], [6, 7, 8]]]]), 'RAS'], - [{'axcodes': 'AL'}, - np.arange(6).reshape((2, 1, 3)), {'affine': np.eye(3)}, - np.array([[[0], [1], [2]], [[3], [4], [5]]]), 'AL'], - [{'axcodes': 'L'}, - np.arange(6).reshape((2, 3)), {'affine': np.eye(2)}, - np.array([[2, 1, 0], [5, 4, 3]]), 'L'], - [{'axcodes': 'L'}, - np.arange(6).reshape((2, 3)), {'affine': np.eye(2)}, - np.array([[2, 1, 0], [5, 4, 3]]), 'L'], - [{'axcodes': 'L'}, - np.arange(6).reshape((2, 3)), {'affine': np.diag([-1, 1])}, - np.arange(6).reshape((2, 3)), 'L'], - [{'axcodes': 'LPS'}, - np.arange(12).reshape((2, 1, 2, 3)), { - 'affine': - create_translate(3, (10, 20, 30)) @ - create_rotate(3, (np.pi / 2, np.pi / 2, np.pi / 4)) @ np.diag([-1, 1, 1, 1])}, - np.array([[[[2, 5]], [[1, 4]], [[0, 3]]], [[[8, 11]], [[7, 10]], [[6, 9]]]]), 'LPS'], - [{'as_closest_canonical': True}, - np.arange(12).reshape((2, 1, 2, 3)), 
{ - 'affine': - create_translate(3, (10, 20, 30)) @ - create_rotate(3, (np.pi / 2, np.pi / 2, np.pi / 4)) @ np.diag([-1, 1, 1, 1])}, - np.array([[[[0, 3]], [[1, 4]], [[2, 5]]], [[[6, 9]], [[7, 10]], [[8, 11]]]]), 'RAS'], - [{'as_closest_canonical': True}, - np.arange(6).reshape((1, 2, 3)), - {'affine': create_translate(2, (10, 20)) @ create_rotate(2, (np.pi / 3)) @ np.diag([-1, -0.2, 1])}, - np.array([[[3, 0], [4, 1], [5, 2]]]), 'RA'], - [{'axcodes': 'LP'}, - np.arange(6).reshape((1, 2, 3)), - {'affine': create_translate(2, (10, 20)) @ create_rotate(2, (np.pi / 3)) @ np.diag([-1, -0.2, 1])}, - np.array([[[2, 5], [1, 4], [0, 3]]]), 'LP'], - [{'axcodes': 'LPID', 'labels': tuple(zip('LPIC', 'RASD'))}, - np.zeros((1, 2, 3, 4, 5)), {'affine': np.diag([-1, -0.2, -1, 1, 1])}, - np.zeros((1, 2, 3, 4, 5)), 'LPID'], - [{'as_closest_canonical': True, 'labels': tuple(zip('LPIC', 'RASD'))}, - np.zeros((1, 2, 3, 4, 5)), {'affine': np.diag([-1, -0.2, -1, 1, 1])}, - np.zeros((1, 2, 3, 4, 5)), 'RASD'], + [ + {"axcodes": "RAS"}, + np.arange(12).reshape((2, 1, 2, 3)), + {"affine": np.eye(4)}, + np.arange(12).reshape((2, 1, 2, 3)), + "RAS", + ], + [ + {"axcodes": "ALS"}, + np.arange(12).reshape((2, 1, 2, 3)), + {"affine": np.diag([-1, -1, 1, 1])}, + np.array([[[[3, 4, 5]], [[0, 1, 2]]], [[[9, 10, 11]], [[6, 7, 8]]]]), + "ALS", + ], + [ + {"axcodes": "RAS"}, + np.arange(12).reshape((2, 1, 2, 3)), + {"affine": np.diag([-1, -1, 1, 1])}, + np.array([[[[3, 4, 5], [0, 1, 2]]], [[[9, 10, 11], [6, 7, 8]]]]), + "RAS", + ], + [ + {"axcodes": "AL"}, + np.arange(6).reshape((2, 1, 3)), + {"affine": np.eye(3)}, + np.array([[[0], [1], [2]], [[3], [4], [5]]]), + "AL", + ], + [{"axcodes": "L"}, np.arange(6).reshape((2, 3)), {"affine": np.eye(2)}, np.array([[2, 1, 0], [5, 4, 3]]), "L"], + [{"axcodes": "L"}, np.arange(6).reshape((2, 3)), {"affine": np.eye(2)}, np.array([[2, 1, 0], [5, 4, 3]]), "L"], + [{"axcodes": "L"}, np.arange(6).reshape((2, 3)), {"affine": np.diag([-1, 1])}, np.arange(6).reshape((2, 3)), "L"], + [ + {"axcodes": "LPS"}, + np.arange(12).reshape((2, 1, 2, 3)), + { + "affine": create_translate(3, (10, 20, 30)) + @ create_rotate(3, (np.pi / 2, np.pi / 2, np.pi / 4)) + @ np.diag([-1, 1, 1, 1]) + }, + np.array([[[[2, 5]], [[1, 4]], [[0, 3]]], [[[8, 11]], [[7, 10]], [[6, 9]]]]), + "LPS", + ], + [ + {"as_closest_canonical": True}, + np.arange(12).reshape((2, 1, 2, 3)), + { + "affine": create_translate(3, (10, 20, 30)) + @ create_rotate(3, (np.pi / 2, np.pi / 2, np.pi / 4)) + @ np.diag([-1, 1, 1, 1]) + }, + np.array([[[[0, 3]], [[1, 4]], [[2, 5]]], [[[6, 9]], [[7, 10]], [[8, 11]]]]), + "RAS", + ], + [ + {"as_closest_canonical": True}, + np.arange(6).reshape((1, 2, 3)), + {"affine": create_translate(2, (10, 20)) @ create_rotate(2, (np.pi / 3)) @ np.diag([-1, -0.2, 1])}, + np.array([[[3, 0], [4, 1], [5, 2]]]), + "RA", + ], + [ + {"axcodes": "LP"}, + np.arange(6).reshape((1, 2, 3)), + {"affine": create_translate(2, (10, 20)) @ create_rotate(2, (np.pi / 3)) @ np.diag([-1, -0.2, 1])}, + np.array([[[2, 5], [1, 4], [0, 3]]]), + "LP", + ], + [ + {"axcodes": "LPID", "labels": tuple(zip("LPIC", "RASD"))}, + np.zeros((1, 2, 3, 4, 5)), + {"affine": np.diag([-1, -0.2, -1, 1, 1])}, + np.zeros((1, 2, 3, 4, 5)), + "LPID", + ], + [ + {"as_closest_canonical": True, "labels": tuple(zip("LPIC", "RASD"))}, + np.zeros((1, 2, 3, 4, 5)), + {"affine": np.diag([-1, -0.2, -1, 1, 1])}, + np.zeros((1, 2, 3, 4, 5)), + "RASD", + ], ] ILL_CASES = [ # no axcodes or as_cloest_canonical - [{}, np.arange(6).reshape((2, 3)), 'L'], + [{}, 
np.arange(6).reshape((2, 3)), "L"], # too short axcodes - [{'axcodes': 'RA'}, np.arange(12).reshape((2, 1, 2, 3)), {'affine': np.eye(4)}], + [{"axcodes": "RA"}, np.arange(12).reshape((2, 1, 2, 3)), {"affine": np.eye(4)}], ] class TestOrientationCase(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_ornt(self, init_param, img, data_param, expected_data, expected_code): ornt = Orientation(**init_param) res = ornt(img, **data_param) np.testing.assert_allclose(res[0], expected_data) - original_affine = data_param['affine'] + original_affine = data_param["affine"] np.testing.assert_allclose(original_affine, res[1]) new_code = nib.orientations.aff2axcodes(res[2], labels=ornt.labels) - self.assertEqual(''.join(new_code), expected_code) + self.assertEqual("".join(new_code), expected_code) @parameterized.expand(ILL_CASES) def test_bad_params(self, init_param, img, data_param): @@ -93,5 +126,5 @@ def test_bad_params(self, init_param, img, data_param): Orientation(**init_param)(img, **data_param) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_orientationd.py b/tests/test_orientationd.py index 4c19dd11ab..8df9e22a6b 100644 --- a/tests/test_orientationd.py +++ b/tests/test_orientationd.py @@ -18,61 +18,66 @@ class TestOrientationdCase(unittest.TestCase): - def test_orntd(self): - data = {'seg': np.ones((2, 1, 2, 3)), 'seg.affine': np.eye(4)} - ornt = Orientationd(keys='seg', axcodes='RAS') + data = {"seg": np.ones((2, 1, 2, 3)), "seg.affine": np.eye(4)} + ornt = Orientationd(keys="seg", axcodes="RAS") res = ornt(data) - np.testing.assert_allclose(res['seg'].shape, (2, 1, 2, 3)) - code = nib.aff2axcodes(res['seg.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('R', 'A', 'S')) + np.testing.assert_allclose(res["seg"].shape, (2, 1, 2, 3)) + code = nib.aff2axcodes(res["seg.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("R", "A", "S")) def test_orntd_3d(self): data = { - 'seg': np.ones((2, 1, 2, 3)), 'img': np.ones((2, 1, 2, 3)), 'seg.affine': np.eye(4), 'img.affine': np.eye(4) + "seg": np.ones((2, 1, 2, 3)), + "img": np.ones((2, 1, 2, 3)), + "seg.affine": np.eye(4), + "img.affine": np.eye(4), } - ornt = Orientationd(keys=('img', 'seg'), axcodes='PLI') + ornt = Orientationd(keys=("img", "seg"), axcodes="PLI") res = ornt(data) - np.testing.assert_allclose(res['img'].shape, (2, 2, 1, 3)) - np.testing.assert_allclose(res['seg'].shape, (2, 2, 1, 3)) - code = nib.aff2axcodes(res['seg.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('P', 'L', 'I')) - code = nib.aff2axcodes(res['img.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('P', 'L', 'I')) + np.testing.assert_allclose(res["img"].shape, (2, 2, 1, 3)) + np.testing.assert_allclose(res["seg"].shape, (2, 2, 1, 3)) + code = nib.aff2axcodes(res["seg.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("P", "L", "I")) + code = nib.aff2axcodes(res["img.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("P", "L", "I")) def test_orntd_2d(self): - data = {'seg': np.ones((2, 1, 3)), 'img': np.ones((2, 1, 3)), 'seg.affine': np.eye(4), 'img.affine': np.eye(4)} - ornt = Orientationd(keys=('img', 'seg'), axcodes='PLI') + data = {"seg": np.ones((2, 1, 3)), "img": np.ones((2, 1, 3)), "seg.affine": np.eye(4), "img.affine": np.eye(4)} + ornt = Orientationd(keys=("img", "seg"), axcodes="PLI") res = ornt(data) - np.testing.assert_allclose(res['img'].shape, (2, 3, 1)) - code = nib.aff2axcodes(res['seg.affine'], 
ornt.ornt_transform.labels) - self.assertEqual(code, ('P', 'L', 'S')) - code = nib.aff2axcodes(res['img.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('P', 'L', 'S')) + np.testing.assert_allclose(res["img"].shape, (2, 3, 1)) + code = nib.aff2axcodes(res["seg.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("P", "L", "S")) + code = nib.aff2axcodes(res["img.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("P", "L", "S")) def test_orntd_1d(self): - data = {'seg': np.ones((2, 3)), 'img': np.ones((2, 3)), 'seg.affine': np.eye(4), 'img.affine': np.eye(4)} - ornt = Orientationd(keys=('img', 'seg'), axcodes='L') + data = {"seg": np.ones((2, 3)), "img": np.ones((2, 3)), "seg.affine": np.eye(4), "img.affine": np.eye(4)} + ornt = Orientationd(keys=("img", "seg"), axcodes="L") res = ornt(data) - np.testing.assert_allclose(res['img'].shape, (2, 3)) - code = nib.aff2axcodes(res['seg.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('L', 'A', 'S')) - code = nib.aff2axcodes(res['img.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('L', 'A', 'S')) + np.testing.assert_allclose(res["img"].shape, (2, 3)) + code = nib.aff2axcodes(res["seg.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("L", "A", "S")) + code = nib.aff2axcodes(res["img.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("L", "A", "S")) def test_orntd_canonical(self): data = { - 'seg': np.ones((2, 1, 2, 3)), 'img': np.ones((2, 1, 2, 3)), 'seg.affine': np.eye(4), 'img.affine': np.eye(4) + "seg": np.ones((2, 1, 2, 3)), + "img": np.ones((2, 1, 2, 3)), + "seg.affine": np.eye(4), + "img.affine": np.eye(4), } - ornt = Orientationd(keys=('img', 'seg'), as_closest_canonical=True) + ornt = Orientationd(keys=("img", "seg"), as_closest_canonical=True) res = ornt(data) - np.testing.assert_allclose(res['img'].shape, (2, 1, 2, 3)) - np.testing.assert_allclose(res['seg'].shape, (2, 1, 2, 3)) - code = nib.aff2axcodes(res['seg.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('R', 'A', 'S')) - code = nib.aff2axcodes(res['img.affine'], ornt.ornt_transform.labels) - self.assertEqual(code, ('R', 'A', 'S')) + np.testing.assert_allclose(res["img"].shape, (2, 1, 2, 3)) + np.testing.assert_allclose(res["seg"].shape, (2, 1, 2, 3)) + code = nib.aff2axcodes(res["seg.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("R", "A", "S")) + code = nib.aff2axcodes(res["img.affine"], ornt.ornt_transform.labels) + self.assertEqual(code, ("R", "A", "S")) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_persistentdataset.py b/tests/test_persistentdataset.py index 5407bceeab..31fc71165e 100644 --- a/tests/test_persistentdataset.py +++ b/tests/test_persistentdataset.py @@ -19,38 +19,38 @@ from monai.data import PersistentDataset from monai.transforms import Compose, LoadNiftid, SimulateDelayd -TEST_CASE_1 = [ - (128, 128, 128) -] +TEST_CASE_1 = [(128, 128, 128)] class TestDataset(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, expected_shape): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4)) tempdir = tempfile.mkdtemp() - nib.save(test_image, os.path.join(tempdir, 'test_image1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_label1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra1.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_image2.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 
'test_label2.nii.gz')) - nib.save(test_image, os.path.join(tempdir, 'test_extra2.nii.gz')) + nib.save(test_image, os.path.join(tempdir, "test_image1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra1.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_image2.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_label2.nii.gz")) + nib.save(test_image, os.path.join(tempdir, "test_extra2.nii.gz")) test_data = [ { - 'image': os.path.join(tempdir, 'test_image1.nii.gz'), - 'label': os.path.join(tempdir, 'test_label1.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra1.nii.gz') + "image": os.path.join(tempdir, "test_image1.nii.gz"), + "label": os.path.join(tempdir, "test_label1.nii.gz"), + "extra": os.path.join(tempdir, "test_extra1.nii.gz"), }, { - 'image': os.path.join(tempdir, 'test_image2.nii.gz'), - 'label': os.path.join(tempdir, 'test_label2.nii.gz'), - 'extra': os.path.join(tempdir, 'test_extra2.nii.gz') - } + "image": os.path.join(tempdir, "test_image2.nii.gz"), + "label": os.path.join(tempdir, "test_label2.nii.gz"), + "extra": os.path.join(tempdir, "test_extra2.nii.gz"), + }, ] - test_transform = Compose([LoadNiftid(keys=['image', 'label', 'extra']), - SimulateDelayd(keys=['image', 'label', 'extra'], - delay_time=[1e-7, 1e-6, 1e-5])]) + test_transform = Compose( + [ + LoadNiftid(keys=["image", "label", "extra"]), + SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), + ] + ) dataset_precached = PersistentDataset(data=test_data, transform=test_transform, cache_dir=tempdir) data1_precached = dataset_precached[0] @@ -61,20 +61,20 @@ def test_shape(self, expected_shape): data2_postcached = dataset_postcached[1] shutil.rmtree(tempdir) - self.assertTupleEqual(data1_precached['image'].shape, expected_shape) - self.assertTupleEqual(data1_precached['label'].shape, expected_shape) - self.assertTupleEqual(data1_precached['extra'].shape, expected_shape) - self.assertTupleEqual(data2_precached['image'].shape, expected_shape) - self.assertTupleEqual(data2_precached['label'].shape, expected_shape) - self.assertTupleEqual(data2_precached['extra'].shape, expected_shape) + self.assertTupleEqual(data1_precached["image"].shape, expected_shape) + self.assertTupleEqual(data1_precached["label"].shape, expected_shape) + self.assertTupleEqual(data1_precached["extra"].shape, expected_shape) + self.assertTupleEqual(data2_precached["image"].shape, expected_shape) + self.assertTupleEqual(data2_precached["label"].shape, expected_shape) + self.assertTupleEqual(data2_precached["extra"].shape, expected_shape) - self.assertTupleEqual(data1_postcached['image'].shape, expected_shape) - self.assertTupleEqual(data1_postcached['label'].shape, expected_shape) - self.assertTupleEqual(data1_postcached['extra'].shape, expected_shape) - self.assertTupleEqual(data2_postcached['image'].shape, expected_shape) - self.assertTupleEqual(data2_postcached['label'].shape, expected_shape) - self.assertTupleEqual(data2_postcached['extra'].shape, expected_shape) + self.assertTupleEqual(data1_postcached["image"].shape, expected_shape) + self.assertTupleEqual(data1_postcached["label"].shape, expected_shape) + self.assertTupleEqual(data1_postcached["extra"].shape, expected_shape) + self.assertTupleEqual(data2_postcached["image"].shape, expected_shape) + self.assertTupleEqual(data2_postcached["label"].shape, expected_shape) + self.assertTupleEqual(data2_postcached["extra"].shape, expected_shape) -if __name__ == 
'__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_plot_2d_or_3d_image.py b/tests/test_plot_2d_or_3d_image.py index a9cf850f0e..efb68fe3a0 100644 --- a/tests/test_plot_2d_or_3d_image.py +++ b/tests/test_plot_2d_or_3d_image.py @@ -18,32 +18,21 @@ from parameterized import parameterized from monai.visualize import plot_2d_or_3d_image -TEST_CASE_1 = [ - (1, 1, 10, 10) -] +TEST_CASE_1 = [(1, 1, 10, 10)] -TEST_CASE_2 = [ - (1, 3, 10, 10) -] +TEST_CASE_2 = [(1, 3, 10, 10)] -TEST_CASE_3 = [ - (1, 4, 10, 10) -] +TEST_CASE_3 = [(1, 4, 10, 10)] -TEST_CASE_4 = [ - (1, 1, 10, 10, 10) -] +TEST_CASE_4 = [(1, 1, 10, 10, 10)] -TEST_CASE_5 = [ - (1, 3, 10, 10, 10) -] +TEST_CASE_5 = [(1, 3, 10, 10, 10)] class TestPlot2dOr3dImage(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) def test_tb_image_shape(self, shape): - default_dir = os.path.join('.', 'runs') + default_dir = os.path.join(".", "runs") shutil.rmtree(default_dir, ignore_errors=True) plot_2d_or_3d_image(torch.zeros(shape), 0, SummaryWriter()) @@ -53,5 +42,5 @@ def test_tb_image_shape(self, shape): shutil.rmtree(default_dir, ignore_errors=True) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_adjust_contrast.py b/tests/test_rand_adjust_contrast.py index 224cd157cb..ce3c965fa0 100644 --- a/tests/test_rand_adjust_contrast.py +++ b/tests/test_rand_adjust_contrast.py @@ -15,17 +15,12 @@ from monai.transforms import RandAdjustContrast from tests.utils import NumpyImageTestCase2D -TEST_CASE_1 = [ - (0.5, 4.5) -] +TEST_CASE_1 = [(0.5, 4.5)] -TEST_CASE_2 = [ - 1.5 -] +TEST_CASE_2 = [1.5] class TestRandAdjustContrast(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_correct_results(self, gamma): adjuster = RandAdjustContrast(prob=1.0, gamma=gamma) @@ -33,10 +28,11 @@ def test_correct_results(self, gamma): epsilon = 1e-7 img_min = self.imt.min() img_range = self.imt.max() - img_min - expected = np.power(((self.imt - img_min) / float(img_range + epsilon)), adjuster.gamma_value) * \ - img_range + img_min + expected = ( + np.power(((self.imt - img_min) / float(img_range + epsilon)), adjuster.gamma_value) * img_range + img_min + ) np.testing.assert_allclose(expected, result, rtol=1e-05) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_adjust_contrastd.py b/tests/test_rand_adjust_contrastd.py index bdbeb015bb..63b00fbf29 100644 --- a/tests/test_rand_adjust_contrastd.py +++ b/tests/test_rand_adjust_contrastd.py @@ -15,28 +15,24 @@ from monai.transforms import RandAdjustContrastd from tests.utils import NumpyImageTestCase2D -TEST_CASE_1 = [ - (0.5, 4.5) -] +TEST_CASE_1 = [(0.5, 4.5)] -TEST_CASE_2 = [ - 1.5 -] +TEST_CASE_2 = [1.5] class TestRandAdjustContrastd(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_correct_results(self, gamma): - adjuster = RandAdjustContrastd('img', prob=1.0, gamma=gamma) - result = adjuster({'img': self.imt}) + adjuster = RandAdjustContrastd("img", prob=1.0, gamma=gamma) + result = adjuster({"img": self.imt}) epsilon = 1e-7 img_min = self.imt.min() img_range = self.imt.max() - img_min - expected = np.power(((self.imt - img_min) / float(img_range + epsilon)), adjuster.gamma_value) * \ - img_range + img_min - np.testing.assert_allclose(expected, result['img'], rtol=1e-05) + expected = ( + np.power(((self.imt - img_min) / float(img_range + epsilon)), adjuster.gamma_value) * 
img_range + img_min + ) + np.testing.assert_allclose(expected, result["img"], rtol=1e-05) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_affine.py b/tests/test_rand_affine.py index cec0ac2895..dfb96397c9 100644 --- a/tests/test_rand_affine.py +++ b/tests/test_rand_affine.py @@ -19,38 +19,45 @@ TEST_CASES = [ [ - dict(as_tensor_output=False, device=None), {'img': torch.ones((3, 3, 3)), 'spatial_size': (2, 2)}, - np.ones((3, 2, 2)) + dict(as_tensor_output=False, device=None), + {"img": torch.ones((3, 3, 3)), "spatial_size": (2, 2)}, + np.ones((3, 2, 2)), ], [ - dict(as_tensor_output=True, device=None), {'img': torch.ones((1, 3, 3, 3)), 'spatial_size': (2, 2, 2)}, - torch.ones((1, 2, 2, 2)) + dict(as_tensor_output=True, device=None), + {"img": torch.ones((1, 3, 3, 3)), "spatial_size": (2, 2, 2)}, + torch.ones((1, 2, 2, 2)), ], [ - dict(prob=0.9, - rotate_range=(np.pi / 2,), - shear_range=[1, 2], - translate_range=[2, 1], - as_tensor_output=True, - spatial_size=(2, 2, 2), - device=None), {'img': torch.ones((1, 3, 3, 3)), 'mode': 'bilinear'}, - torch.tensor([[[[0.0000, 0.6577], [0.9911, 1.0000]], [[0.7781, 1.0000], [1.0000, 0.4000]]]]) + dict( + prob=0.9, + rotate_range=(np.pi / 2,), + shear_range=[1, 2], + translate_range=[2, 1], + as_tensor_output=True, + spatial_size=(2, 2, 2), + device=None, + ), + {"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"}, + torch.tensor([[[[0.0000, 0.6577], [0.9911, 1.0000]], [[0.7781, 1.0000], [1.0000, 0.4000]]]]), ], [ - dict(prob=0.9, - rotate_range=(np.pi / 2,), - shear_range=[1, 2], - translate_range=[2, 1], - scale_range=[.1, .2], - as_tensor_output=True, - device=None), {'img': torch.arange(64).reshape((1, 8, 8)), 'spatial_size': (3, 3)}, - torch.tensor([[[16.9127, 13.3079, 9.7031], [26.8129, 23.2081, 19.6033], [36.7131, 33.1083, 29.5035]]]) + dict( + prob=0.9, + rotate_range=(np.pi / 2,), + shear_range=[1, 2], + translate_range=[2, 1], + scale_range=[0.1, 0.2], + as_tensor_output=True, + device=None, + ), + {"img": torch.arange(64).reshape((1, 8, 8)), "spatial_size": (3, 3)}, + torch.tensor([[[16.9127, 13.3079, 9.7031], [26.8129, 23.2081, 19.6033], [36.7131, 33.1083, 29.5035]]]), ], ] class TestRandAffine(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_affine(self, input_param, input_data, expected_val): g = RandAffine(**input_param) @@ -63,5 +70,5 @@ def test_rand_affine(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_affine_grid.py b/tests/test_rand_affine_grid.py index 99c4581813..601d2a0ce6 100644 --- a/tests/test_rand_affine_grid.py +++ b/tests/test_rand_affine_grid.py @@ -18,68 +18,170 @@ from monai.transforms import RandAffineGrid TEST_CASES = [ - [{'as_tensor_output': False, 'device': None}, {'grid': torch.ones((3, 3, 3))}, - np.ones((3, 3, 3))], - [{'rotate_range': (1, 2), 'translate_range': (3, 3, 3)}, {'grid': torch.arange(0, 27).reshape((3, 3, 3))}, - torch.tensor( - np.array([[[-32.81998, -33.910976, -35.001972], [-36.092968, -37.183964, -38.27496], - [-39.36596, -40.456955, -41.54795]], - [[2.1380205, 3.1015975, 4.0651755], [5.028752, 5.9923296, 6.955907], [7.919484, 8.883063, 9.84664]], - [[18., 19., 20.], [21., 22., 23.], [24., 25., 26.]]]))], - [{'translate_range': (3, 3, 3), 'as_tensor_output': False, 'device': torch.device('cpu:0')}, - {'spatial_size': (3, 3, 3)}, - np.array([[[[0.17881513, 
0.17881513, 0.17881513], [0.17881513, 0.17881513, 0.17881513], - [0.17881513, 0.17881513, 0.17881513]], - [[1.1788151, 1.1788151, 1.1788151], [1.1788151, 1.1788151, 1.1788151], - [1.1788151, 1.1788151, 1.1788151]], - [[2.1788151, 2.1788151, 2.1788151], [2.1788151, 2.1788151, 2.1788151], - [2.1788151, 2.1788151, 2.1788151]]], - [[[-2.283164, -2.283164, -2.283164], [-1.283164, -1.283164, -1.283164], - [-0.28316402, -0.28316402, -0.28316402]], - [[-2.283164, -2.283164, -2.283164], [-1.283164, -1.283164, -1.283164], - [-0.28316402, -0.28316402, -0.28316402]], - [[-2.283164, -2.283164, -2.283164], [-1.283164, -1.283164, -1.283164], - [-0.28316402, -0.28316402, -0.28316402]]], - [[[-2.6388912, -1.6388912, -0.6388912], [-2.6388912, -1.6388912, -0.6388912], - [-2.6388912, -1.6388912, -0.6388912]], - [[-2.6388912, -1.6388912, -0.6388912], [-2.6388912, -1.6388912, -0.6388912], - [-2.6388912, -1.6388912, -0.6388912]], - [[-2.6388912, -1.6388912, -0.6388912], [-2.6388912, -1.6388912, -0.6388912], - [-2.6388912, -1.6388912, -0.6388912]]], - [[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], - [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]]])], - [{'rotate_range': (1., 1., 1.), 'shear_range': (0.1,), 'scale_range': (1.2,)}, - {'grid': torch.arange(0, 108).reshape((4, 3, 3, 3))}, - torch.tensor( - np.array([[[[-9.4201e+00, -8.1672e+00, -6.9143e+00], [-5.6614e+00, -4.4085e+00, -3.1556e+00], - [-1.9027e+00, -6.4980e-01, 6.0310e-01]], - [[1.8560e+00, 3.1089e+00, 4.3618e+00], [5.6147e+00, 6.8676e+00, 8.1205e+00], - [9.3734e+00, 1.0626e+01, 1.1879e+01]], - [[1.3132e+01, 1.4385e+01, 1.5638e+01], [1.6891e+01, 1.8144e+01, 1.9397e+01], - [2.0650e+01, 2.1902e+01, 2.3155e+01]]], - [[[9.9383e-02, -4.8845e-01, -1.0763e+00], [-1.6641e+00, -2.2519e+00, -2.8398e+00], - [-3.4276e+00, -4.0154e+00, -4.6032e+00]], - [[-5.1911e+00, -5.7789e+00, -6.3667e+00], [-6.9546e+00, -7.5424e+00, -8.1302e+00], - [-8.7180e+00, -9.3059e+00, -9.8937e+00]], - [[-1.0482e+01, -1.1069e+01, -1.1657e+01], [-1.2245e+01, -1.2833e+01, -1.3421e+01], - [-1.4009e+01, -1.4596e+01, -1.5184e+01]]], - [[[5.9635e+01, 6.1199e+01, 6.2764e+01], [6.4328e+01, 6.5892e+01, 6.7456e+01], - [6.9021e+01, 7.0585e+01, 7.2149e+01]], - [[7.3714e+01, 7.5278e+01, 7.6842e+01], [7.8407e+01, 7.9971e+01, 8.1535e+01], - [8.3099e+01, 8.4664e+01, 8.6228e+01]], - [[8.7792e+01, 8.9357e+01, 9.0921e+01], [9.2485e+01, 9.4049e+01, 9.5614e+01], - [9.7178e+01, 9.8742e+01, 1.0031e+02]]], - [[[8.1000e+01, 8.2000e+01, 8.3000e+01], [8.4000e+01, 8.5000e+01, 8.6000e+01], - [8.7000e+01, 8.8000e+01, 8.9000e+01]], - [[9.0000e+01, 9.1000e+01, 9.2000e+01], [9.3000e+01, 9.4000e+01, 9.5000e+01], - [9.6000e+01, 9.7000e+01, 9.8000e+01]], - [[9.9000e+01, 1.0000e+02, 1.0100e+02], [1.0200e+02, 1.0300e+02, 1.0400e+02], - [1.0500e+02, 1.0600e+02, 1.0700e+02]]]]))], + [{"as_tensor_output": False, "device": None}, {"grid": torch.ones((3, 3, 3))}, np.ones((3, 3, 3))], + [ + {"rotate_range": (1, 2), "translate_range": (3, 3, 3)}, + {"grid": torch.arange(0, 27).reshape((3, 3, 3))}, + torch.tensor( + np.array( + [ + [ + [-32.81998, -33.910976, -35.001972], + [-36.092968, -37.183964, -38.27496], + [-39.36596, -40.456955, -41.54795], + ], + [[2.1380205, 3.1015975, 4.0651755], [5.028752, 5.9923296, 6.955907], [7.919484, 8.883063, 9.84664]], + [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0], [24.0, 25.0, 26.0]], + ] + ) + ), + ], + [ + {"translate_range": (3, 3, 3), "as_tensor_output": False, "device": torch.device("cpu:0")}, + {"spatial_size": (3, 3, 3)}, + np.array( + [ + [ + [ + 
[0.17881513, 0.17881513, 0.17881513], + [0.17881513, 0.17881513, 0.17881513], + [0.17881513, 0.17881513, 0.17881513], + ], + [ + [1.1788151, 1.1788151, 1.1788151], + [1.1788151, 1.1788151, 1.1788151], + [1.1788151, 1.1788151, 1.1788151], + ], + [ + [2.1788151, 2.1788151, 2.1788151], + [2.1788151, 2.1788151, 2.1788151], + [2.1788151, 2.1788151, 2.1788151], + ], + ], + [ + [ + [-2.283164, -2.283164, -2.283164], + [-1.283164, -1.283164, -1.283164], + [-0.28316402, -0.28316402, -0.28316402], + ], + [ + [-2.283164, -2.283164, -2.283164], + [-1.283164, -1.283164, -1.283164], + [-0.28316402, -0.28316402, -0.28316402], + ], + [ + [-2.283164, -2.283164, -2.283164], + [-1.283164, -1.283164, -1.283164], + [-0.28316402, -0.28316402, -0.28316402], + ], + ], + [ + [ + [-2.6388912, -1.6388912, -0.6388912], + [-2.6388912, -1.6388912, -0.6388912], + [-2.6388912, -1.6388912, -0.6388912], + ], + [ + [-2.6388912, -1.6388912, -0.6388912], + [-2.6388912, -1.6388912, -0.6388912], + [-2.6388912, -1.6388912, -0.6388912], + ], + [ + [-2.6388912, -1.6388912, -0.6388912], + [-2.6388912, -1.6388912, -0.6388912], + [-2.6388912, -1.6388912, -0.6388912], + ], + ], + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + ] + ), + ], + [ + {"rotate_range": (1.0, 1.0, 1.0), "shear_range": (0.1,), "scale_range": (1.2,)}, + {"grid": torch.arange(0, 108).reshape((4, 3, 3, 3))}, + torch.tensor( + np.array( + [ + [ + [ + [-9.4201e00, -8.1672e00, -6.9143e00], + [-5.6614e00, -4.4085e00, -3.1556e00], + [-1.9027e00, -6.4980e-01, 6.0310e-01], + ], + [ + [1.8560e00, 3.1089e00, 4.3618e00], + [5.6147e00, 6.8676e00, 8.1205e00], + [9.3734e00, 1.0626e01, 1.1879e01], + ], + [ + [1.3132e01, 1.4385e01, 1.5638e01], + [1.6891e01, 1.8144e01, 1.9397e01], + [2.0650e01, 2.1902e01, 2.3155e01], + ], + ], + [ + [ + [9.9383e-02, -4.8845e-01, -1.0763e00], + [-1.6641e00, -2.2519e00, -2.8398e00], + [-3.4276e00, -4.0154e00, -4.6032e00], + ], + [ + [-5.1911e00, -5.7789e00, -6.3667e00], + [-6.9546e00, -7.5424e00, -8.1302e00], + [-8.7180e00, -9.3059e00, -9.8937e00], + ], + [ + [-1.0482e01, -1.1069e01, -1.1657e01], + [-1.2245e01, -1.2833e01, -1.3421e01], + [-1.4009e01, -1.4596e01, -1.5184e01], + ], + ], + [ + [ + [5.9635e01, 6.1199e01, 6.2764e01], + [6.4328e01, 6.5892e01, 6.7456e01], + [6.9021e01, 7.0585e01, 7.2149e01], + ], + [ + [7.3714e01, 7.5278e01, 7.6842e01], + [7.8407e01, 7.9971e01, 8.1535e01], + [8.3099e01, 8.4664e01, 8.6228e01], + ], + [ + [8.7792e01, 8.9357e01, 9.0921e01], + [9.2485e01, 9.4049e01, 9.5614e01], + [9.7178e01, 9.8742e01, 1.0031e02], + ], + ], + [ + [ + [8.1000e01, 8.2000e01, 8.3000e01], + [8.4000e01, 8.5000e01, 8.6000e01], + [8.7000e01, 8.8000e01, 8.9000e01], + ], + [ + [9.0000e01, 9.1000e01, 9.2000e01], + [9.3000e01, 9.4000e01, 9.5000e01], + [9.6000e01, 9.7000e01, 9.8000e01], + ], + [ + [9.9000e01, 1.0000e02, 1.0100e02], + [1.0200e02, 1.0300e02, 1.0400e02], + [1.0500e02, 1.0600e02, 1.0700e02], + ], + ], + ] + ) + ), + ], ] class TestRandAffineGrid(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_affine_grid(self, input_param, input_data, expected_val): g = RandAffineGrid(**input_param) @@ -92,5 +194,5 @@ def test_rand_affine_grid(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_affined.py b/tests/test_rand_affined.py index 
5d6214fb05..d9cd03767b 100644 --- a/tests/test_rand_affined.py +++ b/tests/test_rand_affined.py @@ -19,59 +19,68 @@ TEST_CASES = [ [ - dict(as_tensor_output=False, device=None, spatial_size=(2, 2), keys=('img', 'seg')), - {'img': torch.ones((3, 3, 3)), 'seg': torch.ones((3, 3, 3))}, - np.ones((3, 2, 2)) + dict(as_tensor_output=False, device=None, spatial_size=(2, 2), keys=("img", "seg")), + {"img": torch.ones((3, 3, 3)), "seg": torch.ones((3, 3, 3))}, + np.ones((3, 2, 2)), ], [ - dict(as_tensor_output=True, device=None, spatial_size=(2, 2, 2), keys=('img', 'seg')), - {'img': torch.ones((1, 3, 3, 3)), 'seg': torch.ones((1, 3, 3, 3))}, - torch.ones((1, 2, 2, 2)) + dict(as_tensor_output=True, device=None, spatial_size=(2, 2, 2), keys=("img", "seg")), + {"img": torch.ones((1, 3, 3, 3)), "seg": torch.ones((1, 3, 3, 3))}, + torch.ones((1, 2, 2, 2)), ], [ - dict(prob=0.9, - rotate_range=(np.pi / 2,), - shear_range=[1, 2], - translate_range=[2, 1], - as_tensor_output=True, - spatial_size=(2, 2, 2), - device=None, - keys=('img', 'seg'), - mode='bilinear'), {'img': torch.ones((1, 3, 3, 3)), 'seg': torch.ones((1, 3, 3, 3))}, - torch.tensor([[[[0.0000, 0.6577], [0.9911, 1.0000]], [[0.7781, 1.0000], [1.0000, 0.4000]]]]) + dict( + prob=0.9, + rotate_range=(np.pi / 2,), + shear_range=[1, 2], + translate_range=[2, 1], + as_tensor_output=True, + spatial_size=(2, 2, 2), + device=None, + keys=("img", "seg"), + mode="bilinear", + ), + {"img": torch.ones((1, 3, 3, 3)), "seg": torch.ones((1, 3, 3, 3))}, + torch.tensor([[[[0.0000, 0.6577], [0.9911, 1.0000]], [[0.7781, 1.0000], [1.0000, 0.4000]]]]), ], [ - dict(prob=0.9, - rotate_range=(np.pi / 2,), - shear_range=[1, 2], - translate_range=[2, 1], - scale_range=[.1, .2], - as_tensor_output=True, - spatial_size=(3, 3), - keys=('img', 'seg'), - device=None), {'img': torch.arange(64).reshape((1, 8, 8)), 'seg': torch.arange(64).reshape((1, 8, 8))}, - torch.tensor([[[16.9127, 13.3079, 9.7031], [26.8129, 23.2081, 19.6033], [36.7131, 33.1083, 29.5035]]]) + dict( + prob=0.9, + rotate_range=(np.pi / 2,), + shear_range=[1, 2], + translate_range=[2, 1], + scale_range=[0.1, 0.2], + as_tensor_output=True, + spatial_size=(3, 3), + keys=("img", "seg"), + device=None, + ), + {"img": torch.arange(64).reshape((1, 8, 8)), "seg": torch.arange(64).reshape((1, 8, 8))}, + torch.tensor([[[16.9127, 13.3079, 9.7031], [26.8129, 23.2081, 19.6033], [36.7131, 33.1083, 29.5035]]]), ], [ - dict(prob=0.9, - mode=('bilinear', 'nearest'), - rotate_range=(np.pi / 2,), - shear_range=[1, 2], - translate_range=[2, 1], - scale_range=[.1, .2], - as_tensor_output=False, - spatial_size=(3, 3), - keys=('img', 'seg'), - device=torch.device('cpu:0')), - {'img': torch.arange(64).reshape((1, 8, 8)), 'seg': torch.arange(64).reshape((1, 8, 8))}, - {'img': np.array([[[16.9127, 13.3079, 9.7031], [26.8129, 23.2081, 19.6033], [36.7131, 33.1083, 29.5035]]]), - 'seg': np.array([[[19., 12., 12.], [27., 20., 21.], [35., 36., 29.]]])} + dict( + prob=0.9, + mode=("bilinear", "nearest"), + rotate_range=(np.pi / 2,), + shear_range=[1, 2], + translate_range=[2, 1], + scale_range=[0.1, 0.2], + as_tensor_output=False, + spatial_size=(3, 3), + keys=("img", "seg"), + device=torch.device("cpu:0"), + ), + {"img": torch.arange(64).reshape((1, 8, 8)), "seg": torch.arange(64).reshape((1, 8, 8))}, + { + "img": np.array([[[16.9127, 13.3079, 9.7031], [26.8129, 23.2081, 19.6033], [36.7131, 33.1083, 29.5035]]]), + "seg": np.array([[[19.0, 12.0, 12.0], [27.0, 20.0, 21.0], [35.0, 36.0, 29.0]]]), + }, ], ] class 
TestRandAffined(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_affined(self, input_param, input_data, expected_val): g = RandAffined(**input_param).set_random_state(123) @@ -86,5 +95,5 @@ def test_rand_affined(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_crop_by_pos_neg_labeld.py b/tests/test_rand_crop_by_pos_neg_labeld.py index c96d027adf..909c110abf 100644 --- a/tests/test_rand_crop_by_pos_neg_labeld.py +++ b/tests/test_rand_crop_by_pos_neg_labeld.py @@ -16,21 +16,21 @@ TEST_CASE_1 = [ { - 'keys': ['image', 'extral', 'label'], - 'label_key': 'label', - 'size': [2, 2, 2], - 'pos': 1, - 'neg': 1, - 'num_samples': 2, - 'image_key': None, - 'image_threshold': 0 + "keys": ["image", "extral", "label"], + "label_key": "label", + "size": [2, 2, 2], + "pos": 1, + "neg": 1, + "num_samples": 2, + "image_key": None, + "image_threshold": 0, }, { - 'image': np.random.randint(0, 2, size=[3, 3, 3, 3]), - 'extral': np.random.randint(0, 2, size=[3, 3, 3, 3]), - 'label': np.random.randint(0, 2, size=[3, 3, 3, 3]), - 'affine': np.eye(3), - 'shape': 'CHWD' + "image": np.random.randint(0, 2, size=[3, 3, 3, 3]), + "extral": np.random.randint(0, 2, size=[3, 3, 3, 3]), + "label": np.random.randint(0, 2, size=[3, 3, 3, 3]), + "affine": np.eye(3), + "shape": "CHWD", }, list, (3, 2, 2, 2), @@ -38,21 +38,21 @@ TEST_CASE_2 = [ { - 'keys': ['image', 'extral', 'label'], - 'label_key': 'label', - 'size': [2, 2, 2], - 'pos': 1, - 'neg': 1, - 'num_samples': 2, - 'image_key': None, - 'image_threshold': 0 + "keys": ["image", "extral", "label"], + "label_key": "label", + "size": [2, 2, 2], + "pos": 1, + "neg": 1, + "num_samples": 2, + "image_key": None, + "image_threshold": 0, }, { - 'image': np.zeros([3, 3, 3, 3]) - 1, - 'extral': np.zeros([3, 3, 3, 3]), - 'label': np.ones([3, 3, 3, 3]), - 'affine': np.eye(3), - 'shape': 'CHWD' + "image": np.zeros([3, 3, 3, 3]) - 1, + "extral": np.zeros([3, 3, 3, 3]), + "label": np.ones([3, 3, 3, 3]), + "affine": np.eye(3), + "shape": "CHWD", }, list, (3, 2, 2, 2), @@ -60,15 +60,14 @@ class TestRandCropByPosNegLabeld(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_type_shape(self, input_param, input_data, expected_type, expected_shape): result = RandCropByPosNegLabeld(**input_param)(input_data) self.assertIsInstance(result, expected_type) - self.assertTupleEqual(result[0]['image'].shape, expected_shape) - self.assertTupleEqual(result[0]['extral'].shape, expected_shape) - self.assertTupleEqual(result[0]['label'].shape, expected_shape) + self.assertTupleEqual(result[0]["image"].shape, expected_shape) + self.assertTupleEqual(result[0]["extral"].shape, expected_shape) + self.assertTupleEqual(result[0]["label"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_deform_grid.py b/tests/test_rand_deform_grid.py index 1101bfa31e..73815ac256 100644 --- a/tests/test_rand_deform_grid.py +++ b/tests/test_rand_deform_grid.py @@ -19,65 +19,111 @@ TEST_CASES = [ [ - dict(spacing=(1, 2), magnitude_range=(1., 2.), as_tensor_output=False, device=None), - {'spatial_size': (3, 3)}, - np.array([[[-3.45774551, -0.6608006, -1.62002671, -4.02259806, -2.77692349], - [1.21748926, -4.25845712, -1.57592837, 0.69985342, -2.16382767], - [-0.91158377, -0.12717178, 2.00258405, -0.85789449, -0.59616292], - [0.41676882, 3.96204313, 
3.93633727, 2.34820726, 1.51855713], - [2.99011186, 4.00170105, 0.74339613, 3.57886072, 0.31633439]], - [[-4.85634965, -0.78197195, -1.91838077, 1.81192079, 2.84286669], - [-4.34323645, -5.75784424, -2.37875058, 1.06023016, 5.24536301], - [-4.23315172, -1.99617861, 0.92412057, 0.81899041, 4.38084451], - [-5.08141703, -4.31985211, -0.52488611, 2.77048576, 4.45464513], - [-4.01588556, 1.21238156, 0.55444352, 3.31421131, 7.00529793]], - [[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], - [1., 1., 1., 1., 1.]]]) + dict(spacing=(1, 2), magnitude_range=(1.0, 2.0), as_tensor_output=False, device=None), + {"spatial_size": (3, 3)}, + np.array( + [ + [ + [-3.45774551, -0.6608006, -1.62002671, -4.02259806, -2.77692349], + [1.21748926, -4.25845712, -1.57592837, 0.69985342, -2.16382767], + [-0.91158377, -0.12717178, 2.00258405, -0.85789449, -0.59616292], + [0.41676882, 3.96204313, 3.93633727, 2.34820726, 1.51855713], + [2.99011186, 4.00170105, 0.74339613, 3.57886072, 0.31633439], + ], + [ + [-4.85634965, -0.78197195, -1.91838077, 1.81192079, 2.84286669], + [-4.34323645, -5.75784424, -2.37875058, 1.06023016, 5.24536301], + [-4.23315172, -1.99617861, 0.92412057, 0.81899041, 4.38084451], + [-5.08141703, -4.31985211, -0.52488611, 2.77048576, 4.45464513], + [-4.01588556, 1.21238156, 0.55444352, 3.31421131, 7.00529793], + ], + [ + [1.0, 1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 1.0], + ], + ] + ), ], [ - dict(spacing=(1, 2, 2), magnitude_range=(1., 3.), as_tensor_output=False, device=None), - {'spatial_size': (1, 2, 2)}, - np.array([[[[-2.81748977, 0.66968869, -0.52625642, -3.52173734], - [-1.96865364, 1.76472402, -5.06258324, -1.71805669], - [1.11934537, -2.45103851, -2.13654555, -1.15855539], - [1.49678424, -2.06960677, -1.74328475, -1.7271617]], - [[3.69301983, 3.66097025, 1.68091953, 0.6465273], [1.23445289, 2.49568333, -1.56671014, 1.96849393], - [-2.09916271, -1.06768069, 1.51861453, -2.39180117], - [-0.23449363, -1.44269211, -0.42794076, -4.68520972]], - [[-1.96578162, -0.17168741, 2.55269525, 0.70931081], - [1.00476444, 2.15217619, -0.47246061, 1.4748298], [-0.34829048, -1.89234811, 0.34558185, 1.9606272], - [1.56684302, 0.98019418, 5.00513708, 1.69126978]]], - [[[-1.36146598, 0.7469491, -5.16647064, -4.73906938], - [1.91920577, -2.33606298, -0.95030633, 0.7901769], [2.49116076, 3.93791246, 3.50390686, 2.79030531], - [1.70638302, 4.33070564, 3.52613304, 0.77965554]], - [[-0.62725323, -1.64857887, -2.92384357, -3.39022706], - [-3.00611521, -0.66597021, -0.21577072, -2.39146379], - [2.94568388, -0.83686357, -2.55435186, 2.74064119], [2.3247117, 2.78900974, 1.59788581, - 0.31140512]], - [[-0.89856598, -4.15325814, -0.21934502, -1.64845891], - [-1.52694693, -2.81794479, -2.22623861, -3.0299247], - [4.49410486, 1.27529645, 2.92559679, -1.12171559], [3.30307684, 4.97189727, 2.43914751, - 4.7262225]]], - [[[-4.81571068, -3.28263239, 1.635167, 2.36520831], [-1.92511521, -4.311247, 2.19242556, 7.34990574], - [-3.04122716, -0.94284154, 1.30058968, -0.11719455], - [-2.28657395, -3.68766906, 0.28400757, 5.08072864]], - [[-4.2308508, -0.16084264, 2.69545963, 3.4666492], - [-5.29514976, -1.55660775, 4.28031473, -0.39019547], - [-3.4617024, -1.92430221, 1.20214712, - 4.25261228], [-0.30683774, -1.4524049, 2.35996724, 3.83663135]], - [[-2.20587965, -1.94408353, -0.66964855, 1.15838178], - [-4.26637632, -0.46145396, 2.27393031, - 3.5415298], [-3.91902371, 2.02343374, 3.54278271, 2.40735681], 
- [-4.3785335, -0.78200288, 3.12162619, 3.55709275]]], - [[[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], - [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], - [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]]]]) + dict(spacing=(1, 2, 2), magnitude_range=(1.0, 3.0), as_tensor_output=False, device=None), + {"spatial_size": (1, 2, 2)}, + np.array( + [ + [ + [ + [-2.81748977, 0.66968869, -0.52625642, -3.52173734], + [-1.96865364, 1.76472402, -5.06258324, -1.71805669], + [1.11934537, -2.45103851, -2.13654555, -1.15855539], + [1.49678424, -2.06960677, -1.74328475, -1.7271617], + ], + [ + [3.69301983, 3.66097025, 1.68091953, 0.6465273], + [1.23445289, 2.49568333, -1.56671014, 1.96849393], + [-2.09916271, -1.06768069, 1.51861453, -2.39180117], + [-0.23449363, -1.44269211, -0.42794076, -4.68520972], + ], + [ + [-1.96578162, -0.17168741, 2.55269525, 0.70931081], + [1.00476444, 2.15217619, -0.47246061, 1.4748298], + [-0.34829048, -1.89234811, 0.34558185, 1.9606272], + [1.56684302, 0.98019418, 5.00513708, 1.69126978], + ], + ], + [ + [ + [-1.36146598, 0.7469491, -5.16647064, -4.73906938], + [1.91920577, -2.33606298, -0.95030633, 0.7901769], + [2.49116076, 3.93791246, 3.50390686, 2.79030531], + [1.70638302, 4.33070564, 3.52613304, 0.77965554], + ], + [ + [-0.62725323, -1.64857887, -2.92384357, -3.39022706], + [-3.00611521, -0.66597021, -0.21577072, -2.39146379], + [2.94568388, -0.83686357, -2.55435186, 2.74064119], + [2.3247117, 2.78900974, 1.59788581, 0.31140512], + ], + [ + [-0.89856598, -4.15325814, -0.21934502, -1.64845891], + [-1.52694693, -2.81794479, -2.22623861, -3.0299247], + [4.49410486, 1.27529645, 2.92559679, -1.12171559], + [3.30307684, 4.97189727, 2.43914751, 4.7262225], + ], + ], + [ + [ + [-4.81571068, -3.28263239, 1.635167, 2.36520831], + [-1.92511521, -4.311247, 2.19242556, 7.34990574], + [-3.04122716, -0.94284154, 1.30058968, -0.11719455], + [-2.28657395, -3.68766906, 0.28400757, 5.08072864], + ], + [ + [-4.2308508, -0.16084264, 2.69545963, 3.4666492], + [-5.29514976, -1.55660775, 4.28031473, -0.39019547], + [-3.4617024, -1.92430221, 1.20214712, 4.25261228], + [-0.30683774, -1.4524049, 2.35996724, 3.83663135], + ], + [ + [-2.20587965, -1.94408353, -0.66964855, 1.15838178], + [-4.26637632, -0.46145396, 2.27393031, 3.5415298], + [-3.91902371, 2.02343374, 3.54278271, 2.40735681], + [-4.3785335, -0.78200288, 3.12162619, 3.55709275], + ], + ], + [ + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + ], + ] + ), ], ] class TestRandDeformGrid(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_deform_grid(self, input_param, input_data, expected_val): g = RandDeformGrid(**input_param) @@ -90,5 +136,5 @@ def test_rand_deform_grid(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_elastic_2d.py b/tests/test_rand_elastic_2d.py index 74174be326..33a3f120f1 100644 --- a/tests/test_rand_elastic_2d.py +++ b/tests/test_rand_elastic_2d.py @@ -18,37 +18,60 @@ from monai.transforms import Rand2DElastic TEST_CASES = [ - [{'spacing': (.3, .3), 'magnitude_range': (1., 2.), 'prob': 0.0, 'as_tensor_output': False, 'device': None}, - 
{'img': torch.ones((3, 3, 3)), 'spatial_size': (2, 2)}, - np.ones((3, 2, 2))], [ - {'spacing': (.3, .3), 'magnitude_range': (1., 2.), 'prob': 0.9, 'as_tensor_output': False, 'device': None}, - {'img': torch.ones((3, 3, 3)), 'spatial_size': (2, 2), 'mode': 'bilinear'}, - np.array([[[0., 0.], [0., 0.04970419]], [[0., 0.], [0., 0.04970419]], [[0., 0.], [0., 0.04970419]]]), + {"spacing": (0.3, 0.3), "magnitude_range": (1.0, 2.0), "prob": 0.0, "as_tensor_output": False, "device": None}, + {"img": torch.ones((3, 3, 3)), "spatial_size": (2, 2)}, + np.ones((3, 2, 2)), + ], + [ + {"spacing": (0.3, 0.3), "magnitude_range": (1.0, 2.0), "prob": 0.9, "as_tensor_output": False, "device": None}, + {"img": torch.ones((3, 3, 3)), "spatial_size": (2, 2), "mode": "bilinear"}, + np.array([[[0.0, 0.0], [0.0, 0.04970419]], [[0.0, 0.0], [0.0, 0.04970419]], [[0.0, 0.0], [0.0, 0.04970419]]]), ], [ { - 'spacing': (1., 1.), 'magnitude_range': (1., 1.), 'scale_range': [1.2, 2.2], 'prob': 0.9, 'padding_mode': - 'border', 'as_tensor_output': True, 'device': None, 'spatial_size': (2, 2) + "spacing": (1.0, 1.0), + "magnitude_range": (1.0, 1.0), + "scale_range": [1.2, 2.2], + "prob": 0.9, + "padding_mode": "border", + "as_tensor_output": True, + "device": None, + "spatial_size": (2, 2), }, - {'img': torch.arange(27).reshape((3, 3, 3))}, - torch.tensor([[[1.6605, 1.0083], [6.0000, 6.2224]], [[10.6605, 10.0084], [15.0000, 15.2224]], - [[19.6605, 19.0083], [24.0000, 24.2224]]]), + {"img": torch.arange(27).reshape((3, 3, 3))}, + torch.tensor( + [ + [[1.6605, 1.0083], [6.0000, 6.2224]], + [[10.6605, 10.0084], [15.0000, 15.2224]], + [[19.6605, 19.0083], [24.0000, 24.2224]], + ] + ), ], [ { - 'spacing': (.3, .3), 'magnitude_range': (.1, .2), 'translate_range': [-.01, .01], - 'scale_range': [0.01, 0.02], 'prob': 0.9, 'as_tensor_output': False, 'device': None, 'spatial_size': (2, 2), + "spacing": (0.3, 0.3), + "magnitude_range": (0.1, 0.2), + "translate_range": [-0.01, 0.01], + "scale_range": [0.01, 0.02], + "prob": 0.9, + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2), }, - {'img': torch.arange(27).reshape((3, 3, 3))}, - np.array([[[0.2001334, 1.2563337], [5.2274017, 7.90148]], [[8.675412, 6.9098353], [13.019891, 16.850012]], - [[17.15069, 12.563337], [20.81238, 25.798544]]]) + {"img": torch.arange(27).reshape((3, 3, 3))}, + np.array( + [ + [[0.2001334, 1.2563337], [5.2274017, 7.90148]], + [[8.675412, 6.9098353], [13.019891, 16.850012]], + [[17.15069, 12.563337], [20.81238, 25.798544]], + ] + ), ], ] class TestRand2DElastic(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_2d_elastic(self, input_param, input_data, expected_val): g = Rand2DElastic(**input_param) @@ -61,5 +84,5 @@ def test_rand_2d_elastic(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_elastic_3d.py b/tests/test_rand_elastic_3d.py index 459fe932e2..425227db80 100644 --- a/tests/test_rand_elastic_3d.py +++ b/tests/test_rand_elastic_3d.py @@ -18,26 +18,45 @@ from monai.transforms import Rand3DElastic TEST_CASES = [ - [{'magnitude_range': (.3, 2.3), 'sigma_range': (1., 20.), 'prob': 0.0, 'as_tensor_output': False, 'device': None}, - {'img': torch.ones((2, 3, 3, 3)), 'spatial_size': (2, 2, 2)}, - np.ones((2, 2, 2, 2))], [ - {'magnitude_range': (.3, .3), 'sigma_range': (1., 2.), 'prob': 0.9, 'as_tensor_output': False, 'device': None}, - {'img': 
torch.arange(27).reshape((1, 3, 3, 3)), 'spatial_size': (2, 2, 2)}, + { + "magnitude_range": (0.3, 2.3), + "sigma_range": (1.0, 20.0), + "prob": 0.0, + "as_tensor_output": False, + "device": None, + }, + {"img": torch.ones((2, 3, 3, 3)), "spatial_size": (2, 2, 2)}, + np.ones((2, 2, 2, 2)), + ], + [ + { + "magnitude_range": (0.3, 0.3), + "sigma_range": (1.0, 2.0), + "prob": 0.9, + "as_tensor_output": False, + "device": None, + }, + {"img": torch.arange(27).reshape((1, 3, 3, 3)), "spatial_size": (2, 2, 2)}, np.array([[[[3.2385552, 4.753422], [7.779232, 9.286472]], [[16.769115, 18.287868], [21.300673, 22.808704]]]]), ], [ { - 'magnitude_range': (.3, .3), 'sigma_range': (1., 2.), 'prob': 0.9, 'rotate_range': [1, 1, 1], - 'as_tensor_output': False, 'device': None, 'spatial_size': (2, 2, 2) + "magnitude_range": (0.3, 0.3), + "sigma_range": (1.0, 2.0), + "prob": 0.9, + "rotate_range": [1, 1, 1], + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2, 2), }, - {'img': torch.arange(27).reshape((1, 3, 3, 3)), 'mode': 'bilinear'}, - np.array([[[[1.6566806, 7.695548], [7.4342523, 13.580086]], [[11.776854, 18.669481], [18.396517, 21.551771]]]])], + {"img": torch.arange(27).reshape((1, 3, 3, 3)), "mode": "bilinear"}, + np.array([[[[1.6566806, 7.695548], [7.4342523, 13.580086]], [[11.776854, 18.669481], [18.396517, 21.551771]]]]), + ], ] class TestRand3DElastic(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_3d_elastic(self, input_param, input_data, expected_val): g = Rand3DElastic(**input_param) @@ -50,5 +69,5 @@ def test_rand_3d_elastic(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_elasticd_2d.py b/tests/test_rand_elasticd_2d.py index 00326e5e70..0dc7ce4aa4 100644 --- a/tests/test_rand_elasticd_2d.py +++ b/tests/test_rand_elasticd_2d.py @@ -20,55 +20,102 @@ TEST_CASES = [ [ { - 'keys': ('img', 'seg'), 'spacing': (.3, .3), 'magnitude_range': (1., 2.), 'prob': 0.0, 'as_tensor_output': - False, 'device': None, 'spatial_size': (2, 2) + "keys": ("img", "seg"), + "spacing": (0.3, 0.3), + "magnitude_range": (1.0, 2.0), + "prob": 0.0, + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2), }, - {'img': torch.ones((3, 3, 3)), 'seg': torch.ones((3, 3, 3))}, + {"img": torch.ones((3, 3, 3)), "seg": torch.ones((3, 3, 3))}, np.ones((3, 2, 2)), ], [ { - 'keys': ('img', 'seg'), 'spacing': (.3, .3), 'magnitude_range': (1., 2.), 'prob': 0.9, 'as_tensor_output': - False, 'device': None, 'spatial_size': (2, 2), 'mode': 'bilinear' + "keys": ("img", "seg"), + "spacing": (0.3, 0.3), + "magnitude_range": (1.0, 2.0), + "prob": 0.9, + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2), + "mode": "bilinear", }, - {'img': torch.ones((3, 3, 3)), 'seg': torch.ones((3, 3, 3))}, - np.array([[[0., 0.], [0., 0.04970419]], [[0., 0.], [0., 0.04970419]], [[0., 0.], [0., 0.04970419]]]), + {"img": torch.ones((3, 3, 3)), "seg": torch.ones((3, 3, 3))}, + np.array([[[0.0, 0.0], [0.0, 0.04970419]], [[0.0, 0.0], [0.0, 0.04970419]], [[0.0, 0.0], [0.0, 0.04970419]]]), ], [ { - 'keys': ('img', 'seg'), 'spacing': (1., 1.), 'magnitude_range': (1., 1.), 'scale_range': [1.2, 2.2], 'prob': - 0.9, 'padding_mode': 'border', 'as_tensor_output': True, 'device': None, 'spatial_size': (2, 2) + "keys": ("img", "seg"), + "spacing": (1.0, 1.0), + "magnitude_range": (1.0, 1.0), + "scale_range": [1.2, 2.2], + "prob": 0.9, 
+ "padding_mode": "border", + "as_tensor_output": True, + "device": None, + "spatial_size": (2, 2), }, - {'img': torch.arange(27).reshape((3, 3, 3)), 'seg': torch.arange(27).reshape((3, 3, 3))}, - torch.tensor([[[1.6605, 1.0083], [6.0000, 6.2224]], [[10.6605, 10.0084], [15.0000, 15.2224]], - [[19.6605, 19.0083], [24.0000, 24.2224]]]), + {"img": torch.arange(27).reshape((3, 3, 3)), "seg": torch.arange(27).reshape((3, 3, 3))}, + torch.tensor( + [ + [[1.6605, 1.0083], [6.0000, 6.2224]], + [[10.6605, 10.0084], [15.0000, 15.2224]], + [[19.6605, 19.0083], [24.0000, 24.2224]], + ] + ), ], [ { - 'keys': ('img', 'seg'), 'spacing': (.3, .3), 'magnitude_range': (.1, .2), 'translate_range': [-.01, .01], - 'scale_range': [0.01, 0.02], 'prob': 0.9, 'as_tensor_output': False, 'device': None, 'spatial_size': (2, 2), + "keys": ("img", "seg"), + "spacing": (0.3, 0.3), + "magnitude_range": (0.1, 0.2), + "translate_range": [-0.01, 0.01], + "scale_range": [0.01, 0.02], + "prob": 0.9, + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2), }, - {'img': torch.arange(27).reshape((3, 3, 3)), 'seg': torch.arange(27).reshape((3, 3, 3))}, - np.array([[[0.2001334, 1.2563337], [5.2274017, 7.90148]], [[8.675412, 6.9098353], [13.019891, 16.850012]], - [[17.15069, 12.563337], [20.81238, 25.798544]]]) + {"img": torch.arange(27).reshape((3, 3, 3)), "seg": torch.arange(27).reshape((3, 3, 3))}, + np.array( + [ + [[0.2001334, 1.2563337], [5.2274017, 7.90148]], + [[8.675412, 6.9098353], [13.019891, 16.850012]], + [[17.15069, 12.563337], [20.81238, 25.798544]], + ] + ), ], [ { - 'keys': ('img', 'seg'), 'mode': ('bilinear', 'nearest'), 'spacing': (.3, .3), 'magnitude_range': (.1, .2), - 'translate_range': [-.01, .01], - 'scale_range': [0.01, 0.02], 'prob': 0.9, 'as_tensor_output': True, 'device': None, 'spatial_size': (2, 2), + "keys": ("img", "seg"), + "mode": ("bilinear", "nearest"), + "spacing": (0.3, 0.3), + "magnitude_range": (0.1, 0.2), + "translate_range": [-0.01, 0.01], + "scale_range": [0.01, 0.02], + "prob": 0.9, + "as_tensor_output": True, + "device": None, + "spatial_size": (2, 2), + }, + {"img": torch.arange(27).reshape((3, 3, 3)), "seg": torch.arange(27).reshape((3, 3, 3))}, + { + "img": torch.tensor( + [ + [[0.2001334, 1.2563337], [5.2274017, 7.90148]], + [[8.675412, 6.9098353], [13.019891, 16.850012]], + [[17.15069, 12.563337], [20.81238, 25.798544]], + ] + ), + "seg": torch.tensor([[[0.0, 2.0], [6.0, 8.0]], [[9.0, 11.0], [15.0, 17.0]], [[18.0, 20.0], [24.0, 26.0]]]), }, - {'img': torch.arange(27).reshape((3, 3, 3)), 'seg': torch.arange(27).reshape((3, 3, 3))}, - {'img': torch.tensor([[[0.2001334, 1.2563337], [5.2274017, 7.90148]], - [[8.675412, 6.9098353], [13.019891, 16.850012]], - [[17.15069, 12.563337], [20.81238, 25.798544]]]), - 'seg': torch.tensor([[[0., 2.], [6., 8.]], [[9., 11.], [15., 17.]], [[18., 20.], [24., 26.]]])} ], ] class TestRand2DElasticd(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_2d_elasticd(self, input_param, input_data, expected_val): g = Rand2DElasticd(**input_param) @@ -84,5 +131,5 @@ def test_rand_2d_elasticd(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_elasticd_3d.py b/tests/test_rand_elasticd_3d.py index 068d634efb..43bc297277 100644 --- a/tests/test_rand_elasticd_3d.py +++ b/tests/test_rand_elasticd_3d.py @@ -18,41 +18,71 @@ from monai.transforms import Rand3DElasticd 
TEST_CASES = [ - [{'keys': ('img', 'seg'), 'magnitude_range': (.3, 2.3), 'sigma_range': (1., 20.), - 'prob': 0.0, 'as_tensor_output': False, 'device': None, 'spatial_size': (2, 2, 2)}, - {'img': torch.ones((2, 3, 3, 3)), 'seg': torch.ones((2, 3, 3, 3))}, - np.ones((2, 2, 2, 2))], [ - {'keys': ('img', 'seg'), 'magnitude_range': (.3, .3), 'sigma_range': (1., 2.), - 'prob': 0.9, 'as_tensor_output': False, 'device': None, 'spatial_size': (2, 2, 2)}, - {'img': torch.arange(27).reshape((1, 3, 3, 3)), 'seg': torch.arange(27).reshape((1, 3, 3, 3))}, + { + "keys": ("img", "seg"), + "magnitude_range": (0.3, 2.3), + "sigma_range": (1.0, 20.0), + "prob": 0.0, + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2, 2), + }, + {"img": torch.ones((2, 3, 3, 3)), "seg": torch.ones((2, 3, 3, 3))}, + np.ones((2, 2, 2, 2)), + ], + [ + { + "keys": ("img", "seg"), + "magnitude_range": (0.3, 0.3), + "sigma_range": (1.0, 2.0), + "prob": 0.9, + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2, 2), + }, + {"img": torch.arange(27).reshape((1, 3, 3, 3)), "seg": torch.arange(27).reshape((1, 3, 3, 3))}, np.array([[[[3.2385552, 4.753422], [7.779232, 9.286472]], [[16.769115, 18.287868], [21.300673, 22.808704]]]]), ], [ { - 'keys': ('img', 'seg'), 'magnitude_range': (.3, .3), 'sigma_range': (1., 2.), 'prob': 0.9, - 'rotate_range': [1, 1, 1], 'as_tensor_output': False, 'device': None, - 'spatial_size': (2, 2, 2), 'mode': 'bilinear' + "keys": ("img", "seg"), + "magnitude_range": (0.3, 0.3), + "sigma_range": (1.0, 2.0), + "prob": 0.9, + "rotate_range": [1, 1, 1], + "as_tensor_output": False, + "device": None, + "spatial_size": (2, 2, 2), + "mode": "bilinear", }, - {'img': torch.arange(27).reshape((1, 3, 3, 3)), 'seg': torch.arange(27).reshape((1, 3, 3, 3))}, + {"img": torch.arange(27).reshape((1, 3, 3, 3)), "seg": torch.arange(27).reshape((1, 3, 3, 3))}, np.array([[[[1.6566806, 7.695548], [7.4342523, 13.580086]], [[11.776854, 18.669481], [18.396517, 21.551771]]]]), ], [ { - 'keys': ('img', 'seg'), 'mode': ('bilinear', 'nearest'), 'magnitude_range': (.3, .3), - 'sigma_range': (1., 2.), 'prob': 0.9, 'rotate_range': [1, 1, 1], - 'as_tensor_output': True, 'device': torch.device('cpu:0'), 'spatial_size': (2, 2, 2) + "keys": ("img", "seg"), + "mode": ("bilinear", "nearest"), + "magnitude_range": (0.3, 0.3), + "sigma_range": (1.0, 2.0), + "prob": 0.9, + "rotate_range": [1, 1, 1], + "as_tensor_output": True, + "device": torch.device("cpu:0"), + "spatial_size": (2, 2, 2), + }, + {"img": torch.arange(27).reshape((1, 3, 3, 3)), "seg": torch.arange(27).reshape((1, 3, 3, 3))}, + { + "img": torch.tensor( + [[[[1.6566806, 7.695548], [7.4342523, 13.580086]], [[11.776854, 18.669481], [18.396517, 21.551771]]]] + ), + "seg": torch.tensor([[[[1.0, 11.0], [7.0, 17.0]], [[9.0, 19.0], [15.0, 25.0]]]]), }, - {'img': torch.arange(27).reshape((1, 3, 3, 3)), 'seg': torch.arange(27).reshape((1, 3, 3, 3))}, - {'img': torch.tensor([[[[1.6566806, 7.695548], [7.4342523, 13.580086]], - [[11.776854, 18.669481], [18.396517, 21.551771]]]]), - 'seg': torch.tensor([[[[1., 11.], [7., 17.]], [[9., 19.], [15., 25.]]]])} ], ] class TestRand3DElasticd(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_3d_elasticd(self, input_param, input_data, expected_val): g = Rand3DElasticd(**input_param) @@ -68,5 +98,5 @@ def test_rand_3d_elasticd(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": 
unittest.main() diff --git a/tests/test_rand_flip.py b/tests/test_rand_flip.py index 1206c85571..d9b032b57a 100644 --- a/tests/test_rand_flip.py +++ b/tests/test_rand_flip.py @@ -17,15 +17,12 @@ from monai.transforms import RandFlip from tests.utils import NumpyImageTestCase2D -INVALID_CASES = [("wrong_axis", ['s', 1], TypeError), - ("not_numbers", 's', TypeError)] +INVALID_CASES = [("wrong_axis", ["s", 1], TypeError), ("not_numbers", "s", TypeError)] -VALID_CASES = [("no_axis", None), - ("one_axis", 1), - ("many_axis", [0, 1])] +VALID_CASES = [("no_axis", None), ("one_axis", 1), ("many_axis", [0, 1])] -class TestRandFlip(NumpyImageTestCase2D): +class TestRandFlip(NumpyImageTestCase2D): @parameterized.expand(INVALID_CASES) def test_invalid_inputs(self, _, spatial_axis, raises): with self.assertRaises(raises): @@ -42,5 +39,5 @@ def test_correct_results(self, _, spatial_axis): self.assertTrue(np.allclose(expected, flip(self.imt[0]))) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_flipd.py b/tests/test_rand_flipd.py index bcda54eecd..5e55f629db 100644 --- a/tests/test_rand_flipd.py +++ b/tests/test_rand_flipd.py @@ -17,22 +17,20 @@ from monai.transforms import RandFlipd from tests.utils import NumpyImageTestCase2D -VALID_CASES = [("no_axis", None), - ("one_axis", 1), - ("many_axis", [0, 1])] +VALID_CASES = [("no_axis", None), ("one_axis", 1), ("many_axis", [0, 1])] -class TestRandFlipd(NumpyImageTestCase2D): +class TestRandFlipd(NumpyImageTestCase2D): @parameterized.expand(VALID_CASES) def test_correct_results(self, _, spatial_axis): - flip = RandFlipd(keys='img', prob=1.0, spatial_axis=spatial_axis) - res = flip({'img': self.imt[0]}) + flip = RandFlipd(keys="img", prob=1.0, spatial_axis=spatial_axis) + res = flip({"img": self.imt[0]}) expected = list() for channel in self.imt[0]: expected.append(np.flip(channel, spatial_axis)) expected = np.stack(expected) - self.assertTrue(np.allclose(expected, res['img'])) + self.assertTrue(np.allclose(expected, res["img"])) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_gaussian_noise.py b/tests/test_rand_gaussian_noise.py index 92ab1462f4..68050c804e 100644 --- a/tests/test_rand_gaussian_noise.py +++ b/tests/test_rand_gaussian_noise.py @@ -19,11 +19,7 @@ class TestRandGaussianNoise(NumpyImageTestCase2D): - - @parameterized.expand([ - ("test_zero_mean", 0, 0.1), - ("test_non_zero_mean", 1, 0.5) - ]) + @parameterized.expand([("test_zero_mean", 0, 0.1), ("test_non_zero_mean", 1, 0.5)]) def test_correct_results(self, _, mean, std): seed = 0 gaussian_fn = RandGaussianNoise(prob=1.0, mean=mean, std=std) @@ -35,5 +31,5 @@ def test_correct_results(self, _, mean, std): np.testing.assert_allclose(expected, noised) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_gaussian_noised.py b/tests/test_rand_gaussian_noised.py index cfff9afb79..0802271ef9 100644 --- a/tests/test_rand_gaussian_noised.py +++ b/tests/test_rand_gaussian_noised.py @@ -19,21 +19,17 @@ class TestRandGaussianNoised(NumpyImageTestCase2D): - - @parameterized.expand([ - ("test_zero_mean", ['img'], 0, 0.1), - ("test_non_zero_mean", ['img'], 1, 0.5) - ]) + @parameterized.expand([("test_zero_mean", ["img"], 0, 0.1), ("test_non_zero_mean", ["img"], 1, 0.5)]) def test_correct_results(self, _, keys, mean, std): seed = 0 gaussian_fn = RandGaussianNoised(keys=keys, prob=1.0, mean=mean, std=std) gaussian_fn.set_random_state(seed) - noised = 
gaussian_fn({'img': self.imt}) + noised = gaussian_fn({"img": self.imt}) np.random.seed(seed) np.random.random() expected = self.imt + np.random.normal(mean, np.random.uniform(0, std), size=self.imt.shape) - np.testing.assert_allclose(expected, noised['img']) + np.testing.assert_allclose(expected, noised["img"]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_rotate.py b/tests/test_rand_rotate.py index 1e5a18bfc8..8d711d5c8f 100644 --- a/tests/test_rand_rotate.py +++ b/tests/test_rand_rotate.py @@ -20,27 +20,38 @@ class TestRandRotate(NumpyImageTestCase2D): - - @parameterized.expand([ - (90, (0, 1), True, 1, 'reflect', 0, True), - ((-45, 45), (1, 0), True, 3, 'constant', 0, True), - (180, (1, 0), False, 2, 'constant', 4, False), - ]) - def test_correct_results(self, degrees, spatial_axes, reshape, - order, mode, cval, prefilter): - rotate_fn = RandRotate(degrees, prob=1.0, spatial_axes=spatial_axes, reshape=reshape, - order=order, mode=mode, cval=cval, prefilter=prefilter) + @parameterized.expand( + [ + (90, (0, 1), True, 1, "reflect", 0, True), + ((-45, 45), (1, 0), True, 3, "constant", 0, True), + (180, (1, 0), False, 2, "constant", 4, False), + ] + ) + def test_correct_results(self, degrees, spatial_axes, reshape, order, mode, cval, prefilter): + rotate_fn = RandRotate( + degrees, + prob=1.0, + spatial_axes=spatial_axes, + reshape=reshape, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + ) rotate_fn.set_random_state(243) rotated = rotate_fn(self.imt[0]) angle = rotate_fn.angle expected = list() for channel in self.imt[0]: - expected.append(scipy.ndimage.rotate(channel, angle, spatial_axes, reshape, order=order, - mode=mode, cval=cval, prefilter=prefilter)) + expected.append( + scipy.ndimage.rotate( + channel, angle, spatial_axes, reshape, order=order, mode=mode, cval=cval, prefilter=prefilter + ) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, rotated)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_rotate90.py b/tests/test_rand_rotate90.py index 111ef4e961..20e171f9e8 100644 --- a/tests/test_rand_rotate90.py +++ b/tests/test_rand_rotate90.py @@ -18,7 +18,6 @@ class TestRandRotate90(NumpyImageTestCase2D): - def test_default(self): rotate = RandRotate90() rotate.set_random_state(123) @@ -60,5 +59,5 @@ def test_prob_k_spatial_axes(self): self.assertTrue(np.allclose(rotated, expected)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_rotate90d.py b/tests/test_rand_rotate90d.py index 5ec9d66d0c..cd55ff5e3a 100644 --- a/tests/test_rand_rotate90d.py +++ b/tests/test_rand_rotate90d.py @@ -18,7 +18,6 @@ class TestRandRotate90d(NumpyImageTestCase2D): - def test_default(self): key = None rotate = RandRotate90d(keys=key) @@ -31,7 +30,7 @@ def test_default(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_k(self): - key = 'test' + key = "test" rotate = RandRotate90d(keys=key, max_k=2) rotate.set_random_state(234) rotated = rotate({key: self.imt[0]}) @@ -42,7 +41,7 @@ def test_k(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_spatial_axes(self): - key = 'test' + key = "test" rotate = RandRotate90d(keys=key, spatial_axes=(0, 1)) rotate.set_random_state(234) rotated = rotate({key: self.imt[0]}) @@ -53,7 +52,7 @@ def test_spatial_axes(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_prob_k_spatial_axes(self): - key = 'test' + 
key = "test" rotate = RandRotate90d(keys=key, prob=1.0, max_k=2, spatial_axes=(0, 1)) rotate.set_random_state(234) rotated = rotate({key: self.imt[0]}) @@ -64,11 +63,11 @@ def test_prob_k_spatial_axes(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_no_key(self): - key = 'unknown' + key = "unknown" rotate = RandRotate90d(keys=key, prob=1.0, max_k=2, spatial_axes=(0, 1)) - with self.assertRaisesRegex(KeyError, ''): - rotated = rotate({'test': self.imt[0]}) + with self.assertRaisesRegex(KeyError, ""): + rotated = rotate({"test": self.imt[0]}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_rotated.py b/tests/test_rand_rotated.py index 1c9d98e83e..af311539ad 100644 --- a/tests/test_rand_rotated.py +++ b/tests/test_rand_rotated.py @@ -20,27 +20,39 @@ class TestRandRotated(NumpyImageTestCase2D): - - @parameterized.expand([ - (90, (0, 1), True, 1, 'reflect', 0, True), - ((-45, 45), (1, 0), True, 3, 'constant', 0, True), - (180, (1, 0), False, 2, 'constant', 4, False), - ]) - def test_correct_results(self, degrees, spatial_axes, reshape, - order, mode, cval, prefilter): - rotate_fn = RandRotated('img', degrees, prob=1.0, spatial_axes=spatial_axes, reshape=reshape, - order=order, mode=mode, cval=cval, prefilter=prefilter) + @parameterized.expand( + [ + (90, (0, 1), True, 1, "reflect", 0, True), + ((-45, 45), (1, 0), True, 3, "constant", 0, True), + (180, (1, 0), False, 2, "constant", 4, False), + ] + ) + def test_correct_results(self, degrees, spatial_axes, reshape, order, mode, cval, prefilter): + rotate_fn = RandRotated( + "img", + degrees, + prob=1.0, + spatial_axes=spatial_axes, + reshape=reshape, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + ) rotate_fn.set_random_state(243) - rotated = rotate_fn({'img': self.imt[0]}) + rotated = rotate_fn({"img": self.imt[0]}) angle = rotate_fn.angle expected = list() for channel in self.imt[0]: - expected.append(scipy.ndimage.rotate(channel, angle, spatial_axes, reshape, order=order, - mode=mode, cval=cval, prefilter=prefilter)) + expected.append( + scipy.ndimage.rotate( + channel, angle, spatial_axes, reshape, order=order, mode=mode, cval=cval, prefilter=prefilter + ) + ) expected = np.stack(expected).astype(np.float32) - self.assertTrue(np.allclose(expected, rotated['img'])) + self.assertTrue(np.allclose(expected, rotated["img"])) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_scale_intensity.py b/tests/test_rand_scale_intensity.py index 1cdefdef66..64e8bf95ac 100644 --- a/tests/test_rand_scale_intensity.py +++ b/tests/test_rand_scale_intensity.py @@ -17,7 +17,6 @@ class TestRandScaleIntensity(NumpyImageTestCase2D): - def test_value(self): scaler = RandScaleIntensity(factors=0.5, prob=1.0) scaler.set_random_state(seed=0) @@ -27,5 +26,5 @@ def test_value(self): np.testing.assert_allclose(result, expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_scale_intensityd.py b/tests/test_rand_scale_intensityd.py index ddd32acd43..cc17fd9b7b 100644 --- a/tests/test_rand_scale_intensityd.py +++ b/tests/test_rand_scale_intensityd.py @@ -17,9 +17,8 @@ class TestRandScaleIntensityd(NumpyImageTestCase2D): - def test_value(self): - key = 'img' + key = "img" scaler = RandScaleIntensityd(keys=[key], factors=0.5, prob=1.0) scaler.set_random_state(seed=0) result = scaler({key: self.imt}) @@ -28,5 +27,5 @@ def test_value(self): np.testing.assert_allclose(result[key], 
expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_shift_intensity.py b/tests/test_rand_shift_intensity.py index 282421505b..a54eb095cf 100644 --- a/tests/test_rand_shift_intensity.py +++ b/tests/test_rand_shift_intensity.py @@ -17,7 +17,6 @@ class TestRandShiftIntensity(NumpyImageTestCase2D): - def test_value(self): shifter = RandShiftIntensity(offsets=1.0, prob=1.0) shifter.set_random_state(seed=0) @@ -27,5 +26,5 @@ def test_value(self): np.testing.assert_allclose(result, expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_shift_intensityd.py b/tests/test_rand_shift_intensityd.py index 10841082cd..c1e571ec71 100644 --- a/tests/test_rand_shift_intensityd.py +++ b/tests/test_rand_shift_intensityd.py @@ -17,9 +17,8 @@ class TestRandShiftIntensityd(NumpyImageTestCase2D): - def test_value(self): - key = 'img' + key = "img" shifter = RandShiftIntensityd(keys=[key], offsets=1.0, prob=1.0) shifter.set_random_state(seed=0) result = shifter({key: self.imt}) @@ -28,5 +27,5 @@ def test_value(self): np.testing.assert_allclose(result[key], expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_spatial_crop.py b/tests/test_rand_spatial_crop.py index c15f678e73..bf4f89c0ba 100644 --- a/tests/test_rand_spatial_crop.py +++ b/tests/test_rand_spatial_crop.py @@ -14,43 +14,21 @@ from parameterized import parameterized from monai.transforms import RandSpatialCrop -TEST_CASE_1 = [ - { - 'roi_size': [3, 3, 3], - 'random_center': True - }, - np.random.randint(0, 2, size=[3, 3, 3, 3]), - (3, 3, 3, 3) -] +TEST_CASE_1 = [{"roi_size": [3, 3, 3], "random_center": True}, np.random.randint(0, 2, size=[3, 3, 3, 3]), (3, 3, 3, 3)] TEST_CASE_2 = [ - { - 'roi_size': [3, 3, 3], - 'random_center': False - }, + {"roi_size": [3, 3, 3], "random_center": False}, np.random.randint(0, 2, size=[3, 3, 3, 3]), - (3, 3, 3, 3) + (3, 3, 3, 3), ] TEST_CASE_3 = [ - { - 'roi_size': [3, 3], - 'random_center': False - }, - np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]) + {"roi_size": [3, 3], "random_center": False}, + np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]), ] class TestRandSpatialCrop(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_shape(self, input_param, input_data, expected_shape): result = RandSpatialCrop(**input_param)(input_data) @@ -61,8 +39,8 @@ def test_value(self, input_param, input_data): cropper = RandSpatialCrop(**input_param) result = cropper(input_data) roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size] - np.testing.assert_allclose(result, input_data[:, roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]) + np.testing.assert_allclose(result, input_data[:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_spatial_cropd.py b/tests/test_rand_spatial_cropd.py index 2290e44498..6b9c9c5e23 100644 --- a/tests/test_rand_spatial_cropd.py +++ b/tests/test_rand_spatial_cropd.py @@ -15,59 +15,36 @@ from monai.transforms import RandSpatialCropd TEST_CASE_1 = [ - { - 'keys': 'img', - 'roi_size': [3, 3, 3], - 'random_center': True - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, - (3, 3, 3, 3) + {"keys": "img", "roi_size": [3, 3, 3], "random_center": True}, + {"img": np.random.randint(0, 2, size=[3, 3, 
3, 3])}, + (3, 3, 3, 3), ] TEST_CASE_2 = [ - { - 'keys': 'img', - 'roi_size': [3, 3, 3], - 'random_center': False - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, - (3, 3, 3, 3) + {"keys": "img", "roi_size": [3, 3, 3], "random_center": False}, + {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])}, + (3, 3, 3, 3), ] TEST_CASE_3 = [ - { - 'keys': 'img', - 'roi_size': [3, 3], - 'random_center': False - }, - { - 'img': np.array([ - [ - [0, 0, 0, 0, 0], - [0, 1, 2, 1, 0], - [0, 2, 3, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0] - ] - ]) - } + {"keys": "img", "roi_size": [3, 3], "random_center": False}, + {"img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])}, ] class TestRandSpatialCropd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_shape(self, input_param, input_data, expected_shape): result = RandSpatialCropd(**input_param)(input_data) - self.assertTupleEqual(result['img'].shape, expected_shape) + self.assertTupleEqual(result["img"].shape, expected_shape) @parameterized.expand([TEST_CASE_3]) def test_value(self, input_param, input_data): cropper = RandSpatialCropd(**input_param) result = cropper(input_data) roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size] - np.testing.assert_allclose(result['img'], input_data['img'][:, roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]) + np.testing.assert_allclose(result["img"], input_data["img"][:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_zoom.py b/tests/test_rand_zoom.py index 7dfdb7a522..0111c2c16f 100644 --- a/tests/test_rand_zoom.py +++ b/tests/test_rand_zoom.py @@ -20,60 +20,70 @@ from monai.transforms import RandZoom from tests.utils import NumpyImageTestCase2D -VALID_CASES = [(0.9, 1.1, 3, 'constant', 0, True, False, False)] +VALID_CASES = [(0.9, 1.1, 3, "constant", 0, True, False, False)] -class TestRandZoom(NumpyImageTestCase2D): +class TestRandZoom(NumpyImageTestCase2D): @parameterized.expand(VALID_CASES) - def test_correct_results(self, min_zoom, max_zoom, order, mode, - cval, prefilter, use_gpu, keep_size): - random_zoom = RandZoom(prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order, - mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu, - keep_size=keep_size) + def test_correct_results(self, min_zoom, max_zoom, order, mode, cval, prefilter, use_gpu, keep_size): + random_zoom = RandZoom( + prob=1.0, + min_zoom=min_zoom, + max_zoom=max_zoom, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + use_gpu=use_gpu, + keep_size=keep_size, + ) random_zoom.set_random_state(234) zoomed = random_zoom(self.imt[0]) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append( + zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, cval=cval, prefilter=prefilter) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed)) - @parameterized.expand([ - (0.8, 1.2, 1, 'constant', 0, True) - ]) + @parameterized.expand([(0.8, 1.2, 1, "constant", 0, True)]) def test_gpu_zoom(self, min_zoom, max_zoom, order, mode, cval, prefilter): - if importlib.util.find_spec('cupy'): + if importlib.util.find_spec("cupy"): random_zoom = RandZoom( - prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order, - mode=mode, cval=cval, prefilter=prefilter, use_gpu=True, - 
keep_size=False) + prob=1.0, + min_zoom=min_zoom, + max_zoom=max_zoom, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + use_gpu=True, + keep_size=False, + ) random_zoom.set_random_state(234) zoomed = random_zoom(self.imt[0]) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append( + zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, cval=cval, prefilter=prefilter) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed)) def test_keep_size(self): - random_zoom = RandZoom(prob=1.0, min_zoom=0.6, - max_zoom=0.7, keep_size=True) + random_zoom = RandZoom(prob=1.0, min_zoom=0.6, max_zoom=0.7, keep_size=True) zoomed = random_zoom(self.imt[0]) self.assertTrue(np.array_equal(zoomed.shape, self.imt.shape[1:])) - @parameterized.expand([ - ("no_min_zoom", None, 1.1, 1, TypeError), - ("invalid_order", 0.9, 1.1 , 's', AssertionError) - ]) + @parameterized.expand([("no_min_zoom", None, 1.1, 1, TypeError), ("invalid_order", 0.9, 1.1, "s", AssertionError)]) def test_invalid_inputs(self, _, min_zoom, max_zoom, order, raises): with self.assertRaises(raises): random_zoom = RandZoom(prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order) zoomed = random_zoom(self.imt[0]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_zoomd.py b/tests/test_rand_zoomd.py index 9a5838da4b..acdb6c805c 100644 --- a/tests/test_rand_zoomd.py +++ b/tests/test_rand_zoomd.py @@ -20,64 +20,76 @@ from monai.transforms import RandZoomd from tests.utils import NumpyImageTestCase2D -VALID_CASES = [(0.9, 1.1, 3, 'constant', 0, True, False, False)] +VALID_CASES = [(0.9, 1.1, 3, "constant", 0, True, False, False)] -class TestRandZoomd(NumpyImageTestCase2D): +class TestRandZoomd(NumpyImageTestCase2D): @parameterized.expand(VALID_CASES) - def test_correct_results(self, min_zoom, max_zoom, order, mode, - cval, prefilter, use_gpu, keep_size): - key = 'img' - random_zoom = RandZoomd(key, prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order, - mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu, - keep_size=keep_size) + def test_correct_results(self, min_zoom, max_zoom, order, mode, cval, prefilter, use_gpu, keep_size): + key = "img" + random_zoom = RandZoomd( + key, + prob=1.0, + min_zoom=min_zoom, + max_zoom=max_zoom, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + use_gpu=use_gpu, + keep_size=keep_size, + ) random_zoom.set_random_state(234) zoomed = random_zoom({key: self.imt[0]}) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append( + zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, cval=cval, prefilter=prefilter) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed[key])) - @parameterized.expand([ - (0.8, 1.2, 1, 'constant', 0, True) - ]) + @parameterized.expand([(0.8, 1.2, 1, "constant", 0, True)]) def test_gpu_zoom(self, min_zoom, max_zoom, order, mode, cval, prefilter): - key = 'img' - if importlib.util.find_spec('cupy'): + key = "img" + if importlib.util.find_spec("cupy"): random_zoom = RandZoomd( - key, prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order, - mode=mode, cval=cval, prefilter=prefilter, use_gpu=True, - keep_size=False) + key, + 
prob=1.0, + min_zoom=min_zoom, + max_zoom=max_zoom, + order=order, + mode=mode, + cval=cval, + prefilter=prefilter, + use_gpu=True, + keep_size=False, + ) random_zoom.set_random_state(234) zoomed = random_zoom({key: self.imt[0]}) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append( + zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order, cval=cval, prefilter=prefilter) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed)) def test_keep_size(self): - key = 'img' - random_zoom = RandZoomd(key, prob=1.0, min_zoom=0.6, - max_zoom=0.7, keep_size=True) + key = "img" + random_zoom = RandZoomd(key, prob=1.0, min_zoom=0.6, max_zoom=0.7, keep_size=True) zoomed = random_zoom({key: self.imt[0]}) self.assertTrue(np.array_equal(zoomed[key].shape, self.imt.shape[1:])) - @parameterized.expand([ - ("no_min_zoom", None, 1.1, 1, TypeError), - ("invalid_order", 0.9, 1.1 , 's', AssertionError) - ]) + @parameterized.expand([("no_min_zoom", None, 1.1, 1, TypeError), ("invalid_order", 0.9, 1.1, "s", AssertionError)]) def test_invalid_inputs(self, _, min_zoom, max_zoom, order, raises): - key = 'img' + key = "img" with self.assertRaises(raises): random_zoom = RandZoomd(key, prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order) zoomed = random_zoom({key: self.imt[0]}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_randomizable.py b/tests/test_randomizable.py index aa1075570a..d02a97475b 100644 --- a/tests/test_randomizable.py +++ b/tests/test_randomizable.py @@ -22,7 +22,6 @@ def randomize(self): class TestRandomizable(unittest.TestCase): - def test_default(self): inst = RandTest() r1 = inst.R.rand() @@ -45,5 +44,5 @@ def test_state(self): self.assertAlmostEqual(inst.R.rand(), 0.69646918) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_repeat_channel.py b/tests/test_repeat_channel.py index 0de943761d..dca16cc58c 100644 --- a/tests/test_repeat_channel.py +++ b/tests/test_repeat_channel.py @@ -14,20 +14,15 @@ from parameterized import parameterized from monai.transforms import RepeatChannel -TEST_CASE_1 = [ - {'repeats': 3}, - np.array([[[0, 1], [1, 2]]]), - (3, 2, 2) -] +TEST_CASE_1 = [{"repeats": 3}, np.array([[[0, 1], [1, 2]]]), (3, 2, 2)] class TestRepeatChannel(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, input_data, expected_shape): result = RepeatChannel(**input_param)(input_data) self.assertEqual(result.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_repeat_channeld.py b/tests/test_repeat_channeld.py index 5476fbb8db..50e23b655f 100644 --- a/tests/test_repeat_channeld.py +++ b/tests/test_repeat_channeld.py @@ -15,22 +15,18 @@ from monai.transforms import RepeatChanneld TEST_CASE_1 = [ - {'keys': ['img'], 'repeats': 3}, - { - 'img': np.array([[[0, 1], [1, 2]]]), - 'seg': np.array([[[0, 1], [1, 2]]]) - }, + {"keys": ["img"], "repeats": 3}, + {"img": np.array([[[0, 1], [1, 2]]]), "seg": np.array([[[0, 1], [1, 2]]])}, (3, 2, 2), ] class TestRepeatChanneld(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, input_param, input_data, expected_shape): result = RepeatChanneld(**input_param)(input_data) - self.assertEqual(result['img'].shape, expected_shape) + 
self.assertEqual(result["img"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_resampler.py b/tests/test_resampler.py index 1fcdff2b43..933bf60d3c 100644 --- a/tests/test_resampler.py +++ b/tests/test_resampler.py @@ -20,46 +20,57 @@ TEST_CASES = [ [ - dict(padding_mode='zeros', as_tensor_output=False, device=None), - {'grid': create_grid((2, 2)), 'img': np.arange(4).reshape((1, 2, 2))}, - np.array([[[0., 0.25], [0.5, 0.75]]]) + dict(padding_mode="zeros", as_tensor_output=False, device=None), + {"grid": create_grid((2, 2)), "img": np.arange(4).reshape((1, 2, 2))}, + np.array([[[0.0, 0.25], [0.5, 0.75]]]), ], [ - dict(padding_mode='zeros', as_tensor_output=False, device=None), - {'grid': create_grid((4, 4)), 'img': np.arange(4).reshape((1, 2, 2))}, - np.array([[[0., 0., 0., 0.], [0., 0., 0.25, 0.], [0., 0.5, 0.75, 0.], [0., 0., 0., 0.]]]) + dict(padding_mode="zeros", as_tensor_output=False, device=None), + {"grid": create_grid((4, 4)), "img": np.arange(4).reshape((1, 2, 2))}, + np.array([[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.25, 0.0], [0.0, 0.5, 0.75, 0.0], [0.0, 0.0, 0.0, 0.0]]]), ], [ - dict(padding_mode='border', as_tensor_output=False, device=None), - {'grid': create_grid((4, 4)), 'img': np.arange(4).reshape((1, 2, 2))}, - np.array([[[0., 0., 1., 1.], [0., 0., 1., 1.], [2., 2., 3, 3.], [2., 2., 3., 3.]]]) + dict(padding_mode="border", as_tensor_output=False, device=None), + {"grid": create_grid((4, 4)), "img": np.arange(4).reshape((1, 2, 2))}, + np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 3, 3.0], [2.0, 2.0, 3.0, 3.0]]]), ], [ - dict(padding_mode='reflection', as_tensor_output=False, device=None), - {'grid': create_grid((4, 4)), 'img': np.arange(4).reshape((1, 2, 2)), 'mode': 'nearest'}, - np.array([[[3., 2., 3., 2.], [1., 0., 1., 0.], [3., 2., 3., 2.], [1., 0., 1., 0.]]]) + dict(padding_mode="reflection", as_tensor_output=False, device=None), + {"grid": create_grid((4, 4)), "img": np.arange(4).reshape((1, 2, 2)), "mode": "nearest"}, + np.array([[[3.0, 2.0, 3.0, 2.0], [1.0, 0.0, 1.0, 0.0], [3.0, 2.0, 3.0, 2.0], [1.0, 0.0, 1.0, 0.0]]]), ], [ - dict(padding_mode='zeros', as_tensor_output=False, device=None), - {'grid': create_grid((4, 4, 4)), 'img': np.arange(8).reshape((1, 2, 2, 2)), 'mode': 'bilinear'}, - np.array([[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0., 0.125, 0.], [0., 0.25, 0.375, 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0.5, 0.625, 0.], [0., 0.75, 0.875, 0.], [0., 0., 0., 0.]], - [[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]]) + dict(padding_mode="zeros", as_tensor_output=False, device=None), + {"grid": create_grid((4, 4, 4)), "img": np.arange(8).reshape((1, 2, 2, 2)), "mode": "bilinear"}, + np.array( + [ + [ + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.125, 0.0], [0.0, 0.25, 0.375, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.5, 0.625, 0.0], [0.0, 0.75, 0.875, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + ] + ] + ), ], [ - dict(padding_mode='border', as_tensor_output=False, device=None), - {'grid': create_grid((4, 4, 4)), 'img': np.arange(8).reshape((1, 2, 2, 2)), 'mode': 'bilinear'}, - np.array([[[[0., 0., 1., 1.], [0., 0., 1., 1.], [2., 2., 3., 3.], [2., 2., 3., 3.]], - [[0., 0., 1., 1.], [0., 0., 1., 1.], [2., 2., 
3., 3.], [2., 2., 3., 3.]], - [[4., 4., 5., 5.], [4., 4., 5., 5.], [6., 6., 7., 7.], [6., 6., 7., 7.]], - [[4., 4., 5., 5.], [4., 4., 5., 5.], [6., 6., 7., 7.], [6., 6., 7., 7.]]]]) + dict(padding_mode="border", as_tensor_output=False, device=None), + {"grid": create_grid((4, 4, 4)), "img": np.arange(8).reshape((1, 2, 2, 2)), "mode": "bilinear"}, + np.array( + [ + [ + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 3.0, 3.0], [2.0, 2.0, 3.0, 3.0]], + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 3.0, 3.0], [2.0, 2.0, 3.0, 3.0]], + [[4.0, 4.0, 5.0, 5.0], [4.0, 4.0, 5.0, 5.0], [6.0, 6.0, 7.0, 7.0], [6.0, 6.0, 7.0, 7.0]], + [[4.0, 4.0, 5.0, 5.0], [4.0, 4.0, 5.0, 5.0], [6.0, 6.0, 7.0, 7.0], [6.0, 6.0, 7.0, 7.0]], + ] + ] + ), ], ] class TestResample(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_resample(self, input_param, input_data, expected_val): g = Resample(**input_param) @@ -71,5 +82,5 @@ def test_resample(self, input_param, input_data, expected_val): np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_resize.py b/tests/test_resize.py index 3b63ac8544..bb5e6145e1 100644 --- a/tests/test_resize.py +++ b/tests/test_resize.py @@ -20,37 +20,41 @@ class TestResize(NumpyImageTestCase2D): - - @parameterized.expand([ - ("invalid_order", "order", AssertionError) - ]) + @parameterized.expand([("invalid_order", "order", AssertionError)]) def test_invalid_inputs(self, _, order, raises): with self.assertRaises(raises): resize = Resize(spatial_size=(128, 128, 3), order=order) resize(self.imt[0]) - @parameterized.expand([ - ((64, 64), 1, 'reflect', 0, True, True, True, None), - ((32, 32), 2, 'constant', 3, False, False, False, None), - ((256, 256), 3, 'constant', 3, False, False, False, None), - ]) - def test_correct_results(self, spatial_size, order, mode, - cval, clip, preserve_range, - anti_aliasing, anti_aliasing_sigma): - resize = Resize(spatial_size, order, mode, cval, clip, - preserve_range, anti_aliasing, - anti_aliasing_sigma) + @parameterized.expand( + [ + ((64, 64), 1, "reflect", 0, True, True, True, None), + ((32, 32), 2, "constant", 3, False, False, False, None), + ((256, 256), 3, "constant", 3, False, False, False, None), + ] + ) + def test_correct_results( + self, spatial_size, order, mode, cval, clip, preserve_range, anti_aliasing, anti_aliasing_sigma + ): + resize = Resize(spatial_size, order, mode, cval, clip, preserve_range, anti_aliasing, anti_aliasing_sigma) expected = list() for channel in self.imt[0]: - expected.append(skimage.transform.resize(channel, spatial_size, - order=order, mode=mode, - cval=cval, clip=clip, - preserve_range=preserve_range, - anti_aliasing=anti_aliasing, - anti_aliasing_sigma=anti_aliasing_sigma)) + expected.append( + skimage.transform.resize( + channel, + spatial_size, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range, + anti_aliasing=anti_aliasing, + anti_aliasing_sigma=anti_aliasing_sigma, + ) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(resize(self.imt[0]), expected)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_resized.py b/tests/test_resized.py index e58016e2a6..90ce2581bc 100644 --- a/tests/test_resized.py +++ b/tests/test_resized.py @@ -20,37 +20,43 @@ class TestResized(NumpyImageTestCase2D): - - @parameterized.expand([ - ("invalid_order", "order", AssertionError) - ]) + 
@parameterized.expand([("invalid_order", "order", AssertionError)]) def test_invalid_inputs(self, _, order, raises): with self.assertRaises(raises): - resize = Resized(keys='img', spatial_size=(128, 128, 3), order=order) - resize({'img': self.imt[0]}) - - @parameterized.expand([ - ((64, 64), 1, 'reflect', 0, True, True, True, None), - ((32, 32), 2, 'constant', 3, False, False, False, None), - ((256, 256), 3, 'constant', 3, False, False, False, None), - ]) - def test_correct_results(self, spatial_size, order, mode, - cval, clip, preserve_range, - anti_aliasing, anti_aliasing_sigma): - resize = Resized('img', spatial_size, order, mode, cval, clip, - preserve_range, anti_aliasing, - anti_aliasing_sigma) + resize = Resized(keys="img", spatial_size=(128, 128, 3), order=order) + resize({"img": self.imt[0]}) + + @parameterized.expand( + [ + ((64, 64), 1, "reflect", 0, True, True, True, None), + ((32, 32), 2, "constant", 3, False, False, False, None), + ((256, 256), 3, "constant", 3, False, False, False, None), + ] + ) + def test_correct_results( + self, spatial_size, order, mode, cval, clip, preserve_range, anti_aliasing, anti_aliasing_sigma + ): + resize = Resized( + "img", spatial_size, order, mode, cval, clip, preserve_range, anti_aliasing, anti_aliasing_sigma + ) expected = list() for channel in self.imt[0]: - expected.append(skimage.transform.resize(channel, spatial_size, - order=order, mode=mode, - cval=cval, clip=clip, - preserve_range=preserve_range, - anti_aliasing=anti_aliasing, - anti_aliasing_sigma=anti_aliasing_sigma)) + expected.append( + skimage.transform.resize( + channel, + spatial_size, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range, + anti_aliasing=anti_aliasing, + anti_aliasing_sigma=anti_aliasing_sigma, + ) + ) expected = np.stack(expected).astype(np.float32) - self.assertTrue(np.allclose(resize({'img': self.imt[0]})['img'], expected)) + self.assertTrue(np.allclose(resize({"img": self.imt[0]})["img"], expected)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rotate.py b/tests/test_rotate.py index 7d6d1b531b..e16c484f96 100644 --- a/tests/test_rotate.py +++ b/tests/test_rotate.py @@ -18,24 +18,28 @@ from monai.transforms import Rotate from tests.utils import NumpyImageTestCase2D -TEST_CASES = [(90, (0, 1), True, 1, 'reflect', 0, True), - (-90, (1, 0), True, 3, 'constant', 0, True), - (180, (1, 0), False, 2, 'constant', 4, False)] +TEST_CASES = [ + (90, (0, 1), True, 1, "reflect", 0, True), + (-90, (1, 0), True, 3, "constant", 0, True), + (180, (1, 0), False, 2, "constant", 4, False), +] -class TestRotate(NumpyImageTestCase2D): +class TestRotate(NumpyImageTestCase2D): @parameterized.expand(TEST_CASES) - def test_correct_results(self, angle, spatial_axes, reshape, - order, mode, cval, prefilter): - rotate_fn = Rotate(angle, spatial_axes, reshape, - order, mode, cval, prefilter) + def test_correct_results(self, angle, spatial_axes, reshape, order, mode, cval, prefilter): + rotate_fn = Rotate(angle, spatial_axes, reshape, order, mode, cval, prefilter) rotated = rotate_fn(self.imt[0]) expected = list() for channel in self.imt[0]: - expected.append(scipy.ndimage.rotate(channel, angle, spatial_axes, reshape, order=order, - mode=mode, cval=cval, prefilter=prefilter)) + expected.append( + scipy.ndimage.rotate( + channel, angle, spatial_axes, reshape, order=order, mode=mode, cval=cval, prefilter=prefilter + ) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, 
rotated)) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rotate90.py b/tests/test_rotate90.py index 74c25746a1..e4eafcc88c 100644 --- a/tests/test_rotate90.py +++ b/tests/test_rotate90.py @@ -18,7 +18,6 @@ class TestRotate90(NumpyImageTestCase2D): - def test_rotate90_default(self): rotate = Rotate90() rotated = rotate(self.imt[0]) @@ -56,5 +55,5 @@ def test_prob_k_spatial_axes(self): self.assertTrue(np.allclose(rotated, expected)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rotate90d.py b/tests/test_rotate90d.py index 384cb8f0cd..1f85d2fc0a 100644 --- a/tests/test_rotate90d.py +++ b/tests/test_rotate90d.py @@ -18,9 +18,8 @@ class TestRotate90d(NumpyImageTestCase2D): - def test_rotate90_default(self): - key = 'test' + key = "test" rotate = Rotate90d(keys=key) rotated = rotate({key: self.imt[0]}) expected = list() @@ -40,7 +39,7 @@ def test_k(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_spatial_axes(self): - key = 'test' + key = "test" rotate = Rotate90d(keys=key, spatial_axes=(0, 1)) rotated = rotate({key: self.imt[0]}) expected = list() @@ -50,7 +49,7 @@ def test_spatial_axes(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_prob_k_spatial_axes(self): - key = 'test' + key = "test" rotate = Rotate90d(keys=key, k=2, spatial_axes=(0, 1)) rotated = rotate({key: self.imt[0]}) expected = list() @@ -60,11 +59,11 @@ def test_prob_k_spatial_axes(self): self.assertTrue(np.allclose(rotated[key], expected)) def test_no_key(self): - key = 'unknown' + key = "unknown" rotate = Rotate90d(keys=key) - with self.assertRaisesRegex(KeyError, ''): - rotate({'test': self.imt[0]}) + with self.assertRaisesRegex(KeyError, ""): + rotate({"test": self.imt[0]}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rotated.py b/tests/test_rotated.py index af7a758d8d..206b624a07 100644 --- a/tests/test_rotated.py +++ b/tests/test_rotated.py @@ -18,26 +18,29 @@ from monai.transforms import Rotated from tests.utils import NumpyImageTestCase2D -TEST_CASES = [(90, (0, 1), True, 1, 'reflect', 0, True), - (-90, (1, 0), True, 3, 'constant', 0, True), - (180, (1, 0), False, 2, 'constant', 4, False)] +TEST_CASES = [ + (90, (0, 1), True, 1, "reflect", 0, True), + (-90, (1, 0), True, 3, "constant", 0, True), + (180, (1, 0), False, 2, "constant", 4, False), +] -class TestRotated(NumpyImageTestCase2D): +class TestRotated(NumpyImageTestCase2D): @parameterized.expand(TEST_CASES) - def test_correct_results(self, angle, spatial_axes, reshape, - order, mode, cval, prefilter): - key = 'img' - rotate_fn = Rotated(key, angle, spatial_axes, reshape, order, - mode, cval, prefilter) + def test_correct_results(self, angle, spatial_axes, reshape, order, mode, cval, prefilter): + key = "img" + rotate_fn = Rotated(key, angle, spatial_axes, reshape, order, mode, cval, prefilter) rotated = rotate_fn({key: self.imt[0]}) expected = list() for channel in self.imt[0]: - expected.append(scipy.ndimage.rotate(channel, angle, spatial_axes, reshape, order=order, - mode=mode, cval=cval, prefilter=prefilter)) + expected.append( + scipy.ndimage.rotate( + channel, angle, spatial_axes, reshape, order=order, mode=mode, cval=cval, prefilter=prefilter + ) + ) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, rotated[key])) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_scale_intensity.py 
b/tests/test_scale_intensity.py index 5d1a6fb6bd..5c25a5c35a 100644 --- a/tests/test_scale_intensity.py +++ b/tests/test_scale_intensity.py @@ -17,7 +17,6 @@ class TestScaleIntensity(NumpyImageTestCase2D): - def test_range_scale(self): scaler = ScaleIntensity(minv=1.0, maxv=2.0) result = scaler(self.imt) @@ -34,5 +33,5 @@ def test_factor_scale(self): np.testing.assert_allclose(result, expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_scale_intensity_range.py b/tests/test_scale_intensity_range.py index 05d9b1fa9e..d952d18ce9 100644 --- a/tests/test_scale_intensity_range.py +++ b/tests/test_scale_intensity_range.py @@ -18,7 +18,6 @@ class IntensityScaleIntensityRange(NumpyImageTestCase2D): - def test_image_scale_intensity_range(self): scaler = ScaleIntensityRange(a_min=20, a_max=108, b_min=50, b_max=80) scaled = scaler(self.imt) @@ -27,5 +26,5 @@ def test_image_scale_intensity_range(self): self.assertTrue(np.allclose(scaled, expected)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_scale_intensity_ranged.py b/tests/test_scale_intensity_ranged.py index 57944f0d87..c4c12bfacf 100644 --- a/tests/test_scale_intensity_ranged.py +++ b/tests/test_scale_intensity_ranged.py @@ -18,9 +18,8 @@ class IntensityScaleIntensityRanged(NumpyImageTestCase2D): - def test_image_scale_intensity_ranged(self): - key = 'img' + key = "img" scaler = ScaleIntensityRanged(keys=key, a_min=20, a_max=108, b_min=50, b_max=80) scaled = scaler({key: self.imt}) expected = (self.imt - 20) / 88 @@ -28,5 +27,5 @@ def test_image_scale_intensity_ranged(self): self.assertTrue(np.allclose(scaled[key], expected)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_scale_intensityd.py b/tests/test_scale_intensityd.py index fab82865c1..42d80794cb 100644 --- a/tests/test_scale_intensityd.py +++ b/tests/test_scale_intensityd.py @@ -17,9 +17,8 @@ class TestScaleIntensityd(NumpyImageTestCase2D): - def test_range_scale(self): - key = 'img' + key = "img" scaler = ScaleIntensityd(keys=[key], minv=1.0, maxv=2.0) result = scaler({key: self.imt}) mina = np.min(self.imt) @@ -29,12 +28,12 @@ def test_range_scale(self): np.testing.assert_allclose(result[key], expected) def test_factor_scale(self): - key = 'img' + key = "img" scaler = ScaleIntensityd(keys=[key], minv=None, maxv=None, factor=0.1) result = scaler({key: self.imt}) expected = (self.imt * (1 + 0.1)).astype(np.float32) np.testing.assert_allclose(result[key], expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_shift_intensity.py b/tests/test_shift_intensity.py index 01da63d83d..857ae034fc 100644 --- a/tests/test_shift_intensity.py +++ b/tests/test_shift_intensity.py @@ -17,7 +17,6 @@ class TestShiftIntensity(NumpyImageTestCase2D): - def test_value(self): shifter = ShiftIntensity(offset=1.0) result = shifter(self.imt) @@ -25,5 +24,5 @@ def test_value(self): np.testing.assert_allclose(result, expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_shift_intensityd.py b/tests/test_shift_intensityd.py index 130251de93..0eb5c23959 100644 --- a/tests/test_shift_intensityd.py +++ b/tests/test_shift_intensityd.py @@ -17,14 +17,13 @@ class TestShiftIntensityd(NumpyImageTestCase2D): - def test_value(self): - key = 'img' + key = "img" shifter = ShiftIntensityd(keys=[key], offset=1.0) result = shifter({key: self.imt}) expected = self.imt + 1.0 
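# the dictionary-based transform should shift the array stored under `key` by the
# same fixed offset, so the result is compared elementwise against self.imt + 1.0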
np.testing.assert_allclose(result[key], expected) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_simulatedelay.py b/tests/test_simulatedelay.py index 9c7bbbfbd8..8961507091 100644 --- a/tests/test_simulatedelay.py +++ b/tests/test_simulatedelay.py @@ -18,7 +18,6 @@ class TestSimulateDelay(NumpyImageTestCase2D): - @parameterized.expand([(0.45,), (1,)]) def test_value(self, delay_test_time: float): resize = SimulateDelay(delay_time=delay_test_time) @@ -29,5 +28,5 @@ def test_value(self, delay_test_time: float): np.testing.assert_array_less(delay_test_time, measured_approximate) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_simulatedelayd.py b/tests/test_simulatedelayd.py index af533a6d76..b9dcd6b32a 100644 --- a/tests/test_simulatedelayd.py +++ b/tests/test_simulatedelayd.py @@ -18,16 +18,15 @@ class TestSimulateDelay(NumpyImageTestCase2D): - @parameterized.expand([(0.45,), (1,)]) def test_value(self, delay_test_time: float): - resize = SimulateDelayd(keys='imgd', delay_time=delay_test_time) + resize = SimulateDelayd(keys="imgd", delay_time=delay_test_time) start: float = time.time() - _ = resize({'imgd': self.imt[0]}) + _ = resize({"imgd": self.imt[0]}) stop: float = time.time() measured_approximate: float = stop - start np.testing.assert_array_less(delay_test_time, measured_approximate) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_sliding_window_inference.py b/tests/test_sliding_window_inference.py index d5ac52a867..e487eb41cd 100644 --- a/tests/test_sliding_window_inference.py +++ b/tests/test_sliding_window_inference.py @@ -26,7 +26,6 @@ class TestSlidingWindowInference(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_sliding_window_default(self, image_shape, roi_shape, sw_batch_size): inputs = torch.ones(*image_shape) @@ -40,5 +39,5 @@ def compute(data): self.assertTrue(np.allclose(result.numpy(), expected_val)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_spacing.py b/tests/test_spacing.py index f323361bd4..5749fcbfe6 100644 --- a/tests/test_spacing.py +++ b/tests/test_spacing.py @@ -17,108 +17,129 @@ from monai.transforms import Spacing TEST_CASES = [ + [{"pixdim": (2.0,)}, np.ones((1, 2)), {"affine": np.eye(4)}, np.array([[1.0, 0.0]])], # data [ - {'pixdim': (2.0,)}, - np.ones((1, 2)), # data - {'affine': np.eye(4)}, - np.array([[1., 0.]]) - ], - [ - {'pixdim': (1.0, 0.2, 1.5)}, + {"pixdim": (1.0, 0.2, 1.5)}, np.ones((1, 2, 1, 2)), # data - {'affine': np.eye(4)}, - np.array([[[[1., 0.]], [[1., 0.]]]]) + {"affine": np.eye(4)}, + np.array([[[[1.0, 0.0]], [[1.0, 0.0]]]]), ], [ - {'pixdim': (1.0, 0.2, 1.5), 'diagonal': False}, + {"pixdim": (1.0, 0.2, 1.5), "diagonal": False}, np.ones((1, 2, 1, 2)), # data - { - 'affine': np.array([[2, 1, 0, 4], [-1, -3, 0, 5], [0, 0, 2., 5], [0, 0, 0, 1]],), - }, - np.zeros((1, 3, 1, 2)) + {"affine": np.array([[2, 1, 0, 4], [-1, -3, 0, 5], [0, 0, 2.0, 5], [0, 0, 0, 1]])}, + np.zeros((1, 3, 1, 2)), ], [ - {'pixdim': (3.0, 1.0)}, + {"pixdim": (3.0, 1.0)}, np.arange(24).reshape((2, 3, 4)), # data - {'affine': np.diag([-3.0, 0.2, 1.5, 1])}, - np.array([[[0, 0], [4, 0], [8, 0]], [[12, 0], [16, 0], [20, 0]]]) + {"affine": np.diag([-3.0, 0.2, 1.5, 1])}, + np.array([[[0, 0], [4, 0], [8, 0]], [[12, 0], [16, 0], [20, 0]]]), ], [ - {'pixdim': (3.0, 1.0)}, + {"pixdim": (3.0, 1.0)}, np.arange(24).reshape((2, 3, 
4)), # data {}, - np.array([[[0, 1, 2, 3], [0, 0, 0, 0]], [[12, 13, 14, 15], [0, 0, 0, 0]]]) + np.array([[[0, 1, 2, 3], [0, 0, 0, 0]], [[12, 13, 14, 15], [0, 0, 0, 0]]]), ], [ - {'pixdim': (1.0, 1.0)}, + {"pixdim": (1.0, 1.0)}, np.arange(24).reshape((2, 3, 4)), # data {}, - np.array([[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]) + np.array( + [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]] + ), ], [ - {'pixdim': (4.0, 5.0, 6.0)}, + {"pixdim": (4.0, 5.0, 6.0)}, np.arange(24).reshape((1, 2, 3, 4)), # data - {'affine': np.array([[-4, 0, 0, 4], [0, 5, 0, -5], [0, 0, 6, -6], [0, 0, 0, 1]])}, + {"affine": np.array([[-4, 0, 0, 4], [0, 5, 0, -5], [0, 0, 6, -6], [0, 0, 0, 1]])}, np.arange(24).reshape((1, 2, 3, 4)), # data ], [ - {'pixdim': (4.0, 5.0, 6.0), 'diagonal': True}, + {"pixdim": (4.0, 5.0, 6.0), "diagonal": True}, np.arange(24).reshape((1, 2, 3, 4)), # data - {'affine': np.array([[-4, 0, 0, 4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]])}, - np.array([[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]], - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]]]) + {"affine": np.array([[-4, 0, 0, 4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]])}, + np.array( + [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]], [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]]] + ), ], [ - {'pixdim': (4.0, 5.0, 6.0), 'mode': 'nearest', 'diagonal': True}, + {"pixdim": (4.0, 5.0, 6.0), "mode": "nearest", "diagonal": True}, np.arange(24).reshape((1, 2, 3, 4)), # data - {'affine': np.array([[-4, 0, 0, -4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]])}, - np.array([[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]], - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]]]) + {"affine": np.array([[-4, 0, 0, -4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]])}, + np.array( + [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]], [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]]] + ), ], [ - {'pixdim': (4.0, 5.0, 6.0), 'mode': 'nearest', 'diagonal': True}, + {"pixdim": (4.0, 5.0, 6.0), "mode": "nearest", "diagonal": True}, np.arange(24).reshape((1, 2, 3, 4)), # data - {'affine': np.array([[-4, 0, 0, -4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), 'interp_order': 0}, - np.array([[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]], - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]]]) + {"affine": np.array([[-4, 0, 0, -4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), "interp_order": 0}, + np.array( + [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]], [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]]] + ), ], [ - {'pixdim': (2.0, 5.0, 6.0), 'mode': 'constant', 'diagonal': True}, + {"pixdim": (2.0, 5.0, 6.0), "mode": "constant", "diagonal": True}, np.arange(24).reshape((1, 4, 6)), # data - {'affine': np.array([[-4, 0, 0, -4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), 'interp_order': 0}, - np.array([[[18, 19, 20, 21, 22, 23], [18, 19, 20, 21, 22, 23], [12, 13, 14, 15, 16, 17], - [12, 13, 14, 15, 16, 17], [6, 7, 8, 9, 10, 11], [6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5]]]) + {"affine": np.array([[-4, 0, 0, -4], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), "interp_order": 0}, + np.array( + [ + [ + [18, 19, 20, 21, 22, 23], + [18, 19, 20, 21, 22, 23], + [12, 13, 14, 15, 16, 17], + [12, 13, 14, 15, 16, 17], + [6, 7, 8, 9, 10, 11], + [6, 7, 8, 9, 10, 11], + [0, 1, 2, 3, 4, 5], + ] + ] + ), ], [ - {'pixdim': (5., 3., 6.), 'mode': 'constant', 'diagonal': True, 'dtype': np.float32}, + {"pixdim": (5.0, 3.0, 6.0), "mode": "constant", 
"diagonal": True, "dtype": np.float32}, np.arange(24).reshape((1, 4, 6)), # data - {'affine': np.array([[-4, 0, 0, 0], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), 'interp_order': 0}, - np.array([[[18., 19., 19., 20., 20., 21., 22., 22., 23], [12., 13., 13., 14., 14., 15., 16., 16., 17.], - [6., 7., 7., 8., 8., 9., 10., 10., 11.]]],) + {"affine": np.array([[-4, 0, 0, 0], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), "interp_order": 0}, + np.array( + [ + [ + [18.0, 19.0, 19.0, 20.0, 20.0, 21.0, 22.0, 22.0, 23], + [12.0, 13.0, 13.0, 14.0, 14.0, 15.0, 16.0, 16.0, 17.0], + [6.0, 7.0, 7.0, 8.0, 8.0, 9.0, 10.0, 10.0, 11.0], + ] + ], + ), ], [ - {'pixdim': (5., 3., 6.), 'mode': 'constant', 'diagonal': True, 'dtype': np.float32}, + {"pixdim": (5.0, 3.0, 6.0), "mode": "constant", "diagonal": True, "dtype": np.float32}, np.arange(24).reshape((1, 4, 6)), # data - {'affine': np.array([[-4, 0, 0, 0], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), 'interp_order': 2}, + {"affine": np.array([[-4, 0, 0, 0], [0, 5, 0, 0], [0, 0, 6, 0], [0, 0, 0, 1]]), "interp_order": 2}, np.array( - [[[18., 18.492683, 19.22439, 19.80683, 20.398048, 21., 21.570732, 22.243902, 22.943415], - [10.392858, 10.88554, 11.617248, 12.199686, 12.790906, 13.392858, 13.963589, 14.63676, 15.336272], - [2.142857, 2.63554, 3.3672473, 3.9496865, 4.540906, 5.142857, 5.7135887, 6.3867598, 7.086272]]],) + [ + [ + [18.0, 18.492683, 19.22439, 19.80683, 20.398048, 21.0, 21.570732, 22.243902, 22.943415], + [10.392858, 10.88554, 11.617248, 12.199686, 12.790906, 13.392858, 13.963589, 14.63676, 15.336272], + [2.142857, 2.63554, 3.3672473, 3.9496865, 4.540906, 5.142857, 5.7135887, 6.3867598, 7.086272], + ] + ], + ), ], ] class TestSpacingCase(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_spacing(self, init_param, img, data_param, expected_output): res = Spacing(**init_param)(img, **data_param) np.testing.assert_allclose(res[0], expected_output, atol=1e-6) - if 'original_affine' in data_param: - np.testing.assert_allclose(res[1], data_param['original_affine']) - np.testing.assert_allclose(init_param['pixdim'], - np.sqrt(np.sum(np.square(res[2]), axis=0))[:len(init_param['pixdim'])]) + if "original_affine" in data_param: + np.testing.assert_allclose(res[1], data_param["original_affine"]) + np.testing.assert_allclose( + init_param["pixdim"], np.sqrt(np.sum(np.square(res[2]), axis=0))[: len(init_param["pixdim"])] + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_spacingd.py b/tests/test_spacingd.py index 88409c1038..441187d481 100644 --- a/tests/test_spacingd.py +++ b/tests/test_spacingd.py @@ -17,51 +17,53 @@ class TestSpacingDCase(unittest.TestCase): - def test_spacingd_3d(self): - data = {'image': np.ones((2, 10, 15, 20)), 'image.affine': np.eye(4)} - spacing = Spacingd(keys='image', pixdim=(1, 2, 1.4)) + data = {"image": np.ones((2, 10, 15, 20)), "image.affine": np.eye(4)} + spacing = Spacingd(keys="image", pixdim=(1, 2, 1.4)) res = spacing(data) - self.assertEqual(('image', 'image.affine'), tuple(sorted(res))) - np.testing.assert_allclose(res['image'].shape, (2, 10, 8, 15)) - np.testing.assert_allclose(res['image.affine'], np.diag([1, 2, 1.4, 1.])) + self.assertEqual(("image", "image.affine"), tuple(sorted(res))) + np.testing.assert_allclose(res["image"].shape, (2, 10, 8, 15)) + np.testing.assert_allclose(res["image.affine"], np.diag([1, 2, 1.4, 1.0])) def test_spacingd_2d(self): - data = {'image': np.ones((2, 10, 20)), 'image.affine': np.eye(3)} - spacing = Spacingd(keys='image', 
pixdim=(1, 2, 1.4)) + data = {"image": np.ones((2, 10, 20)), "image.affine": np.eye(3)} + spacing = Spacingd(keys="image", pixdim=(1, 2, 1.4)) res = spacing(data) - self.assertEqual(('image', 'image.affine'), tuple(sorted(res))) - np.testing.assert_allclose(res['image'].shape, (2, 10, 10)) - np.testing.assert_allclose(res['image.affine'], np.diag((1, 2, 1))) + self.assertEqual(("image", "image.affine"), tuple(sorted(res))) + np.testing.assert_allclose(res["image"].shape, (2, 10, 10)) + np.testing.assert_allclose(res["image.affine"], np.diag((1, 2, 1))) def test_spacingd_1d(self): - data = {'image': np.arange(20).reshape((2, 10)), 'image.original_affine': np.diag((3, 2, 1, 1))} - data['image.affine'] = data['image.original_affine'] - spacing = Spacingd(keys='image', pixdim=(0.2,)) + data = {"image": np.arange(20).reshape((2, 10)), "image.original_affine": np.diag((3, 2, 1, 1))} + data["image.affine"] = data["image.original_affine"] + spacing = Spacingd(keys="image", pixdim=(0.2,)) res = spacing(data) - self.assertEqual(('image', 'image.affine', 'image.original_affine'), tuple(sorted(res))) - np.testing.assert_allclose(res['image'].shape, (2, 136)) - np.testing.assert_allclose(res['image.affine'], np.diag((0.2, 2, 1, 1))) - np.testing.assert_allclose(res['image.original_affine'], np.diag((3, 2, 1, 1))) + self.assertEqual(("image", "image.affine", "image.original_affine"), tuple(sorted(res))) + np.testing.assert_allclose(res["image"].shape, (2, 136)) + np.testing.assert_allclose(res["image.affine"], np.diag((0.2, 2, 1, 1))) + np.testing.assert_allclose(res["image.original_affine"], np.diag((3, 2, 1, 1))) def test_interp_all(self): - data = {'image': np.arange(20).reshape((2, 10)), 'seg': np.ones((2, 10)), - 'image.affine': np.eye(4), 'seg.affine': np.eye(4)} - spacing = Spacingd(keys=('image', 'seg'), interp_order=0, pixdim=(0.2,)) + data = { + "image": np.arange(20).reshape((2, 10)), + "seg": np.ones((2, 10)), + "image.affine": np.eye(4), + "seg.affine": np.eye(4), + } + spacing = Spacingd(keys=("image", "seg"), interp_order=0, pixdim=(0.2,)) res = spacing(data) - self.assertEqual(('image', 'image.affine', 'seg', 'seg.affine'), tuple(sorted(res))) - np.testing.assert_allclose(res['image'].shape, (2, 46)) - np.testing.assert_allclose(res['image.affine'], np.diag((0.2, 1, 1, 1))) + self.assertEqual(("image", "image.affine", "seg", "seg.affine"), tuple(sorted(res))) + np.testing.assert_allclose(res["image"].shape, (2, 46)) + np.testing.assert_allclose(res["image.affine"], np.diag((0.2, 1, 1, 1))) def test_interp_sep(self): - data = {'image': np.ones((2, 10)), 'seg': np.ones((2, 10)), - 'image.affine': np.eye(4), 'seg.affine': np.eye(4)} - spacing = Spacingd(keys=('image', 'seg'), interp_order=(2, 0), pixdim=(0.2,)) + data = {"image": np.ones((2, 10)), "seg": np.ones((2, 10)), "image.affine": np.eye(4), "seg.affine": np.eye(4)} + spacing = Spacingd(keys=("image", "seg"), interp_order=(2, 0), pixdim=(0.2,)) res = spacing(data) - self.assertEqual(('image', 'image.affine', 'seg', 'seg.affine'), tuple(sorted(res))) - np.testing.assert_allclose(res['image'].shape, (2, 46)) - np.testing.assert_allclose(res['image.affine'], np.diag((0.2, 1, 1, 1))) + self.assertEqual(("image", "image.affine", "seg", "seg.affine"), tuple(sorted(res))) + np.testing.assert_allclose(res["image"].shape, (2, 46)) + np.testing.assert_allclose(res["image.affine"], np.diag((0.2, 1, 1, 1))) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_spatial_crop.py b/tests/test_spatial_crop.py 
index 7d4eaac586..b967425c5e 100644 --- a/tests/test_spatial_crop.py +++ b/tests/test_spatial_crop.py @@ -15,49 +15,32 @@ from monai.transforms import SpatialCrop TEST_CASE_1 = [ - { - 'roi_center': [1, 1, 1], - 'roi_size': [2, 2, 2] - }, + {"roi_center": [1, 1, 1], "roi_size": [2, 2, 2]}, np.random.randint(0, 2, size=[3, 3, 3, 3]), - (3, 2, 2, 2) + (3, 2, 2, 2), ] -TEST_CASE_2 = [ - { - 'roi_start': [0, 0, 0], - 'roi_end': [2, 2, 2] - }, - np.random.randint(0, 2, size=[3, 3, 3, 3]), - (3, 2, 2, 2) -] +TEST_CASE_2 = [{"roi_start": [0, 0, 0], "roi_end": [2, 2, 2]}, np.random.randint(0, 2, size=[3, 3, 3, 3]), (3, 2, 2, 2)] TEST_CASE_3 = [ - { - 'roi_start': [0, 0], - 'roi_end': [2, 2] - }, + {"roi_start": [0, 0], "roi_end": [2, 2]}, np.random.randint(0, 2, size=[3, 3, 3, 3]), (3, 2, 2, 3), ] TEST_CASE_4 = [ - { - 'roi_start': [0, 0, 0, 0, 0], - 'roi_end': [2, 2, 2, 2, 2] - }, + {"roi_start": [0, 0, 0, 0, 0], "roi_end": [2, 2, 2, 2, 2]}, np.random.randint(0, 2, size=[3, 3, 3, 3]), (3, 2, 2, 2), ] class TestSpatialCrop(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, input_data, expected_shape): result = SpatialCrop(**input_param)(input_data) self.assertTupleEqual(result.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_spatial_cropd.py b/tests/test_spatial_cropd.py index a622b18429..f8c628a50a 100644 --- a/tests/test_spatial_cropd.py +++ b/tests/test_spatial_cropd.py @@ -15,53 +15,36 @@ from monai.transforms import SpatialCropd TEST_CASE_1 = [ - { - 'keys': ['img'], - 'roi_center': [1, 1, 1], - 'roi_size': [2, 2, 2] - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, - (3, 2, 2, 2) + {"keys": ["img"], "roi_center": [1, 1, 1], "roi_size": [2, 2, 2]}, + {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])}, + (3, 2, 2, 2), ] TEST_CASE_2 = [ - { - 'keys': ['img'], - 'roi_start': [0, 0, 0], - 'roi_end': [2, 2, 2] - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, - (3, 2, 2, 2) + {"keys": ["img"], "roi_start": [0, 0, 0], "roi_end": [2, 2, 2]}, + {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])}, + (3, 2, 2, 2), ] TEST_CASE_3 = [ - { - 'keys': ['img'], - 'roi_start': [0, 0], - 'roi_end': [2, 2] - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, + {"keys": ["img"], "roi_start": [0, 0], "roi_end": [2, 2]}, + {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])}, (3, 2, 2, 3), ] TEST_CASE_4 = [ - { - 'keys': ['img'], - 'roi_start': [0, 0, 0, 0, 0], - 'roi_end': [2, 2, 2, 2, 2] - }, - {'img': np.random.randint(0, 2, size=[3, 3, 3, 3])}, + {"keys": ["img"], "roi_start": [0, 0, 0, 0, 0], "roi_end": [2, 2, 2, 2, 2]}, + {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])}, (3, 2, 2, 2), ] class TestSpatialCropd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, input_data, expected_shape): result = SpatialCropd(**input_param)(input_data) - self.assertTupleEqual(result['img'].shape, expected_shape) + self.assertTupleEqual(result["img"].shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_spatial_pad.py b/tests/test_spatial_pad.py index 78d32f17ee..4ccc309ba6 100644 --- a/tests/test_spatial_pad.py +++ b/tests/test_spatial_pad.py @@ -15,28 +15,19 @@ from monai.transforms import SpatialPad TEST_CASE_1 = [ - { - 'spatial_size': [15, 8, 8], - 'method': 'symmetric', - 'mode': 'constant' - }, + 
{"spatial_size": [15, 8, 8], "method": "symmetric", "mode": "constant"}, np.zeros((3, 8, 8, 4)), np.zeros((3, 15, 8, 8)), ] TEST_CASE_2 = [ - { - 'spatial_size': [15, 8, 8], - 'method': 'end', - 'mode': 'constant' - }, + {"spatial_size": [15, 8, 8], "method": "end", "mode": "constant"}, np.zeros((3, 8, 8, 4)), np.zeros((3, 15, 8, 8)), ] class TestSpatialPad(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_pad_shape(self, input_param, input_data, expected_val): padder = SpatialPad(**input_param) @@ -44,5 +35,5 @@ def test_pad_shape(self, input_param, input_data, expected_val): self.assertAlmostEqual(result.shape, expected_val.shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_spatial_padd.py b/tests/test_spatial_padd.py index e036fb7ce2..a885e982b5 100644 --- a/tests/test_spatial_padd.py +++ b/tests/test_spatial_padd.py @@ -15,36 +15,25 @@ from monai.transforms import SpatialPadd TEST_CASE_1 = [ - { - 'keys': ['img'], - 'spatial_size': [15, 8, 8], - 'method': 'symmetric', - 'mode': 'constant' - }, - {'img': np.zeros((3, 8, 8, 4))}, + {"keys": ["img"], "spatial_size": [15, 8, 8], "method": "symmetric", "mode": "constant"}, + {"img": np.zeros((3, 8, 8, 4))}, np.zeros((3, 15, 8, 8)), ] TEST_CASE_2 = [ - { - 'keys': ['img'], - 'spatial_size': [15, 8, 8], - 'method': 'end', - 'mode': 'constant' - }, - {'img': np.zeros((3, 8, 8, 4))}, + {"keys": ["img"], "spatial_size": [15, 8, 8], "method": "end", "mode": "constant"}, + {"img": np.zeros((3, 8, 8, 4))}, np.zeros((3, 15, 8, 8)), ] class TestSpatialPadd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_pad_shape(self, input_param, input_data, expected_val): padder = SpatialPadd(**input_param) result = padder(input_data) - self.assertAlmostEqual(result['img'].shape, expected_val.shape) + self.assertAlmostEqual(result["img"].shape, expected_val.shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_squeezedim.py b/tests/test_squeezedim.py index e5dc852dd4..d873c4aa06 100644 --- a/tests/test_squeezedim.py +++ b/tests/test_squeezedim.py @@ -14,53 +14,26 @@ from parameterized import parameterized from monai.transforms import SqueezeDim -TEST_CASE_1 = [ - { - 'dim': None - }, - np.random.rand(1, 2, 1, 3), - (2, 3) -] +TEST_CASE_1 = [{"dim": None}, np.random.rand(1, 2, 1, 3), (2, 3)] -TEST_CASE_2 = [ - { - 'dim': 2 - }, - np.random.rand(1, 2, 1, 8, 16), - (1, 2, 8, 16) -] +TEST_CASE_2 = [{"dim": 2}, np.random.rand(1, 2, 1, 8, 16), (1, 2, 8, 16)] -TEST_CASE_3 = [ - { - 'dim': -1 - }, - np.random.rand(1, 1, 16, 8, 1), - (1, 1, 16, 8) -] +TEST_CASE_3 = [{"dim": -1}, np.random.rand(1, 1, 16, 8, 1), (1, 1, 16, 8)] -TEST_CASE_4 = [ - {}, - np.random.rand(1, 2, 1, 3), - (2, 3) -] +TEST_CASE_4 = [{}, np.random.rand(1, 2, 1, 3), (2, 3)] TEST_CASE_5 = [ - { - 'dim': -2 - }, + {"dim": -2}, np.random.rand(1, 1, 16, 8, 1), ] TEST_CASE_6 = [ - { - 'dim': 0.5 - }, + {"dim": 0.5}, np.random.rand(1, 1, 16, 8, 1), ] class TestSqueezeDim(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, test_data, expected_shape): result = SqueezeDim(**input_param)(test_data) @@ -72,5 +45,5 @@ def test_invalid_inputs(self, input_param, test_data): result = SqueezeDim(**input_param)(test_data) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_squeezedimd.py b/tests/test_squeezedimd.py index 
d0df400ac0..f73aef5b11 100644 --- a/tests/test_squeezedimd.py +++ b/tests/test_squeezedimd.py @@ -16,82 +16,46 @@ TEST_CASE_1 = [ - { - 'keys': ['img', 'seg'], - 'dim': None - }, - { - 'img': np.random.rand(1, 2, 1, 3), - 'seg': np.random.randint(0, 2, size=[1, 2, 1, 3]), - }, - (2, 3) + {"keys": ["img", "seg"], "dim": None}, + {"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])}, + (2, 3), ] TEST_CASE_2 = [ - { - 'keys': ['img', 'seg'], - 'dim': 2 - }, - { - 'img': np.random.rand(1, 2, 1, 8, 16), - 'seg': np.random.randint(0, 2, size=[1, 2, 1, 8, 16]), - }, - (1, 2, 8, 16) + {"keys": ["img", "seg"], "dim": 2}, + {"img": np.random.rand(1, 2, 1, 8, 16), "seg": np.random.randint(0, 2, size=[1, 2, 1, 8, 16])}, + (1, 2, 8, 16), ] TEST_CASE_3 = [ - { - 'keys': ['img', 'seg'], - 'dim': -1 - }, - { - 'img': np.random.rand(1, 1, 16, 8, 1), - 'seg': np.random.randint(0, 2, size=[1, 1, 16, 8, 1]), - }, - (1, 1, 16, 8) + {"keys": ["img", "seg"], "dim": -1}, + {"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])}, + (1, 1, 16, 8), ] TEST_CASE_4 = [ - { - 'keys': ['img', 'seg'] - }, - { - 'img': np.random.rand(1, 2, 1, 3), - 'seg': np.random.randint(0, 2, size=[1, 2, 1, 3]), - }, - (2, 3) + {"keys": ["img", "seg"]}, + {"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])}, + (2, 3), ] TEST_CASE_5 = [ - { - 'keys': ['img', 'seg'], - 'dim': -2 - }, - { - 'img': np.random.rand(1, 1, 16, 8, 1), - 'seg': np.random.randint(0, 2, size=[1, 1, 16, 8, 1]), - } + {"keys": ["img", "seg"], "dim": -2}, + {"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])}, ] TEST_CASE_6 = [ - { - 'keys': ['img', 'seg'], - 'dim': 0.5 - }, - { - 'img': np.random.rand(1, 1, 16, 8, 1), - 'seg': np.random.randint(0, 2, size=[1, 1, 16, 8, 1]), - } + {"keys": ["img", "seg"], "dim": 0.5}, + {"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])}, ] class TestSqueezeDim(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, test_data, expected_shape): result = SqueezeDimd(**input_param)(test_data) - self.assertTupleEqual(result['img'].shape, expected_shape) - self.assertTupleEqual(result['seg'].shape, expected_shape) + self.assertTupleEqual(result["img"].shape, expected_shape) + self.assertTupleEqual(result["seg"].shape, expected_shape) @parameterized.expand([TEST_CASE_5, TEST_CASE_6]) def test_invalid_inputs(self, input_param, test_data): @@ -99,5 +63,5 @@ def test_invalid_inputs(self, input_param, test_data): result = SqueezeDimd(**input_param)(test_data) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_threshold_intensity.py b/tests/test_threshold_intensity.py index 59f88e73a5..a9fdea4ad9 100644 --- a/tests/test_threshold_intensity.py +++ b/tests/test_threshold_intensity.py @@ -14,36 +14,14 @@ from parameterized import parameterized from monai.transforms import ThresholdIntensity -TEST_CASE_1 = [ - { - 'threshold': 5, - 'above': True, - 'cval': 0 - }, - (0, 0, 0, 0, 0, 0, 6, 7, 8, 9) -] - -TEST_CASE_2 = [ - { - 'threshold': 5, - 'above': False, - 'cval': 0 - }, - (0, 1, 2, 3, 4, 0, 0, 0, 0, 0) -] - -TEST_CASE_3 = [ - { - 'threshold': 5, - 'above': True, - 'cval': 5 - }, - (5, 5, 5, 5, 5, 5, 6, 7, 8, 9) -] +TEST_CASE_1 = [{"threshold": 5, "above": True, "cval": 0}, (0, 0, 0, 0, 0, 0, 6, 7, 8, 9)] +TEST_CASE_2 = [{"threshold": 5, 
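ThresholdIntensity keeps the values on one side of `threshold` and replaces everything else with `cval`; that is exactly what the expected tuples in these cases encode. A sketch mirroring TEST_CASE_1:

```
import numpy as np
from monai.transforms import ThresholdIntensity

result = ThresholdIntensity(threshold=5, above=True, cval=0)(np.arange(10))
np.testing.assert_allclose(result, (0, 0, 0, 0, 0, 0, 6, 7, 8, 9))  # values not above 5 become cval
```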
"above": False, "cval": 0}, (0, 1, 2, 3, 4, 0, 0, 0, 0, 0)] + +TEST_CASE_3 = [{"threshold": 5, "above": True, "cval": 5}, (5, 5, 5, 5, 5, 5, 6, 7, 8, 9)] -class TestThresholdIntensity(unittest.TestCase): +class TestThresholdIntensity(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_value(self, input_param, expected_value): test_data = np.arange(10) @@ -51,5 +29,5 @@ def test_value(self, input_param, expected_value): np.testing.assert_allclose(result, expected_value) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_threshold_intensityd.py b/tests/test_threshold_intensityd.py index 3252b66137..b0fffb1ab7 100644 --- a/tests/test_threshold_intensityd.py +++ b/tests/test_threshold_intensityd.py @@ -15,50 +15,30 @@ from monai.transforms import ThresholdIntensityd TEST_CASE_1 = [ - { - 'keys': ['image', 'label', 'extra'], - 'threshold': 5, - 'above': True, - 'cval': 0 - }, - (0, 0, 0, 0, 0, 0, 6, 7, 8, 9) + {"keys": ["image", "label", "extra"], "threshold": 5, "above": True, "cval": 0}, + (0, 0, 0, 0, 0, 0, 6, 7, 8, 9), ] TEST_CASE_2 = [ - { - 'keys': ['image', 'label', 'extra'], - 'threshold': 5, - 'above': False, - 'cval': 0 - }, - (0, 1, 2, 3, 4, 0, 0, 0, 0, 0) + {"keys": ["image", "label", "extra"], "threshold": 5, "above": False, "cval": 0}, + (0, 1, 2, 3, 4, 0, 0, 0, 0, 0), ] TEST_CASE_3 = [ - { - 'keys': ['image', 'label', 'extra'], - 'threshold': 5, - 'above': True, - 'cval': 5 - }, - (5, 5, 5, 5, 5, 5, 6, 7, 8, 9) + {"keys": ["image", "label", "extra"], "threshold": 5, "above": True, "cval": 5}, + (5, 5, 5, 5, 5, 5, 6, 7, 8, 9), ] class TestThresholdIntensityd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_value(self, input_param, expected_value): - test_data = { - 'image': np.arange(10), - 'label': np.arange(10), - 'extra': np.arange(10) - } + test_data = {"image": np.arange(10), "label": np.arange(10), "extra": np.arange(10)} result = ThresholdIntensityd(**input_param)(test_data) - np.testing.assert_allclose(result['image'], expected_value) - np.testing.assert_allclose(result['label'], expected_value) - np.testing.assert_allclose(result['extra'], expected_value) + np.testing.assert_allclose(result["image"], expected_value) + np.testing.assert_allclose(result["label"], expected_value) + np.testing.assert_allclose(result["extra"], expected_value) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_to_onehot.py b/tests/test_to_onehot.py index 7407edbafb..5d09e17b24 100644 --- a/tests/test_to_onehot.py +++ b/tests/test_to_onehot.py @@ -18,25 +18,24 @@ from monai.networks.utils import one_hot TEST_CASE_1 = [ # single channel 2D, batch 3, shape (2, 1, 2, 2) - {'labels': torch.tensor([[[[0, 1], [1, 2]]], [[[2, 1], [1, 0]]]]), 'num_classes': 3}, + {"labels": torch.tensor([[[[0, 1], [1, 2]]], [[[2, 1], [1, 0]]]]), "num_classes": 3}, (2, 3, 2, 2), ] TEST_CASE_2 = [ # single channel 1D, batch 2, shape (2, 1, 4) - {'labels': torch.tensor([[[1, 2, 2, 0]], [[2, 1, 0, 1]]]), 'num_classes': 3}, + {"labels": torch.tensor([[[1, 2, 2, 0]], [[2, 1, 0, 1]]]), "num_classes": 3}, (2, 3, 4), np.array([[[0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 1, 0]], [[0, 0, 1, 0], [0, 1, 0, 1], [1, 0, 0, 0]]]), ] TEST_CASE_3 = [ # single channel 0D, batch 2, shape (2, 1) - {'labels': torch.tensor([[1.], [2.]]), 'num_classes': 3}, + {"labels": torch.tensor([[1.0], [2.0]]), "num_classes": 3}, (2, 3), np.array([[0, 1, 0], [0, 0, 1]]), ] class 
TestToOneHot(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_data, expected_shape, expected_result=None): result = one_hot(**input_data) @@ -45,5 +44,5 @@ def test_shape(self, input_data, expected_shape, expected_result=None): self.assertTrue(np.allclose(expected_result, result.numpy())) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_tversky_loss.py b/tests/test_tversky_loss.py index b82a6acf60..3fdefde4d1 100644 --- a/tests/test_tversky_loss.py +++ b/tests/test_tversky_loss.py @@ -17,112 +17,82 @@ from monai.losses import TverskyLoss TEST_CASE_1 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.307576, ] TEST_CASE_2 = [ # shape: (2, 1, 2, 2), (2, 1, 2, 2) + {"include_background": True, "do_sigmoid": True}, { - 'include_background': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]], [[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 1.], [1., 1.]]], [[[1., 0.], [1., 0.]]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]), + "smooth": 1e-4, }, 0.416657, ] TEST_CASE_3 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": False, "to_onehot_y": True}, { - 'include_background': False, - 'to_onehot_y': True, - }, - { - 'pred': torch.tensor([[[1., 1., 0.], [0., 0., 1.]], [[1., 0., 1.], [0., 1., 0.]]]), - 'ground': torch.tensor([[[0., 0., 1.]], [[0., 1., 0.]]]), - 'smooth': 0.0, + "pred": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]), + "ground": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]), + "smooth": 0.0, }, 0.0, ] TEST_CASE_4 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": True, "to_onehot_y": True, "do_sigmoid": True}, { - 'include_background': True, - 'to_onehot_y': True, - 'do_sigmoid': True, - }, - { - 'pred': torch.tensor([[[-1., 0., 1.], [1., 0., -1.]], [[0., 0., 0.], [0., 0., 0.]]]), - 'ground': torch.tensor([[[1., 0., 0.]], [[1., 1., 0.]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]), + "ground": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]), + "smooth": 1e-4, }, 0.435050, ] TEST_CASE_5 = [ # shape: (2, 2, 3), (2, 1, 3) + {"include_background": True, "to_onehot_y": True, "do_softmax": True}, { - 'include_background': True, - 'to_onehot_y': True, - 'do_softmax': True, - }, - { - 'pred': torch.tensor([[[-1., 0., 1.], [1., 0., -1.]], [[0., 0., 0.], [0., 0., 0.]]]), - 'ground': torch.tensor([[[1., 0., 0.]], [[1., 1., 0.]]]), - 'smooth': 1e-4, + "pred": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]), + "ground": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]), + "smooth": 1e-4, }, 0.383713, ] TEST_CASE_6 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True, "alpha": 0.3, "beta": 0.7}, { - 'include_background': True, - 'do_sigmoid': True, - 'alpha': 0.3, - 'beta': 0.7, - }, - { - 'pred': torch.tensor([[[[1., -1.], 
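one_hot expands a single-channel label tensor into `num_classes` channels along dimension 1. A sketch mirroring TEST_CASE_2 above (batch 2, 1D labels of length 4):

```
import torch
from monai.networks.utils import one_hot

labels = torch.tensor([[[1, 2, 2, 0]], [[2, 1, 0, 1]]])  # shape (2, 1, 4)
assert one_hot(labels, num_classes=3).shape == (2, 3, 4)  # one channel per class
```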
[-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.3589, ] TEST_CASE_7 = [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) + {"include_background": True, "do_sigmoid": True, "alpha": 0.7, "beta": 0.3}, { - 'include_background': True, - 'do_sigmoid': True, - 'alpha': 0.7, - 'beta': 0.3, - }, - { - 'pred': torch.tensor([[[[1., -1.], [-1., 1.]]]]), - 'ground': torch.tensor([[[[1., 0.], [1., 1.]]]]), - 'smooth': 1e-6, + "pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), + "ground": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), + "smooth": 1e-6, }, 0.2474, ] - - - class TestTverskyLoss(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) def test_shape(self, input_param, input_data, expected_val): result = TverskyLoss(**input_param).forward(**input_data) self.assertAlmostEqual(result.item(), expected_val, places=4) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests/test_unet.py b/tests/test_unet.py index 8ea86a5067..b653b0a784 100644 --- a/tests/test_unet.py +++ b/tests/test_unet.py @@ -20,12 +20,12 @@ TEST_CASE_0 = [ # single channel 2D, batch 16, no residual { - 'dimensions': 2, - 'in_channels': 1, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 0, + "dimensions": 2, + "in_channels": 1, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 0, }, torch.randn(16, 1, 32, 32), (16, 3, 32, 32), @@ -33,12 +33,12 @@ TEST_CASE_1 = [ # single channel 2D, batch 16 { - 'dimensions': 2, - 'in_channels': 1, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 1, + "dimensions": 2, + "in_channels": 1, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 1, }, torch.randn(16, 1, 32, 32), (16, 3, 32, 32), @@ -46,12 +46,12 @@ TEST_CASE_2 = [ # single channel 3D, batch 16 { - 'dimensions': 3, - 'in_channels': 1, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 1, + "dimensions": 3, + "in_channels": 1, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 1, }, torch.randn(16, 1, 32, 24, 48), (16, 3, 32, 24, 48), @@ -59,12 +59,12 @@ TEST_CASE_3 = [ # 4-channel 3D, batch 16 { - 'dimensions': 3, - 'in_channels': 4, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 1, + "dimensions": 3, + "in_channels": 4, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 1, }, torch.randn(16, 4, 32, 64, 48), (16, 3, 32, 64, 48), @@ -72,13 +72,13 @@ TEST_CASE_4 = [ # 4-channel 3D, batch 16, batch normalisation { - 'dimensions': 3, - 'in_channels': 4, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 1, - 'norm': Norm.BATCH, + "dimensions": 3, + "in_channels": 4, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 1, + "norm": Norm.BATCH, }, torch.randn(16, 4, 32, 64, 48), (16, 3, 32, 64, 48), @@ -86,13 +86,13 @@ TEST_CASE_5 = [ # 4-channel 3D, batch 16, LeakyReLU activation { - 'dimensions': 3, - 'in_channels': 4, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 1, - 'act': (Act.LEAKYRELU, {'negative_slope': 0.2}), + "dimensions": 3, + 
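TverskyLoss generalises the Dice loss by weighting false positives with `alpha` and false negatives with `beta`; TEST_CASE_6 and TEST_CASE_7 above differ only in swapping the two weights. A sketch mirroring TEST_CASE_1, using the `forward(pred, ground, smooth)` signature these tests exercise:

```
import torch
from monai.losses import TverskyLoss

loss = TverskyLoss(include_background=True, do_sigmoid=True)
pred = torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]])
ground = torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])
print(loss.forward(pred=pred, ground=ground, smooth=1e-6).item())  # ~0.307576
```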
"in_channels": 4, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 1, + "act": (Act.LEAKYRELU, {"negative_slope": 0.2}), }, torch.randn(16, 4, 32, 64, 48), (16, 3, 32, 64, 48), @@ -100,13 +100,13 @@ TEST_CASE_6 = [ # 4-channel 3D, batch 16, LeakyReLU activation explicit { - 'dimensions': 3, - 'in_channels': 4, - 'out_channels': 3, - 'channels': (16, 32, 64), - 'strides': (2, 2), - 'num_res_units': 1, - 'act': (torch.nn.LeakyReLU, {'negative_slope': 0.2}), + "dimensions": 3, + "in_channels": 4, + "out_channels": 3, + "channels": (16, 32, 64), + "strides": (2, 2), + "num_res_units": 1, + "act": (torch.nn.LeakyReLU, {"negative_slope": 0.2}), }, torch.randn(16, 4, 32, 64, 48), (16, 3, 32, 64, 48), @@ -116,7 +116,6 @@ class TestUNET(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_data, expected_shape): net = UNet(**input_param) @@ -126,5 +125,5 @@ def test_shape(self, input_param, input_data, expected_shape): self.assertEqual(result.shape, expected_shape) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_zoom.py b/tests/test_zoom.py index cb0af47fef..bbec43afb0 100644 --- a/tests/test_zoom.py +++ b/tests/test_zoom.py @@ -20,40 +20,40 @@ from monai.transforms import Zoom from tests.utils import NumpyImageTestCase2D -VALID_CASES = [(1.1, 3, 'constant', 0, True, False, False), - (0.9, 3, 'constant', 0, True, False, False), - (0.8, 1, 'reflect', 0, False, False, False)] +VALID_CASES = [ + (1.1, 3, "constant", 0, True, False, False), + (0.9, 3, "constant", 0, True, False, False), + (0.8, 1, "reflect", 0, False, False, False), +] -GPU_CASES = [("gpu_zoom", 0.6, 1, 'constant', 0, True)] +GPU_CASES = [("gpu_zoom", 0.6, 1, "constant", 0, True)] -INVALID_CASES = [("no_zoom", None, 1, TypeError), - ("invalid_order", 0.9, 's', AssertionError)] +INVALID_CASES = [("no_zoom", None, 1, TypeError), ("invalid_order", 0.9, "s", AssertionError)] class TestZoom(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_correct_results(self, zoom, order, mode, cval, prefilter, use_gpu, keep_size): - zoom_fn = Zoom(zoom=zoom, order=order, mode=mode, cval=cval, - prefilter=prefilter, use_gpu=use_gpu, keep_size=keep_size) + zoom_fn = Zoom( + zoom=zoom, order=order, mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu, keep_size=keep_size + ) zoomed = zoom_fn(self.imt[0]) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, cval=cval, prefilter=prefilter)) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed)) @parameterized.expand(GPU_CASES) def test_gpu_zoom(self, _, zoom, order, mode, cval, prefilter): - if importlib.util.find_spec('cupy'): - zoom_fn = Zoom(zoom=zoom, order=order, mode=mode, cval=cval, - prefilter=prefilter, use_gpu=True, keep_size=False) + if importlib.util.find_spec("cupy"): + zoom_fn = Zoom( + zoom=zoom, order=order, mode=mode, cval=cval, prefilter=prefilter, use_gpu=True, keep_size=False + ) zoomed = zoom_fn(self.imt[0]) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, cval=cval, prefilter=prefilter)) expected = np.stack(expected).astype(np.float32) 
self.assertTrue(np.allclose(expected, zoomed)) @@ -73,5 +73,5 @@ def test_invalid_inputs(self, _, zoom, order, raises): zoomed = zoom_fn(self.imt[0]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_zoom_affine.py b/tests/test_zoom_affine.py index 194abf3ccb..24d1dc262a 100644 --- a/tests/test_zoom_affine.py +++ b/tests/test_zoom_affine.py @@ -19,24 +19,24 @@ VALID_CASES = [ ( - np.array([[2, 1, 4], [-1, -3, 5], [0, 0, 1]], ), + np.array([[2, 1, 4], [-1, -3, 5], [0, 0, 1]],), (10, 20, 30), - np.array([[8.94427191, -8.94427191, 0], [-4.47213595, -17.88854382, 0], [0., 0., 1.]], ), + np.array([[8.94427191, -8.94427191, 0], [-4.47213595, -17.88854382, 0], [0.0, 0.0, 1.0]],), ), ( - np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]], ), + np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]],), (10, 20, 30), - np.array([[10, 0, 0, 0], [0, 20, 0, 0], [0, 0, 30, 0], [0, 0, 0, 1]], ), + np.array([[10, 0, 0, 0], [0, 20, 0, 0], [0, 0, 30, 0], [0, 0, 0, 1]],), ), ( - np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]], ), + np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]],), (10, 20), - np.array([[10, 0, 0, 0], [0, 20, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]], ), + np.array([[10, 0, 0, 0], [0, 20, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]],), ), ( - np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]], ), + np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]],), (10,), - np.array([[10, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]], ), + np.array([[10, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]],), ), ( [[1, 0, 10], [0, 1, 20], [0, 0, 1]] @@ -48,25 +48,24 @@ DIAGONAL_CASES = [ ( - np.array([[-1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]], ), + np.array([[-1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]],), (10, 20, 30), - np.array([[10, 0, 0, 0], [0, 20, 0, 0], [0, 0, 30, 0], [0, 0, 0, 1]], ), + np.array([[10, 0, 0, 0], [0, 20, 0, 0], [0, 0, 30, 0], [0, 0, 0, 1]],), ), ( - np.array([[2, 1, 4], [-1, -3, 5], [0, 0, 1]], ), + np.array([[2, 1, 4], [-1, -3, 5], [0, 0, 1]],), (10, 20, 30), - np.array([[10, 0, 0], [0, 20, 0], [0., 0., 1.]], ), + np.array([[10, 0, 0], [0, 20, 0], [0.0, 0.0, 1.0]],), ), ( # test default scale from affine - np.array([[2, 1, 4], [-1, -3, 5], [0, 0, 1]], ), - (10, ), - np.array([[10, 0, 0], [0, 3.162278, 0], [0., 0., 1.]], ), + np.array([[2, 1, 4], [-1, -3, 5], [0, 0, 1]],), + (10,), + np.array([[10, 0, 0], [0, 3.162278, 0], [0.0, 0.0, 1.0]],), ), ] class TestZoomAffine(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct(self, affine, scale, expected): output = zoom_affine(affine, scale, diagonal=False) @@ -81,5 +80,5 @@ def test_diagonal(self, affine, scale, expected): np.testing.assert_allclose(output, expected, rtol=1e-6, atol=1e-6) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_zoomd.py b/tests/test_zoomd.py index 6ef85cb1fe..1008c2a3df 100644 --- a/tests/test_zoomd.py +++ b/tests/test_zoomd.py @@ -20,48 +20,47 @@ from monai.transforms import Zoomd from tests.utils import NumpyImageTestCase2D -VALID_CASES = [(1.1, 3, 'constant', 0, True, False, False), - (0.9, 3, 'constant', 0, True, False, False), - (0.8, 1, 'reflect', 0, False, False, False)] +VALID_CASES = [ + (1.1, 3, "constant", 0, True, False, False), + (0.9, 3, "constant", 0, True, False, False), + (0.8, 1, "reflect", 0, False, False, False), +] -GPU_CASES = [("gpu_zoom", 0.6, 1, 'constant', 0, True)] 
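zoom_affine rescales the linear part of an affine to the requested voxel sizes while keeping the axis directions; missing scales fall back to the norms of the affine's columns, as the last VALID_CASE and DIAGONAL_CASE above show. A sketch mirroring the second VALID_CASE (the import path is assumed):

```
import numpy as np
from monai.data.utils import zoom_affine  # import path assumed for this revision

affine = np.array([[1, 0, 0, 4], [0, 2, 0, 5], [0, 0, 3, 6], [0, 0, 0, 1]])
out = zoom_affine(affine, (10, 20, 30), diagonal=False)
np.testing.assert_allclose(out, np.diag([10, 20, 30, 1]), rtol=1e-6, atol=1e-6)
```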
+GPU_CASES = [("gpu_zoom", 0.6, 1, "constant", 0, True)] -INVALID_CASES = [("no_zoom", None, 1, TypeError), - ("invalid_order", 0.9, 's', AssertionError)] +INVALID_CASES = [("no_zoom", None, 1, TypeError), ("invalid_order", 0.9, "s", AssertionError)] class TestZoomd(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_correct_results(self, zoom, order, mode, cval, prefilter, use_gpu, keep_size): - key = 'img' - zoom_fn = Zoomd(key, zoom=zoom, order=order, mode=mode, cval=cval, - prefilter=prefilter, use_gpu=use_gpu, keep_size=keep_size) + key = "img" + zoom_fn = Zoomd( + key, zoom=zoom, order=order, mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu, keep_size=keep_size + ) zoomed = zoom_fn({key: self.imt[0]}) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, cval=cval, prefilter=prefilter)) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed[key])) - @parameterized.expand(GPU_CASES) def test_gpu_zoom(self, _, zoom, order, mode, cval, prefilter): - key = 'img' - if importlib.util.find_spec('cupy'): - zoom_fn = Zoomd(key, zoom=zoom, order=order, mode=mode, cval=cval, - prefilter=prefilter, use_gpu=True, keep_size=False) + key = "img" + if importlib.util.find_spec("cupy"): + zoom_fn = Zoomd( + key, zoom=zoom, order=order, mode=mode, cval=cval, prefilter=prefilter, use_gpu=True, keep_size=False + ) zoomed = zoom_fn({key: self.imt[0]}) expected = list() for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, - cval=cval, prefilter=prefilter)) + expected.append(zoom_scipy(channel, zoom=zoom, mode=mode, order=order, cval=cval, prefilter=prefilter)) expected = np.stack(expected).astype(np.float32) self.assertTrue(np.allclose(expected, zoomed[key])) def test_keep_size(self): - key = 'img' + key = "img" zoom_fn = Zoomd(key, zoom=0.6, keep_size=True) zoomed = zoom_fn({key: self.imt[0]}) self.assertTrue(np.array_equal(zoomed[key].shape, self.imt.shape[1:])) @@ -72,11 +71,11 @@ def test_keep_size(self): @parameterized.expand(INVALID_CASES) def test_invalid_inputs(self, _, zoom, order, raises): - key = 'img' + key = "img" with self.assertRaises(raises): zoom_fn = Zoomd(key, zoom=zoom, order=order) zoomed = zoom_fn({key: self.imt[0]}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/utils.py b/tests/utils.py index 7795c4c2b4..1c5bc30bd8 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -37,7 +37,7 @@ def make_nifti_image(array, affine=None): affine = np.eye(4) test_image = nib.Nifti1Image(array, affine) - temp_f, image_name = tempfile.mkstemp(suffix='.nii.gz') + temp_f, image_name = tempfile.mkstemp(suffix=".nii.gz") nib.save(test_image, image_name) os.close(temp_f) return image_name @@ -58,7 +58,6 @@ def setUp(self): class TorchImageTestCase2D(NumpyImageTestCase2D): - def setUp(self): NumpyImageTestCase2D.setUp(self) self.imt = torch.tensor(self.imt) diff --git a/versioneer.py b/versioneer.py index 64fea1c892..a1eca36d87 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,3 @@ - # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. 
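Zoomd wraps Zoom for dictionary data; with `keep_size=True` the zoomed array is padded or cropped back to the input's spatial shape, which is what `test_keep_size` below asserts. A sketch with an illustrative input:

```
import numpy as np
from monai.transforms import Zoomd

img = np.random.rand(1, 16, 16).astype(np.float32)  # illustrative channel-first image
zoomed = Zoomd("img", zoom=0.6, keep_size=True)({"img": img})
assert zoomed["img"].shape == img.shape  # content shrunk, original shape restored
```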
@@ -277,6 +276,7 @@ """ from __future__ import print_function + try: import configparser except ImportError: @@ -308,11 +308,13 @@ def get_root(): setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") + err = ( + "Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND')." + ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools @@ -325,8 +327,7 @@ def get_root(): me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root @@ -348,6 +349,7 @@ def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None + cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" @@ -372,17 +374,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -390,10 +393,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None) + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -418,7 +420,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, return stdout, p.returncode -LONG_VERSION_PY['git'] = ''' +LONG_VERSION_PY[ + "git" +] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -993,7 +997,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. 
TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -1002,7 +1006,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -1010,19 +1014,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -1037,8 +1048,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -1046,10 +1056,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -1072,17 +1081,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -1091,10 +1099,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -1105,13 +1112,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -1167,16 +1172,19 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -1205,11 +1213,9 @@ def versions_from_file(filename): contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) @@ -1218,8 +1224,7 @@ def versions_from_file(filename): def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) + contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) @@ -1251,8 +1256,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], 
pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -1366,11 +1370,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -1390,9 +1396,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } class VersioneerBadRootError(Exception): @@ -1415,8 +1425,7 @@ def get_versions(verbose=False): handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" + assert cfg.versionfile_source is not None, "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) @@ -1470,9 +1479,13 @@ def get_versions(verbose=False): if verbose: print("unable to compute version") - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } def get_version(): @@ -1521,6 +1534,7 @@ def run(self): print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1553,14 +1567,15 @@ def run(self): # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe + # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ @@ -1581,17 +1596,21 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["build_exe"] = cmd_build_exe del cmds["build_py"] - if 'py2exe' in sys.modules: # py2exe enabled? + if "py2exe" in sys.modules: # py2exe enabled? 
try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: @@ -1610,13 +1629,17 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments @@ -1643,8 +1666,8 @@ def make_release_tree(self, base_dir, files): # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + write_to_version_file(target_versionfile, self._versioneer_generated_versions) + cmds["sdist"] = cmd_sdist return cmds @@ -1699,11 +1722,9 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: + except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) + print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) @@ -1712,15 +1733,18 @@ def do_setup(): print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: @@ -1762,8 +1786,7 @@ def do_setup(): else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) + print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: