diff --git a/.github/workflows/test_and_deploy.yml b/.github/workflows/test_and_deploy.yml
index cdf8dff..d9e3f66 100644
--- a/.github/workflows/test_and_deploy.yml
+++ b/.github/workflows/test_and_deploy.yml
@@ -3,7 +3,7 @@ name: tests
on:
push:
branches:
- - '*'
+ - 'main'
tags:
- '*'
pull_request:
diff --git a/brainglobe_registration/elastix/register.py b/brainglobe_registration/elastix/register.py
index 4a216f9..e208e75 100644
--- a/brainglobe_registration/elastix/register.py
+++ b/brainglobe_registration/elastix/register.py
@@ -1,36 +1,22 @@
-from typing import List, Tuple
+from pathlib import Path
+from typing import List, Optional, Tuple
import itk
import numpy as np
import numpy.typing as npt
-from brainglobe_atlasapi import BrainGlobeAtlas
-
-def get_atlas_by_name(atlas_name: str) -> BrainGlobeAtlas:
- """
- Get a BrainGlobeAtlas object by its name.
-
- Parameters
- ----------
- atlas_name : str
- The name of the atlas.
-
- Returns
- -------
- BrainGlobeAtlas
- The BrainGlobeAtlas object.
- """
- atlas = BrainGlobeAtlas(atlas_name)
-
- return atlas
+from brainglobe_registration.utils.utils import (
+ convert_atlas_labels,
+ restore_atlas_labels,
+)
def run_registration(
- atlas_image,
- moving_image,
- annotation_image,
+ atlas_image: npt.NDArray,
+ moving_image: npt.NDArray,
parameter_lists: List[Tuple[str, dict]],
-) -> Tuple[npt.NDArray, itk.ParameterObject, npt.NDArray]:
+ output_directory: Optional[Path] = None,
+) -> Tuple[npt.NDArray, itk.ParameterObject]:
"""
Run the registration process on the given images.
@@ -40,10 +26,10 @@ def run_registration(
The atlas image.
moving_image : npt.NDArray
The moving image.
- annotation_image : npt.NDArray
- The annotation image.
- parameter_lists : List[tuple[str, dict]], optional
- The list of parameter lists, by default None
+ parameter_lists : List[tuple[str, dict]]
+ The list of registration parameters, one for each transform.
+ output_directory : Optional[Path], optional
+ The output directory for the registration results, by default None
Returns
-------
@@ -51,8 +37,6 @@ def run_registration(
The result image.
itk.ParameterObject
The result transform parameters.
- npt.NDArray
- The transformed annotation image.
"""
# convert to ITK, view only
atlas_image = itk.GetImageViewFromArray(atlas_image).astype(itk.F)
@@ -66,33 +50,204 @@ def run_registration(
parameter_object = setup_parameter_object(parameter_lists=parameter_lists)
elastix_object.SetParameterObject(parameter_object)
-
- # update filter object
elastix_object.UpdateLargestPossibleRegion()
# get results
result_image = elastix_object.GetOutput()
result_transform_parameters = elastix_object.GetTransformParameterObject()
- temp_interp_order = result_transform_parameters.GetParameter(
+
+ if output_directory:
+ file_names = [
+ f"{output_directory}/TransformParameters.{i}.txt"
+ for i in range(len(parameter_lists))
+ ]
+
+ itk.ParameterObject.WriteParameterFile(
+ result_transform_parameters, file_names
+ )
+
+ return (
+ np.asarray(result_image),
+ result_transform_parameters,
+ )
+
+
+def transform_annotation_image(
+ annotation_image: npt.NDArray[np.uint32],
+ transform_parameters: itk.ParameterObject,
+) -> npt.NDArray[np.uint32]:
+ """
+ Transform the annotation image using the given transform parameters.
+ Sets the FinalBSplineInterpolationOrder to 0 to avoid interpolation.
+ Resets the FinalBSplineInterpolationOrder to its original value after
+ transforming the annotation image.
+
+ Parameters
+ ----------
+ annotation_image : npt.NDArray
+ The annotation image.
+ transform_parameters : itk.ParameterObject
+ The transform parameters.
+
+ Returns
+ -------
+ npt.NDArray
+ The transformed annotation image.
+ """
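+    # Remap label values greater than 2**16 so they can be represented
+    # exactly as float32 during the ITK transform (see convert_atlas_labels)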
+ adjusted_annotation_image, mapping = convert_atlas_labels(annotation_image)
+
+ annotation_image = itk.GetImageFromArray(adjusted_annotation_image).astype(
+ itk.F
+ )
+ temp_interp_order = transform_parameters.GetParameter(
0, "FinalBSplineInterpolationOrder"
)
- result_transform_parameters.SetParameter(
- "FinalBSplineInterpolationOrder", "0"
+ transform_parameters.SetParameter("FinalBSplineInterpolationOrder", "0")
+
+ transformix_object = itk.TransformixFilter.New(annotation_image)
+ transformix_object.SetTransformParameterObject(transform_parameters)
+ transformix_object.UpdateLargestPossibleRegion()
+
+ transformed_annotation = transformix_object.GetOutput()
+
+ transform_parameters.SetParameter(
+ "FinalBSplineInterpolationOrder", temp_interp_order
+ )
+ transformed_annotation_array = np.asarray(transformed_annotation).astype(
+ np.uint32
)
- annotation_image_transformix = itk.transformix_filter(
- annotation_image.astype(np.float32, copy=False),
- result_transform_parameters,
+ transformed_annotation_array = restore_atlas_labels(
+ transformed_annotation_array, mapping
+ )
+
+ return transformed_annotation_array
+
+
+def transform_image(
+ image: npt.NDArray,
+ transform_parameters: itk.ParameterObject,
+) -> npt.NDArray:
+ """
+ Transform the image using the given transform parameters.
+
+ Parameters
+ ----------
+ image: npt.NDArray
+ The image to transform.
+ transform_parameters: itk.ParameterObject
+ The transform parameters.
+
+ Returns
+ -------
+ npt.NDArray
+ The transformed image.
+ """
+ image = itk.GetImageViewFromArray(image).astype(itk.F)
+
+ transformix_object = itk.TransformixFilter.New(image)
+ transformix_object.SetTransformParameterObject(transform_parameters)
+ transformix_object.UpdateLargestPossibleRegion()
+
+ transformed_image = transformix_object.GetOutput()
+
+ return np.asarray(transformed_image)
+
+
+def calculate_deformation_field(
+ moving_image: npt.NDArray,
+ transform_parameters: itk.ParameterObject,
+ debug: bool = False,
+) -> npt.NDArray:
+ """
+ Calculate the deformation field for the moving image using the given
+ transform parameters.
+
+ Parameters
+ ----------
+ moving_image : npt.NDArray
+ The moving image.
+ transform_parameters : itk.ParameterObject
+ The transform parameters.
+ debug : bool, optional
+ Whether to save extra files for debugging, by default False
+
+ Returns
+ -------
+ npt.NDArray
+ The deformation field.
+ """
+ transformix_object = itk.TransformixFilter.New(
+ itk.GetImageViewFromArray(moving_image).astype(itk.F),
+ transform_parameters,
+ )
+ transformix_object.SetComputeDeformationField(True)
+
+ transformix_object.UpdateLargestPossibleRegion()
+
+ # Change from ITK to numpy axes ordering
+ deformation_field = itk.GetArrayFromImage(
+ transformix_object.GetOutputDeformationField()
+ )[..., ::-1]
+
+ if not debug:
+ # Cleanup files generated by elastix
+ (Path.cwd() / "DeformationField.tiff").unlink(missing_ok=True)
+
+ return deformation_field
+
+
+def invert_transformation(
+ fixed_image: npt.NDArray,
+ parameter_list: List[Tuple[str, dict]],
+ transform_parameters: itk.ParameterObject,
+ output_directory: Optional[Path] = None,
+) -> Tuple[npt.NDArray, itk.ParameterObject]:
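+    """
+    Estimate the inverse of the given transform by registering the fixed
+    image onto itself, using the forward transform as the initial transform,
+    and keeping only the newly estimated parameter maps.
+
+    Parameters
+    ----------
+    fixed_image : npt.NDArray
+        The fixed image used for the forward registration.
+    parameter_list : List[Tuple[str, dict]]
+        The list of registration parameters, one for each transform.
+    transform_parameters : itk.ParameterObject
+        The forward transform parameters to invert.
+    output_directory : Optional[Path], optional
+        The output directory for the inverse transform parameters,
+        by default None
+
+    Returns
+    -------
+    npt.NDArray
+        The result image.
+    itk.ParameterObject
+        The inverse transform parameters.
+    """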
+
+ fixed_image = itk.GetImageFromArray(fixed_image).astype(itk.F)
+
+ elastix_object = itk.ElastixRegistrationMethod.New(
+ fixed_image, fixed_image
)
+ parameter_object_inverse = setup_parameter_object(parameter_list)
+
+ elastix_object.SetInitialTransformParameterObject(transform_parameters)
+
+ elastix_object.SetParameterObject(parameter_object_inverse)
+
+ elastix_object.UpdateLargestPossibleRegion()
+
+ num_initial_transforms = transform_parameters.GetNumberOfParameterMaps()
+
+ result_image = elastix_object.GetOutput()
+ out_parameters = elastix_object.GetTransformParameterObject()
+ result_transform_parameters = itk.ParameterObject.New()
+
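+    # Keep only the newly estimated maps; the first num_initial_transforms
+    # maps are the forward transform supplied as the initial transform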
+ for i in range(
+ num_initial_transforms, out_parameters.GetNumberOfParameterMaps()
+ ):
+ result_transform_parameters.AddParameterMap(
+ out_parameters.GetParameterMap(i)
+ )
+
result_transform_parameters.SetParameter(
- "FinalBSplineInterpolationOrder", temp_interp_order
+ 0, "InitialTransformParameterFileName", "NoInitialTransform"
)
+ if output_directory:
+ file_names = [
+ f"{output_directory}/InverseTransformParameters.{i}.txt"
+ for i in range(len(parameter_list))
+ ]
+
+ itk.ParameterObject.WriteParameterFiles(
+ result_transform_parameters, file_names
+ )
+
return (
np.asarray(result_image),
result_transform_parameters,
- np.asarray(annotation_image_transformix),
)
diff --git a/brainglobe_registration/parameters/ara_tools/affine.txt b/brainglobe_registration/parameters/ara_tools/affine.txt
index cabe450..b6f281e 100644
--- a/brainglobe_registration/parameters/ara_tools/affine.txt
+++ b/brainglobe_registration/parameters/ara_tools/affine.txt
@@ -4,9 +4,9 @@
//ImageTypes
(FixedInternalImagePixelType "float")
-(FixedImageDimension 3)
+(FixedImageDimension 2)
(MovingInternalImagePixelType "float")
-(MovingImageDimension 3)
+(MovingImageDimension 2)
//Components
(Registration "MultiResolutionRegistration")
diff --git a/brainglobe_registration/parameters/ara_tools/bspline.txt b/brainglobe_registration/parameters/ara_tools/bspline.txt
index e87a225..a7a51f9 100644
--- a/brainglobe_registration/parameters/ara_tools/bspline.txt
+++ b/brainglobe_registration/parameters/ara_tools/bspline.txt
@@ -2,9 +2,9 @@
//ImageTypes
(FixedInternalImagePixelType "float")
-(FixedImageDimension 3)
+(FixedImageDimension 2)
(MovingInternalImagePixelType "float")
-(MovingImageDimension 3)
+(MovingImageDimension 2)
//Components
(Registration "MultiResolutionRegistration")
diff --git a/brainglobe_registration/parameters/elastix_default/affine.txt b/brainglobe_registration/parameters/elastix_default/affine.txt
index 77e2e87..dfd5765 100644
--- a/brainglobe_registration/parameters/elastix_default/affine.txt
+++ b/brainglobe_registration/parameters/elastix_default/affine.txt
@@ -18,7 +18,7 @@
(Registration "MultiResolutionRegistration")
(ResampleInterpolator "FinalBSplineInterpolator")
(Resampler "DefaultResampler")
-(ResultImageFormat "nii")
+(ResultImageFormat "tiff")
(Transform "AffineTransform")
(WriteIterationInfo "false")
(WriteResultImage "true")
diff --git a/brainglobe_registration/parameters/elastix_default/bspline.txt b/brainglobe_registration/parameters/elastix_default/bspline.txt
index c14fa00..16b7736 100644
--- a/brainglobe_registration/parameters/elastix_default/bspline.txt
+++ b/brainglobe_registration/parameters/elastix_default/bspline.txt
@@ -21,7 +21,7 @@
(Registration "MultiMetricMultiResolutionRegistration")
(ResampleInterpolator "FinalBSplineInterpolator")
(Resampler "DefaultResampler")
-(ResultImageFormat "nii")
+(ResultImageFormat "tiff")
(Transform "BSplineTransform")
(WriteIterationInfo "false")
(WriteResultImage "true")
diff --git a/brainglobe_registration/parameters/elastix_default/rigid.txt b/brainglobe_registration/parameters/elastix_default/rigid.txt
deleted file mode 100644
index 80ed2c9..0000000
--- a/brainglobe_registration/parameters/elastix_default/rigid.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-(AutomaticParameterEstimation "true")
-(AutomaticScalesEstimation "true")
-(CheckNumberOfSamples "true")
-(DefaultPixelValue 0)
-(FinalBSplineInterpolationOrder 3)
-(FixedImagePyramid "FixedSmoothingImagePyramid")
-(ImageSampler "RandomCoordinate")
-(Interpolator "LinearInterpolator")
-(MaximumNumberOfIterations 256)
-(MaximumNumberOfSamplingAttempts 8)
-(Metric "AdvancedMattesMutualInformation")
-(MovingImagePyramid "MovingSmoothingImagePyramid")
-(NewSamplesEveryIteration "true")
-(NumberOfResolutions 4)
-(NumberOfSamplesForExactGradient 4096)
-(NumberOfSpatialSamples 2048)
-(Optimizer "AdaptiveStochasticGradientDescent")
-(Registration "MultiResolutionRegistration")
-(ResampleInterpolator "FinalBSplineInterpolator")
-(Resampler "DefaultResampler")
-(ResultImageFormat "nii")
-(Transform "EulerTransform")
-(WriteIterationInfo "false")
-(WriteResultImage "true")
diff --git a/brainglobe_registration/registration_widget.py b/brainglobe_registration/registration_widget.py
index 1d14756..848638b 100644
--- a/brainglobe_registration/registration_widget.py
+++ b/brainglobe_registration/registration_widget.py
@@ -8,6 +8,7 @@
Users can download and add the atlas images/structures as layers to the viewer.
"""
+import json
from pathlib import Path
from typing import Optional
@@ -26,18 +27,31 @@
from napari.utils.notifications import show_error
from napari.viewer import Viewer
from pytransform3d.rotations import active_matrix_from_angle
-from qtpy.QtWidgets import QCheckBox, QPushButton, QScrollArea, QTabWidget
+from qtpy.QtWidgets import (
+ QCheckBox,
+ QFileDialog,
+ QHBoxLayout,
+ QLabel,
+ QLineEdit,
+ QPushButton,
+ QScrollArea,
+ QTabWidget,
+ QWidget,
+)
from skimage.segmentation import find_boundaries
from skimage.transform import rescale
+from tifffile import imwrite
from brainglobe_registration.utils.utils import (
adjust_napari_image_layer,
+ calculate_areas,
calculate_rotated_bounding_box,
check_atlas_installed,
filter_plane,
find_layer_index,
get_image_layer_names,
open_parameter_file,
+ serialize_registration_widget,
)
from brainglobe_registration.widgets.adjust_moving_image_view import (
AdjustMovingImageView,
@@ -54,8 +68,8 @@
class RegistrationWidget(QScrollArea):
def __init__(self, napari_viewer: Viewer):
super().__init__()
- self.widget = CollapsibleWidgetContainer()
- self.widget.setContentsMargins(10, 10, 10, 10)
+ self._widget = CollapsibleWidgetContainer()
+ self._widget.setContentsMargins(10, 10, 10, 10)
self._viewer = napari_viewer
self._atlas: Optional[BrainGlobeAtlas] = None
@@ -63,11 +77,10 @@ def __init__(self, napari_viewer: Viewer):
self._atlas_annotations_layer: Optional[napari.layers.Labels] = None
self._moving_image: Optional[napari.layers.Image] = None
self._moving_image_data_backup: Optional[npt.NDArray] = None
- self._automatic_deletion_flag = False
# Flag to differentiate between manual and automatic atlas deletion
+ self._automatic_deletion_flag = False
self.transform_params: dict[str, dict] = {
- "rigid": {},
"affine": {},
"bspline": {},
}
@@ -77,7 +90,7 @@ def __init__(self, napari_viewer: Viewer):
file_path = (
Path(__file__).parent.resolve()
/ "parameters"
- / "elastix_default"
+ / "ara_tools"
/ f"{transform_type}.txt"
)
@@ -93,6 +106,8 @@ def __init__(self, napari_viewer: Viewer):
self._available_atlases = ["------"] + get_downloaded_atlases()
self._sample_images = get_image_layer_names(self._viewer)
+ self.output_directory: Optional[Path] = None
+
if len(self._sample_images) > 0:
self._moving_image = self._viewer.layers[0]
else:
@@ -143,14 +158,32 @@ def __init__(self, napari_viewer: Viewer):
# Use decorator to connect to layer deletion event
self._connect_events()
+
self.filter_checkbox = QCheckBox("Filter Images")
self.filter_checkbox.setChecked(False)
+ self.output_directory_widget = QWidget()
+ self.output_directory_widget.setLayout(QHBoxLayout())
+
+ self.output_directory_text_field = QLineEdit()
+ self.output_directory_text_field.editingFinished.connect(
+ self._on_output_directory_text_edited
+ )
+ self.output_directory_widget.layout().addWidget(
+ self.output_directory_text_field
+ )
+
+ self.open_file_dialog = QPushButton("Browse")
+ self.open_file_dialog.clicked.connect(
+ self._on_open_file_dialog_clicked
+ )
+ self.output_directory_widget.layout().addWidget(self.open_file_dialog)
+
self.run_button = QPushButton("Run")
self.run_button.clicked.connect(self._on_run_button_click)
self.run_button.setEnabled(False)
- self.widget.add_widget(
+ self._widget.add_widget(
header_widget(
"brainglobe-
registration", # line break at
"Registration with Elastix",
@@ -158,13 +191,13 @@ def __init__(self, napari_viewer: Viewer):
),
collapsible=False,
)
- self.widget.add_widget(
+ self._widget.add_widget(
self.get_atlas_widget, widget_title="Select Images"
)
- self.widget.add_widget(
+ self._widget.add_widget(
self.adjust_moving_image_widget, widget_title="Prepare Images"
)
- self.widget.add_widget(
+ self._widget.add_widget(
self.transform_select_view, widget_title="Select Transformations"
)
@@ -180,20 +213,24 @@ def __init__(self, napari_viewer: Viewer):
self.parameters_tab.addTab(new_tab, transform_type)
self.parameter_setting_tabs_lists.append(new_tab)
- self.widget.add_widget(
+ self._widget.add_widget(
self.parameters_tab, widget_title="Advanced Settings (optional)"
)
- self.widget.add_widget(self.filter_checkbox, collapsible=False)
+ self._widget.add_widget(self.filter_checkbox, collapsible=False)
- self.widget.add_widget(self.run_button, collapsible=False)
+ self._widget.add_widget(QLabel("Output Directory"), collapsible=False)
+ self._widget.add_widget(
+ self.output_directory_widget, collapsible=False
+ )
+ self._widget.add_widget(self.run_button, collapsible=False)
- self.widget.layout().itemAt(1).widget().collapse(animate=False)
+ self._widget.layout().itemAt(1).widget().collapse(animate=False)
check_atlas_installed(self)
self.setWidgetResizable(True)
- self.setWidget(self.widget)
+ self.setWidget(self._widget)
def _connect_events(self):
@self._viewer.layers.events.removed.connect
@@ -327,8 +364,31 @@ def _on_adjust_moving_image_reset_button_click(self):
return
adjust_napari_image_layer(self._moving_image, 0, 0, 0)
+ def _on_output_directory_text_edited(self):
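+        """
+        Update the output directory from the contents of the text field.
+        """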
+ self.output_directory = Path(self.output_directory_text_field.text())
+
+ def _on_open_file_dialog_clicked(self) -> None:
+ """
+ Open a file dialog to select the output directory.
+ """
+ output_directory_str = QFileDialog.getExistingDirectory(
+ self, "Select the output directory", str(Path.home())
+ )
+ # A blank string is returned if the user cancels the dialog
+ if not output_directory_str:
+ return
+
+ self.output_directory = Path(output_directory_str)
+ self.output_directory_text_field.setText(str(self.output_directory))
+
def _on_run_button_click(self):
- from brainglobe_registration.elastix.register import run_registration
+ from brainglobe_registration.elastix.register import (
+ calculate_deformation_field,
+ invert_transformation,
+ run_registration,
+ transform_annotation_image,
+ transform_image,
+ )
if self._atlas_data_layer is None:
display_info(
@@ -346,6 +406,15 @@ def _on_run_button_click(self):
)
return
+ if self.output_directory is None:
+ display_info(
+ widget=self,
+ title="Warning",
+ message="Please select an output directory "
+ "before clicking 'Run'.",
+ )
+ return
+
current_atlas_slice = self._viewer.dims.current_step[0]
if self.filter_checkbox.isChecked():
@@ -354,24 +423,55 @@ def _on_run_button_click(self):
current_atlas_slice, :, :
].compute()
)
- moving_image_layer = filter_plane(self._moving_image.data)
+ moving_image = filter_plane(self._moving_image.data)
else:
atlas_layer = self._atlas_data_layer.data[
current_atlas_slice, :, :
]
- moving_image_layer = self._moving_image.data
+ moving_image = self._moving_image.data
- result, parameters, registered_annotation_image = run_registration(
+ result, parameters = run_registration(
+ atlas_layer,
+ moving_image,
+ self.transform_selections,
+ self.output_directory,
+ )
+
+ inverse_result, inverse_parameters = invert_transformation(
atlas_layer,
- moving_image_layer,
- self._atlas_annotations_layer.data[current_atlas_slice, :, :],
self.transform_selections,
+ parameters,
+ self.output_directory,
+ )
+
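+        # Use the inverse transform to map the sample image into atlas space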
+ data_in_atlas_space = transform_image(moving_image, inverse_parameters)
+
+        # Creating a fresh array is necessary to avoid a crash on Windows.
+        # Otherwise self._atlas_annotations_layer.data.dtype.type returns
+        # np.uintc, which is not supported by ITK. After creating a new array,
+        # annotation.dtype.type returns np.uint32, which is supported by ITK.
+ annotation = np.array(
+ self._atlas_annotations_layer.data[current_atlas_slice, :, :],
+ dtype=np.uint32,
+ )
+
+ registered_annotation_image = transform_annotation_image(
+ annotation,
+ parameters,
+ )
+
+ registered_hemisphere = transform_annotation_image(
+ self._atlas.hemispheres[current_atlas_slice, :, :], parameters
)
boundaries = find_boundaries(
registered_annotation_image, mode="inner"
).astype(np.int8, copy=False)
+ deformation_field = calculate_deformation_field(
+ moving_image, parameters
+ )
+
self._viewer.add_image(result, name="Registered Image", visible=False)
atlas_layer_index = find_layer_index(
@@ -380,7 +480,7 @@ def _on_run_button_click(self):
self._viewer.layers[atlas_layer_index].visible = False
self._viewer.add_labels(
- registered_annotation_image.astype(np.uint32, copy=False),
+ registered_annotation_image,
name="Registered Annotations",
visible=False,
)
@@ -391,9 +491,37 @@ def _on_run_button_click(self):
blending="additive",
opacity=0.8,
)
+ self._viewer.add_image(
+ data_in_atlas_space,
+ name="Inverse Registered Image",
+ visible=False,
+ )
self._viewer.grid.enabled = False
+ self.save_outputs(
+ boundaries,
+ deformation_field,
+ moving_image,
+ data_in_atlas_space,
+ result,
+ registered_annotation_image,
+ registered_hemisphere,
+ )
+
+ areas_path = self.output_directory / "areas.csv"
+ calculate_areas(
+ self._atlas,
+ registered_annotation_image,
+ registered_hemisphere,
+ areas_path,
+ )
+
+ with open(
+ self.output_directory / "brainglobe-registration.json", "w"
+ ) as f:
+ json.dump(self, f, default=serialize_registration_widget, indent=4)
+
def _on_transform_type_added(
self, transform_type: str, transform_order: int
) -> None:
@@ -595,3 +723,75 @@ def _on_atlas_reset(self):
self._atlas_annotations_layer.data = self._atlas.annotation
self._viewer.grid.enabled = False
self._viewer.grid.enabled = True
+
+ def save_outputs(
+ self,
+ boundaries: npt.NDArray,
+ deformation_field: npt.NDArray,
+ downsampled: npt.NDArray,
+ data_in_atlas_space: npt.NDArray,
+ atlas_in_data_space: npt.NDArray,
+ annotation_in_data_space: npt.NDArray,
+ registered_hemisphere: npt.NDArray,
+ ):
+ """
+ Save the outputs of the registration to the output directory.
+
+ The outputs are saved as per
+ https://brainglobe.info/documentation/brainreg/user-guide/output-files.html
+
+ Parameters
+ ----------
+ boundaries: npt.NDArray
+ The area boundaries of the registered annotation image.
+ deformation_field: npt.NDArray
+ The deformation field.
+ downsampled: npt.NDArray
+ The downsampled moving image.
+ data_in_atlas_space: npt.NDArray
+ The moving image in atlas space.
+ atlas_in_data_space: npt.NDArray
+ The atlas in data space.
+ annotation_in_data_space: npt.NDArray
+        The annotation in data space.
+ registered_hemisphere: npt.NDArray
+ The hemisphere annotation in data space.
+ """
+ assert self._moving_image
+ assert self.output_directory
+
+ imwrite(self.output_directory / "boundaries.tiff", boundaries)
+
+ for i in range(deformation_field.shape[-1]):
+ imwrite(
+ self.output_directory / f"deformation_field_{i}.tiff",
+ deformation_field[:, :, i],
+ )
+
+ imwrite(self.output_directory / "downsampled.tiff", downsampled)
+ imwrite(
+ self.output_directory
+ / f"downsampled_standard_{self._moving_image.name}.tiff",
+ data_in_atlas_space,
+ )
+ imwrite(
+ self.output_directory / "registered_atlas.tiff",
+ annotation_in_data_space,
+ )
+
+ imwrite(
+ self.output_directory / "registered_hemispheres.tiff",
+ registered_hemisphere,
+ )
+
+ def __dict__(self):
+ return {
+ "atlas": self._atlas,
+ "atlas_data_layer": self._atlas_data_layer,
+ "atlas_annotations_layer": self._atlas_annotations_layer,
+ "moving_image": self._moving_image,
+ "adjust_moving_image_widget": self.adjust_moving_image_widget,
+ "transform_selections": self.transform_selections,
+ "filter": self.filter_checkbox.isChecked(),
+ "output_directory": self.output_directory,
+ }
diff --git a/brainglobe_registration/utils/utils.py b/brainglobe_registration/utils/utils.py
index 1f17cfd..aeea6fc 100644
--- a/brainglobe_registration/utils/utils.py
+++ b/brainglobe_registration/utils/utils.py
@@ -1,9 +1,13 @@
-from pathlib import Path
+from collections import Counter
+from pathlib import Path, PurePath
from typing import Dict, List, Tuple
+import dask.array as da
import napari
import numpy as np
import numpy.typing as npt
+import pandas as pd
+from brainglobe_atlasapi import BrainGlobeAtlas
from brainglobe_atlasapi.list_atlases import get_downloaded_atlases
from brainglobe_utils.qtpy.dialog import display_info
from pytransform3d.rotations import active_matrix_from_angle
@@ -254,3 +258,178 @@ def pseudo_flatfield(img_plane, sigma=5):
"""
filtered_img = gaussian_filter(img_plane, sigma)
return img_plane / (filtered_img + 1)
+
+
+def calculate_areas(
+ atlas: BrainGlobeAtlas,
+ annotation_image: npt.NDArray[np.uint32],
+ hemispheres: npt.NDArray,
+ output_path: Path,
+ left_hemisphere_label: int = 2,
+ right_hemisphere_label: int = 1,
+) -> pd.DataFrame:
+ """
+ Calculate the areas of the structures in the annotation image.
+
+ Parameters
+ ----------
+ atlas: BrainGlobeAtlas
+ The atlas object to which the annotation image belongs.
+ annotation_image: npt.NDArray[np.uint32]
+ The annotation image.
+ hemispheres: npt.NDArray
+ The hemisphere labels for each pixel in the annotation image.
+ output_path: Path
+ The path to save the output csv file.
+ left_hemisphere_label: int, optional
+ The label for the left hemisphere.
+ right_hemisphere_label: int, optional
+ The label for the right hemisphere.
+
+ Returns
+ -------
+ pd.DataFrame
+ A DataFrame containing the structure names and areas.
+ """
+ count_left = Counter(
+ annotation_image[hemispheres == left_hemisphere_label].flatten()
+ )
+ count_right = Counter(
+ annotation_image[hemispheres == right_hemisphere_label].flatten()
+ )
+
+ # Remove the background label
+ count_left.pop(0)
+ count_right.pop(0)
+
+ structures_reference_df = atlas.lookup_df
+
+ df = pd.DataFrame(
+ index=structures_reference_df.id,
+ columns=[
+ "structure_name",
+ "left_area_mm2",
+ "right_area_mm2",
+ "total_area_mm2",
+ ],
+ )
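+    # Atlas resolution is in microns, so convert the pixel area to mm^2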
+ pixel_area_in_mm2 = atlas.resolution[1] * atlas.resolution[2] / (1000**2)
+
+ for structure_id in count_left.keys():
+ structure_line = structures_reference_df[
+ structures_reference_df["id"] == structure_id
+ ]
+
+ if len(structure_line) == 0:
+ print(
+ f"Value: {structure_id} is not in the atlas structure "
+ f"reference file. Not calculating the area."
+ )
+ continue
+
+ left_area = count_left[structure_id] * pixel_area_in_mm2
+ right_area = count_right[structure_id] * pixel_area_in_mm2
+ total_area = left_area + right_area
+
+ df.loc[structure_id] = {
+ "structure_name": structure_line["name"].values[0],
+ "left_area_mm2": left_area,
+ "right_area_mm2": right_area,
+ "total_area_mm2": total_area,
+ }
+
+ df.dropna(how="all", inplace=True)
+
+ df.to_csv(output_path, index=False)
+
+ return df
+
+
+def convert_atlas_labels(
+ annotation_image: npt.NDArray[np.uint32],
+) -> Tuple[npt.NDArray[np.uint32], Dict[int, int]]:
+ """
+ Adjust the atlas labels such that they can be represented accurately
+ by a single precision float (np.float32).
+
+ This is done by mapping the labels greater than 2**16 to new
+ consecutive values starting from 2**16.
+
+    This can be slow if a large number of unique values are greater than
+    2**16; based on current BrainGlobe atlases, this should not be the case.
+
+ Parameters
+ ----------
+ annotation_image: npt.NDArray[np.uint32]
+ The annotation image.
+
+ Returns
+ -------
+ npt.NDArray[np.uint32]
+ The adjusted annotation image.
+ Dict[int, int]
+ A dictionary mapping the original values to the new values.
+ key: original annotation ID
+ value: new annotation ID
+ """
+ # Returns a sorted array of unique values in the annotation image
+ values = np.unique(annotation_image)
+
+ if isinstance(values, da.Array):
+ values = values.compute()
+
+ # Create a mapping of the original values to the new values
+ # and adjust the annotation image
+ mapping = {}
+ new_value = 2**16
+ for value in values:
+ if value > new_value:
+ mapping[value] = new_value
+ annotation_image[annotation_image == value] = new_value
+ new_value += 1
+ elif value == new_value:
+ new_value += 1
+
+ return annotation_image, mapping
+
+
+def restore_atlas_labels(
+ annotation_image: npt.NDArray[np.uint32], mapping: Dict[int, int]
+) -> npt.NDArray[np.uint32]:
+ """
+ Restore the original atlas labels from the adjusted labels based on the
+ provided mapping.
+
+ Parameters
+ ----------
+ annotation_image: npt.NDArray[np.uint32]
+ The adjusted annotation image.
+ mapping: Dict[int, int]
+ A dictionary mapping the original values to the new values.
+ key: original annotation ID
+ value: new annotation ID
+
+ Returns
+ -------
+ npt.NDArray[np.uint32]
+ The restored annotation image.
+ """
+ for old_value, new_value in mapping.items():
+ annotation_image[annotation_image == new_value] = old_value
+
+ return annotation_image
+
+
+def serialize_registration_widget(obj):
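+    """
+    Default function for json.dump: napari layers and BrainGlobe atlases are
+    serialised by name, paths and viewers as strings, arrays as a
+    shape/dtype summary, and any other object via its __dict__() method.
+    """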
+ if isinstance(obj, napari.layers.Layer):
+ return obj.name
+ elif isinstance(obj, napari.Viewer):
+ return str(obj)
+ elif isinstance(obj, PurePath):
+ return str(obj)
+ elif isinstance(obj, BrainGlobeAtlas):
+ return obj.atlas_name
+ elif isinstance(obj, np.ndarray):
+ return f"<{type(obj)}>, {obj.shape}, {obj.dtype}"
+ else:
+ return obj.__dict__()
diff --git a/brainglobe_registration/widgets/adjust_moving_image_view.py b/brainglobe_registration/widgets/adjust_moving_image_view.py
index 111951d..5e5a829 100644
--- a/brainglobe_registration/widgets/adjust_moving_image_view.py
+++ b/brainglobe_registration/widgets/adjust_moving_image_view.py
@@ -196,3 +196,15 @@ def _on_atlas_reset(self):
self.adjust_atlas_roll.setValue(0)
self.reset_atlas_signal.emit()
+
+ def __dict__(self):
+ return {
+ "pixel_size_x": self.adjust_moving_image_pixel_size_x.value(),
+ "pixel_size_y": self.adjust_moving_image_pixel_size_y.value(),
+ "atlas_pitch": self.adjust_atlas_pitch.value(),
+ "atlas_yaw": self.adjust_atlas_yaw.value(),
+ "atlas_roll": self.adjust_atlas_roll.value(),
+ "moving_image_x_offset": self.adjust_moving_image_x.value(),
+ "moving_image_y_offset": self.adjust_moving_image_y.value(),
+ "moving_image_rotation": self.adjust_moving_image_rotate.value(),
+ }
diff --git a/brainglobe_registration/widgets/transform_select_view.py b/brainglobe_registration/widgets/transform_select_view.py
index d141816..58d54c8 100644
--- a/brainglobe_registration/widgets/transform_select_view.py
+++ b/brainglobe_registration/widgets/transform_select_view.py
@@ -58,7 +58,7 @@ def __init__(self, parent=None):
"ara_tools",
"brainregister_IBL",
]
- self.transform_type_options = ["", "rigid", "affine", "bspline"]
+ self.transform_type_options = ["", "affine", "bspline"]
# Create signal mappers for the transform type and file option
# dropdown menus
@@ -82,7 +82,7 @@ def __init__(self, parent=None):
# Add dropdown menus to the table for each transform type option
for i in range(len(self.transform_type_options) - 1):
# Create and configure the transform type dropdown menu
- self.transform_type_selections.append(QComboBox())
+ self.transform_type_selections.append(QComboBox(self))
self.transform_type_selections[i].addItems(
self.transform_type_options
)
@@ -92,9 +92,9 @@ def __init__(self, parent=None):
)
# Create and configure the file option dropdown menu
- self.file_selections.append(QComboBox())
+ self.file_selections.append(QComboBox(self))
self.file_selections[i].addItems(self.file_options)
- self.file_selections[i].setCurrentIndex(0)
+ self.file_selections[i].setCurrentIndex(1)
self.file_selections[i].currentIndexChanged.connect(
self.file_signaller.map
)
@@ -110,7 +110,7 @@ def __init__(self, parent=None):
self.setCellWidget(i, 1, self.file_selections[i])
# Add an extra row to the table for adding new transform types
- self.transform_type_selections.append(QComboBox())
+ self.transform_type_selections.append(QComboBox(self))
self.transform_type_selections[-1].addItems(
self.transform_type_options
)
@@ -123,7 +123,7 @@ def __init__(self, parent=None):
len(self.transform_type_options) - 1,
)
- self.file_selections.append(QComboBox())
+ self.file_selections.append(QComboBox(self))
self.file_selections[-1].addItems(self.file_options)
self.file_selections[-2].setEnabled(True)
self.file_selections[-1].setEnabled(False)
@@ -131,6 +131,7 @@ def __init__(self, parent=None):
self.file_signaller.map
)
+ # Subtract 1 to account for the empty transform type option
self.file_signaller.setMapping(
self.file_selections[-1], len(self.transform_type_options) - 1
)
@@ -143,7 +144,7 @@ def __init__(self, parent=None):
self.setCellWidget(
len(self.transform_type_options) - 1, 1, self.file_selections[-1]
)
- self.file_selections[-1].setEnabled(False)
+
self.resizeRowsToContents()
self.resizeColumnsToContents()
@@ -172,7 +173,7 @@ def _on_transform_type_change(self, index):
current_length = self.rowCount()
self.setRowCount(self.rowCount() + 1)
- self.transform_type_selections.append(QComboBox())
+ self.transform_type_selections.append(QComboBox(self))
self.transform_type_selections[-1].addItems(
self.transform_type_options
)
@@ -183,7 +184,7 @@ def _on_transform_type_change(self, index):
self.transform_type_selections[-1], current_length
)
- self.file_selections.append(QComboBox())
+ self.file_selections.append(QComboBox(self))
self.file_selections[-1].addItems(self.file_options)
self.file_selections[-2].setEnabled(True)
self.file_selections[-1].setEnabled(False)
diff --git a/pyproject.toml b/pyproject.toml
index 0d14999..8fe19ac 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,6 +31,7 @@ dependencies = [
"itk-elastix",
"lxml_html_clean",
"numpy",
+ "pandas",
"pytransform3d",
"qtpy",
]
diff --git a/tests/conftest.py b/tests/conftest.py
index df32c05..37ff1d0 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -7,6 +7,8 @@
from brainglobe_atlasapi import config as bg_config
from PIL import Image
+from brainglobe_registration.utils.utils import open_parameter_file
+
@pytest.fixture()
def make_napari_viewer_with_images(make_napari_viewer, pytestconfig):
@@ -22,6 +24,34 @@ def make_napari_viewer_with_images(make_napari_viewer, pytestconfig):
return viewer
+@pytest.fixture(scope="session")
+def parameter_lists():
+ transform_list = []
+ for transform_type in ["affine", "bspline"]:
+ file_path = (
+ Path(__file__).parent.parent.resolve()
+ / "brainglobe_registration"
+ / "parameters"
+ / "ara_tools"
+ / f"{transform_type}.txt"
+ )
+ transform_list.append((transform_type, open_parameter_file(file_path)))
+
+ return transform_list
+
+
+@pytest.fixture(scope="session")
+def parameter_lists_affine_only():
+ file_path = (
+ Path(__file__).parent.parent.resolve()
+ / "brainglobe_registration"
+ / "parameters"
+ / "ara_tools"
+ / "affine.txt"
+ )
+ return [("affine", open_parameter_file(file_path))]
+
+
def pytest_addoption(parser):
parser.addoption(
"--runslow",
@@ -88,6 +118,7 @@ def setup_preexisting_local_atlases():
in the test user data."""
preexisting_atlases = [
("example_mouse_100um", "v1.2"),
+ ("allen_mouse_25um", "v1.2"),
("allen_mouse_100um", "v1.2"),
("osten_mouse_100um", "v1.1"),
]
diff --git a/tests/test_images/InverseTransformParameters.0.txt b/tests/test_images/InverseTransformParameters.0.txt
new file mode 100644
index 0000000..b4767b5
--- /dev/null
+++ b/tests/test_images/InverseTransformParameters.0.txt
@@ -0,0 +1,23 @@
+(CenterOfRotationPoint 243.30832890239543 160.94442716213672)
+(CompressResultImage "false")
+(DefaultPixelValue 0)
+(Direction 1 0 0 1)
+(FinalBSplineInterpolationOrder 3)
+(FixedImageDimension 2)
+(FixedInternalImagePixelType "float")
+(HowToCombineTransforms "Compose")
+(Index 0 0)
+(InitialTransformParameterFileName "NoInitialTransform")
+(MovingImageDimension 2)
+(MovingInternalImagePixelType "float")
+(NumberOfParameters 6)
+(Origin 0 0)
+(ResampleInterpolator "FinalBSplineInterpolator")
+(Resampler "DefaultResampler")
+(ResultImageFormat "tiff")
+(ResultImagePixelType "float")
+(Size 456 320)
+(Spacing 1 1)
+(Transform "AffineTransform")
+(TransformParameters 1.0012288470663753 -0.05182565273355364 0.05066241734530797 1.004940816163661 -15.806903227778218 -1.4462426953553795)
+(UseDirectionCosines "true")
diff --git a/tests/test_images/TransformParameters.0.txt b/tests/test_images/TransformParameters.0.txt
new file mode 100644
index 0000000..7134e8a
--- /dev/null
+++ b/tests/test_images/TransformParameters.0.txt
@@ -0,0 +1,23 @@
+(CenterOfRotationPoint 213.5 158.5)
+(CompressResultImage "false")
+(DefaultPixelValue 0)
+(Direction 1 0 0 1)
+(FinalBSplineInterpolationOrder 3)
+(FixedImageDimension 2)
+(FixedInternalImagePixelType "float")
+(HowToCombineTransforms "Compose")
+(Index 0 0)
+(InitialTransformParameterFileName "NoInitialTransform")
+(MovingImageDimension 2)
+(MovingInternalImagePixelType "float")
+(NumberOfParameters 6)
+(Origin 0 0)
+(ResampleInterpolator "FinalBSplineInterpolator")
+(Resampler "DefaultResampler")
+(ResultImageFormat "tiff")
+(ResultImagePixelType "float")
+(Size 428 318)
+(Spacing 1 1)
+(Transform "AffineTransform")
+(TransformParameters 0.9961694613937621 0.05137285130530663 -0.05021136724642646 0.9924908378555806 15.810583591577414 2.1548954657311192)
+(UseDirectionCosines "true")
diff --git a/tests/test_images/areas.csv b/tests/test_images/areas.csv
new file mode 100644
index 0000000..d3b98ef
--- /dev/null
+++ b/tests/test_images/areas.csv
@@ -0,0 +1,172 @@
+structure_name,left_area_mm2,right_area_mm2,total_area_mm2
+root,0.318125,0.310625,0.6287499999999999
+"Primary somatosensory area, barrel field, layer 1",0.146875,0.14875,0.295625
+"Primary somatosensory area, barrel field, layer 2/3",0.293125,0.289375,0.5825
+"Primary somatosensory area, barrel field, layer 4",0.275,0.276875,0.551875
+"Primary somatosensory area, barrel field, layer 5",0.2725,0.27,0.5425
+"Primary somatosensory area, barrel field, layer 6a",0.283125,0.285625,0.5687500000000001
+"Primary somatosensory area, barrel field, layer 6b",0.050625,0.046875,0.0975
+"Supplemental somatosensory area, layer 1",0.08,0.08125,0.16125
+"Supplemental somatosensory area, layer 2/3",0.171875,0.17500000000000002,0.34687500000000004
+"Supplemental somatosensory area, layer 4",0.1075,0.10625,0.21375
+"Supplemental somatosensory area, layer 5",0.17375000000000002,0.17125,0.34500000000000003
+"Supplemental somatosensory area, layer 6a",0.145,0.1475,0.2925
+"Supplemental somatosensory area, layer 6b",0.0225,0.023125,0.045625
+"Dorsal auditory area, layer 1",0.090625,0.088125,0.17875
+"Dorsal auditory area, layer 2/3",0.17,0.16875,0.33875
+"Dorsal auditory area, layer 4",0.075,0.08125,0.15625
+"Dorsal auditory area, layer 5",0.228125,0.22875,0.45687500000000003
+"Dorsal auditory area, layer 6a",0.12687500000000002,0.12625,0.25312500000000004
+"Dorsal auditory area, layer 6b",0.015,0.015,0.03
+"Primary auditory area, layer 1",0.09125,0.088125,0.179375
+"Primary auditory area, layer 2/3",0.1125,0.11625,0.22875
+"Primary auditory area, layer 4",0.034375,0.033125,0.0675
+"Primary auditory area, layer 5",0.011875,0.01125,0.023125
+"Ventral auditory area, layer 1",0.07625,0.075625,0.15187499999999998
+"Ventral auditory area, layer 2/3",0.134375,0.13375,0.268125
+"Ventral auditory area, layer 4",0.08625000000000001,0.08625000000000001,0.17250000000000001
+"Ventral auditory area, layer 5",0.26625,0.268125,0.534375
+"Ventral auditory area, layer 6a",0.165,0.1625,0.3275
+"Ventral auditory area, layer 6b",0.0175,0.020625,0.038125000000000006
+"Anteromedial visual area, layer 1",0.048125,0.04875,0.096875
+"Anteromedial visual area, layer 2/3",0.07375,0.0725,0.14625
+"Anteromedial visual area, layer 4",0.018125,0.018125,0.03625
+"Anteromedial visual area, layer 5",0.075625,0.073125,0.14875
+"Anteromedial visual area, layer 6a",0.04,0.040625,0.080625
+"Anteromedial visual area, layer 6b",0.006875,0.00625,0.013125000000000001
+"Retrosplenial area, lateral agranular part, layer 1",0.05,0.048125,0.098125
+"Retrosplenial area, lateral agranular part, layer 2/3",0.06875,0.0725,0.14125
+"Retrosplenial area, lateral agranular part, layer 5",0.091875,0.094375,0.18625
+"Retrosplenial area, lateral agranular part, layer 6a",0.06375,0.064375,0.128125
+"Retrosplenial area, lateral agranular part, layer 6b",0.00875,0.0075,0.01625
+"Retrosplenial area, dorsal part, layer 1",0.099375,0.099375,0.19875
+"Retrosplenial area, dorsal part, layer 2/3",0.14250000000000002,0.13875,0.28125
+"Retrosplenial area, dorsal part, layer 5",0.166875,0.163125,0.32999999999999996
+"Retrosplenial area, dorsal part, layer 6a",0.125,0.124375,0.249375
+"Retrosplenial area, dorsal part, layer 6b",0.008125,0.009375,0.0175
+"Retrosplenial area, ventral part, layer 1",0.11875000000000001,0.08875,0.20750000000000002
+"Retrosplenial area, ventral part, layer 2/3",0.136875,0.138125,0.275
+"Retrosplenial area, ventral part, layer 5",0.210625,0.211875,0.4225
+"Retrosplenial area, ventral part, layer 6a",0.0675,0.06875,0.13625
+"Retrosplenial area, ventral part, layer 6b",0.01,0.009375,0.019375
+"Anterior area, layer 1",0.14375000000000002,0.145,0.28875
+"Anterior area, layer 2/3",0.26125,0.261875,0.5231250000000001
+"Anterior area, layer 4",0.10875,0.10250000000000001,0.21125
+"Anterior area, layer 5",0.249375,0.255,0.504375
+"Anterior area, layer 6a",0.184375,0.1825,0.366875
+"Anterior area, layer 6b",0.03625,0.03875,0.075
+"Rostrolateral area, layer 1",0.038125,0.04,0.078125
+"Rostrolateral area, layer 2/3",0.059375000000000004,0.058750000000000004,0.11812500000000001
+"Rostrolateral area, layer 4",0.0275,0.02875,0.05625
+"Rostrolateral area, layer 5",0.028125,0.02875,0.056875
+"Rostrolateral area, layer 6a",0.013125,0.0125,0.025625000000000002
+"Rostrolateral area, layer 6b",0.00125,0.000625,0.001875
+"Temporal association areas, layer 1",0.0975,0.09375,0.19125
+"Temporal association areas, layer 2/3",0.13625,0.13625,0.2725
+"Temporal association areas, layer 4",0.056875,0.0575,0.114375
+"Temporal association areas, layer 5",0.208125,0.210625,0.41875
+"Temporal association areas, layer 6a",0.165,0.165,0.33
+"Temporal association areas, layer 6b",0.018125,0.015625,0.03375
+"Perirhinal area, layer 1",0.05375,0.053125,0.106875
+"Perirhinal area, layer 2/3",0.080625,0.08125,0.161875
+"Perirhinal area, layer 5",0.045,0.044375,0.089375
+"Perirhinal area, layer 6a",0.004375,0.003125,0.007500000000000001
+Ectorhinal area/Layer 1,0.065625,0.06,0.125625
+Ectorhinal area/Layer 2/3,0.113125,0.11750000000000001,0.23062500000000002
+Ectorhinal area/Layer 5,0.16375,0.165625,0.329375
+Ectorhinal area/Layer 6a,0.12125,0.124375,0.24562499999999998
+Ectorhinal area/Layer 6b,0.0125,0.0125,0.025
+Olfactory areas,0.0075,0.0075,0.015
+Piriform area,1.081875,1.080625,2.1624999999999996
+"Cortical amygdalar area, posterior part, lateral zone",0.361875,0.36125,0.723125
+"Cortical amygdalar area, posterior part, medial zone",0.289375,0.29125,0.580625
+Piriform-amygdalar area,0.2175,0.21625,0.43374999999999997
+Hippocampal formation,0.0025,0.003125,0.005625
+Field CA1,1.570625,1.57125,3.1418749999999998
+Field CA2,0.05375,0.05375,0.1075
+Field CA3,0.688125,0.68625,1.3743750000000001
+"Dentate gyrus, molecular layer",0.62625,0.6218750000000001,1.248125
+"Dentate gyrus, polymorph layer",0.104375,0.10250000000000001,0.206875
+"Dentate gyrus, granule cell layer",0.24125,0.245625,0.486875
+Fasciola cinerea,0.02875,0.029375000000000002,0.058125
+"Entorhinal area, lateral part, layer 1",0.0475,0.0475,0.095
+"Entorhinal area, lateral part, layer 2",0.058750000000000004,0.056875,0.115625
+"Entorhinal area, lateral part, layer 3",0.0675,0.069375,0.13687500000000002
+"Entorhinal area, lateral part, layer 5",0.075625,0.075,0.150625
+"Entorhinal area, lateral part, layer 6a",0.015625,0.011875,0.0275
+Cortical subplate,0.028125,0.033125,0.06125
+"Endopiriform nucleus, dorsal part",0.18625,0.18875,0.375
+"Endopiriform nucleus, ventral part",0.11750000000000001,0.114375,0.231875
+Lateral amygdalar nucleus,0.5,0.500625,1.0006249999999999
+"Basolateral amygdalar nucleus, anterior part",0.171875,0.174375,0.34625
+"Basolateral amygdalar nucleus, posterior part",0.244375,0.24625,0.490625
+"Basolateral amygdalar nucleus, ventral part",0.2525,0.2575,0.51
+"Basomedial amygdalar nucleus, posterior part",0.375,0.37375,0.74875
+Posterior amygdalar nucleus,0.18875,0.19,0.37875000000000003
+Striatum,0.2475,0.2475,0.495
+Caudoputamen,0.82375,0.820625,1.6443750000000001
+"Central amygdalar nucleus, capsular part",0.043750000000000004,0.043750000000000004,0.08750000000000001
+"Central amygdalar nucleus, lateral part",0.045,0.041875,0.08687500000000001
+"Central amygdalar nucleus, medial part",0.079375,0.08125,0.16062500000000002
+Intercalated amygdalar nucleus,0.040625,0.040625,0.08125
+Medial amygdalar nucleus,0.7987500000000001,0.798125,1.596875
+Thalamus,0.21625,0.2,0.41625
+Ventral medial nucleus of the thalamus,0.32375,0.324375,0.6481250000000001
+Ventral posterolateral nucleus of the thalamus,0.2725,0.27375,0.54625
+"Ventral posterolateral nucleus of the thalamus, parvicellular part",0.090625,0.0925,0.18312499999999998
+Ventral posteromedial nucleus of the thalamus,0.72375,0.7225,1.44625
+"Ventral posteromedial nucleus of the thalamus, parvicellular part",0.30875,0.30625,0.615
+"Subparafascicular nucleus, magnocellular part",0.053125,0.051250000000000004,0.104375
+"Dorsal part of the lateral geniculate complex, shell",0.068125,0.065625,0.13375
+"Dorsal part of the lateral geniculate complex, core",0.2425,0.24125,0.48375
+"Dorsal part of the lateral geniculate complex, ipsilateral zone",0.043750000000000004,0.043750000000000004,0.08750000000000001
+Lateral posterior nucleus of the thalamus,0.65625,0.6587500000000001,1.315
+Posterior complex of the thalamus,0.64875,0.65,1.29875
+Ethmoid nucleus of the thalamus,0.106875,0.10187500000000001,0.20875
+Intermediodorsal nucleus of the thalamus,0.153125,0.13,0.283125
+Mediodorsal nucleus of thalamus,0.4275,0.4225,0.85
+Paraventricular nucleus of the thalamus,0.181875,0.15812500000000002,0.34
+Central medial nucleus of the thalamus,0.03125,0.02625,0.057499999999999996
+Central lateral nucleus of the thalamus,0.185625,0.18937500000000002,0.375
+Parafascicular nucleus,0.17125,0.17375000000000002,0.34500000000000003
+Reticular nucleus of the thalamus,0.155625,0.1525,0.308125
+Ventral part of the lateral geniculate complex,0.1475,0.14625,0.29374999999999996
+Medial habenula,0.13375,0.13625,0.27
+Lateral habenula,0.194375,0.194375,0.38875
+Hypothalamus,0.585,0.573125,1.158125
+"Periventricular hypothalamic nucleus, intermediate part",0.073125,0.07375,0.14687499999999998
+Arcuate hypothalamic nucleus,0.165,0.165,0.33
+Dorsomedial nucleus of the hypothalamus,0.163125,0.168125,0.33125
+Ventral premammillary nucleus,0.04875,0.04875,0.0975
+Posterior hypothalamic nucleus,0.58,0.581875,1.161875
+Lateral hypothalamic area,0.451875,0.453125,0.905
+Parasubthalamic nucleus,0.135625,0.135625,0.27125
+Subthalamic nucleus,0.170625,0.174375,0.345
+Tuberal nucleus,0.19062500000000002,0.19062500000000002,0.38125000000000003
+Zona incerta,0.50375,0.50375,1.0075
+Median eminence,0.059375000000000004,0.05,0.109375
+Precommissural nucleus,0.005,0.003125,0.008125
+fiber tracts,0.631875,0.631875,1.26375
+optic tract,0.18,0.185,0.365
+supra-callosal cerebral white matter,0.040625,0.040625,0.08125
+external capsule,0.02625,0.025625000000000002,0.051875000000000004
+"corpus callosum, extreme capsule",0.033125,0.035,0.068125
+"corpus callosum, splenium",0.6012500000000001,0.593125,1.194375
+internal capsule,0.413125,0.420625,0.83375
+external medullary lamina of the thalamus,0.003125,0.005625,0.00875
+optic radiation,0.5825,0.5775,1.1600000000000001
+auditory radiation,0.125,0.12875,0.25375000000000003
+nigrostriatal tract,0.04625,0.046875,0.093125
+amygdalar capsule,0.06125,0.0525,0.11374999999999999
+cingulum bundle,0.1625,0.1625,0.325
+alveus,0.2175,0.216875,0.434375
+fimbria,0.22375,0.224375,0.448125
+columns of the fornix,0.02125,0.0225,0.04375
+dorsal hippocampal commissure,0.14625,0.138125,0.284375
+stria terminalis,0.19625,0.18937500000000002,0.385625
+mammillothalamic tract,0.0525,0.051250000000000004,0.10375000000000001
+fasciculus retroflexus,0.07375,0.075,0.14875
+habenular commissure,0.02,0.019375,0.039375
+lateral ventricle,0.19375,0.19187500000000002,0.385625
+choroid plexus,0.0775,0.068125,0.145625
+third ventricle,0.20500000000000002,0.080625,0.285625
diff --git a/tests/test_images/boundaries.tiff b/tests/test_images/boundaries.tiff
new file mode 100644
index 0000000..41f93a1
Binary files /dev/null and b/tests/test_images/boundaries.tiff differ
diff --git a/tests/test_images/deformation_field_0.tiff b/tests/test_images/deformation_field_0.tiff
new file mode 100644
index 0000000..b231365
Binary files /dev/null and b/tests/test_images/deformation_field_0.tiff differ
diff --git a/tests/test_images/deformation_field_1.tiff b/tests/test_images/deformation_field_1.tiff
new file mode 100644
index 0000000..bfc9b59
Binary files /dev/null and b/tests/test_images/deformation_field_1.tiff differ
diff --git a/tests/test_images/downsampled.tiff b/tests/test_images/downsampled.tiff
new file mode 100644
index 0000000..957997c
Binary files /dev/null and b/tests/test_images/downsampled.tiff differ
diff --git a/tests/test_images/inverted_reference.tiff b/tests/test_images/inverted_reference.tiff
new file mode 100644
index 0000000..2562eb8
Binary files /dev/null and b/tests/test_images/inverted_reference.tiff differ
diff --git a/tests/test_images/registered_atlas.tiff b/tests/test_images/registered_atlas.tiff
new file mode 100644
index 0000000..4c6db57
Binary files /dev/null and b/tests/test_images/registered_atlas.tiff differ
diff --git a/tests/test_images/registered_hemispheres.tiff b/tests/test_images/registered_hemispheres.tiff
new file mode 100644
index 0000000..a63105b
Binary files /dev/null and b/tests/test_images/registered_hemispheres.tiff differ
diff --git a/tests/test_images/registered_reference.tiff b/tests/test_images/registered_reference.tiff
new file mode 100644
index 0000000..d1afa27
Binary files /dev/null and b/tests/test_images/registered_reference.tiff differ
diff --git a/tests/test_images/registered_sample.tiff b/tests/test_images/registered_sample.tiff
new file mode 100644
index 0000000..2a9d4fb
Binary files /dev/null and b/tests/test_images/registered_sample.tiff differ
diff --git a/tests/test_register.py b/tests/test_register.py
index ff32d71..269e223 100644
--- a/tests/test_register.py
+++ b/tests/test_register.py
@@ -1,30 +1,183 @@
+from pathlib import Path
+
+import itk
+import numpy as np
import pytest
-from PIL import Image
+from brainglobe_atlasapi import BrainGlobeAtlas
+from tifffile import imread
from brainglobe_registration.elastix.register import (
+ calculate_deformation_field,
+ invert_transformation,
run_registration,
setup_parameter_object,
+ transform_annotation_image,
+ transform_image,
)
+SLICE_NUMBER = 293
+
+
+def compare_parameter_objects(param_obj1, param_obj2):
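+    """
+    Assert that two ITK parameter objects are equivalent, comparing numeric
+    entries (e.g. TransformParameters) with np.allclose and all other
+    entries exactly.
+    """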
+ assert (
+ param_obj1.GetNumberOfParameterMaps()
+ == param_obj2.GetNumberOfParameterMaps()
+ )
+
+ for index in range(param_obj1.GetNumberOfParameterMaps()):
+ submap_1 = dict(param_obj1.GetParameterMap(index))
+ submap_2 = dict(param_obj2.GetParameterMap(index))
+
+ for key in submap_1.keys():
+ if key in [
+ "TransformParameters",
+ "CenterOfRotationPoint",
+ "GridOrigin",
+ "GridSpacing",
+ ]:
+ assert np.allclose(
+ np.array(submap_1[key], dtype=np.double),
+ np.array(submap_2[key], dtype=np.double),
+ )
+ else:
+ assert submap_1[key] == submap_2[key]
+
+
+@pytest.fixture(scope="module")
+def atlas(atlas_name="allen_mouse_25um"):
+ return BrainGlobeAtlas(atlas_name)
+
+
+@pytest.fixture(scope="module")
+def atlas_reference(atlas, slice_number=SLICE_NUMBER):
+ return atlas.reference[slice_number, :, :]
+
+
+@pytest.fixture(scope="module")
+def atlas_annotation(atlas, slice_number=SLICE_NUMBER):
+ # Need the astype call to avoid a crash on Windows
+ return atlas.annotation[slice_number, :, :].astype(np.uint32)
+
-@pytest.fixture
-def sample_atlas_slice():
- return Image.open("test_images/Atlas_Hipp.tif")
+@pytest.fixture(scope="module")
+def atlas_hemisphere(atlas, slice_number=SLICE_NUMBER):
+ return atlas.hemispheres[slice_number, :, :]
-@pytest.fixture
+@pytest.fixture(scope="module")
def sample_moving_image():
- return Image.open("test_images/sample_hipp.tif")
+ return imread(Path(__file__).parent / "test_images/sample_hipp.tif")
-@pytest.mark.slow
-def test_run_registration(sample_atlas_slice, sample_moving_image):
- result_image, transform_parameters = run_registration(
- sample_atlas_slice,
+@pytest.fixture(scope="module")
+def registration_affine_only(
+ atlas_reference, sample_moving_image, parameter_lists_affine_only
+):
+ yield run_registration(
+ atlas_reference,
sample_moving_image,
+ parameter_lists_affine_only,
+ )
+
+
+@pytest.fixture(scope="module")
+def invert_transform(
+ registration_affine_only, atlas_reference, parameter_lists_affine_only
+):
+ result_image, transform_parameters = registration_affine_only
+ inverted_image, invert_parameters = invert_transformation(
+ atlas_reference, parameter_lists_affine_only, transform_parameters
+ )
+
+ yield inverted_image, invert_parameters, transform_parameters
+
+
+def test_run_registration(registration_affine_only):
+ result_image, transform_parameters = registration_affine_only
+
+ expected_result_image = imread(
+ Path(__file__).parent / "test_images/registered_reference.tiff"
+ )
+ expected_parameter_object = itk.ParameterObject.New()
+ expected_parameter_object.AddParameterFile(
+ str(Path(__file__).parent / "test_images/TransformParameters.0.txt")
+ )
+
+ assert np.allclose(result_image, expected_result_image, atol=0.1)
+ compare_parameter_objects(transform_parameters, expected_parameter_object)
+
+
+def test_transform_annotation_image(
+ atlas_annotation, registration_affine_only
+):
+ result_image, transform_parameters = registration_affine_only
+
+ transformed_annotation = transform_annotation_image(
+ atlas_annotation, transform_parameters
+ )
+
+ expected_transformed_annotation = imread(
+ Path(__file__).parent / "test_images/registered_atlas.tiff"
+ )
+
+ assert np.allclose(transformed_annotation, expected_transformed_annotation)
+
+
+def test_invert_transformation(invert_transform):
+ invert_image, invert_parameters, original_parameters = invert_transform
+
+ expected_image = imread(
+ Path(__file__).parent / "test_images/inverted_reference.tiff"
+ )
+ expected_parameter_object = itk.ParameterObject.New()
+ expected_parameter_object.AddParameterFile(
+ str(
+ Path(__file__).parent
+ / "test_images/InverseTransformParameters.0.txt"
+ )
+ )
+
+ assert np.allclose(invert_image, expected_image, atol=0.1)
+ compare_parameter_objects(invert_parameters, expected_parameter_object)
+
+ for i in range(original_parameters.GetNumberOfParameterMaps()):
+ assert original_parameters.GetParameter(
+ i, "FinalBSplineInterpolationOrder"
+ ) == ("3",)
+
+
+def test_transform_image(invert_transform, sample_moving_image):
+ invert_image, invert_parameters, _ = invert_transform
+
+ transformed_image = transform_image(sample_moving_image, invert_parameters)
+
+ expected_image = imread(
+ Path(__file__).parent / "test_images/registered_sample.tiff"
)
- assert result_image is not None
- assert transform_parameters is not None
+
+ assert np.allclose(transformed_image, expected_image, atol=0.1)
+
+
+def test_calculate_deformation_field(
+ sample_moving_image, registration_affine_only
+):
+ result_image, transform_parameters = registration_affine_only
+
+ deformation_field = calculate_deformation_field(
+ sample_moving_image, transform_parameters
+ )
+
+ deformation_field_0 = imread(
+ Path(__file__).parent / "test_images/deformation_field_0.tiff"
+ )
+ deformation_field_1 = imread(
+ Path(__file__).parent / "test_images/deformation_field_1.tiff"
+ )
+ expected_deformation_field = np.stack(
+ (deformation_field_0, deformation_field_1), axis=-1
+ )
+
+ assert np.allclose(deformation_field, expected_deformation_field, atol=0.1)
def test_setup_parameter_object_empty_list():
diff --git a/tests/test_registration_widget.py b/tests/test_registration_widget.py
index e7309d9..8d0ceb1 100644
--- a/tests/test_registration_widget.py
+++ b/tests/test_registration_widget.py
@@ -29,7 +29,12 @@ def registration_widget_with_example_atlas(make_napari_viewer_with_images):
# Based on the downloaded atlases by the fixture in conftest.py,
# the example atlas will be in the third position.
- example_atlas_index = 2
+ example_atlas_index = -1
+ for i, atlas in enumerate(widget._available_atlases):
+ if atlas == "example_mouse_100um":
+ example_atlas_index = i
+ break
+
widget._on_atlas_dropdown_index_changed(example_atlas_index)
return widget
diff --git a/tests/test_transform_select_view.py b/tests/test_transform_select_view.py
index 8996657..9be9926 100644
--- a/tests/test_transform_select_view.py
+++ b/tests/test_transform_select_view.py
@@ -22,11 +22,14 @@ def test_transform_select_view(transform_select_view, qtbot):
transform_select_view.horizontalHeaderItem(1).text() == "Default File"
)
+ # At initialisation only affine and bspline transforms are shown
assert transform_select_view.rowCount() == len(
transform_select_view.transform_type_options
)
assert transform_select_view.columnCount() == 2
+ # At initialisation affine and bspline transforms are shown
+ # with ara_tools as the default file
for i in range(len(transform_select_view.transform_type_options) - 1):
assert (
transform_select_view.cellWidget(i, 0).currentText()
@@ -34,7 +37,7 @@ def test_transform_select_view(transform_select_view, qtbot):
)
assert (
transform_select_view.cellWidget(i, 1).currentText()
- == transform_select_view.file_options[0]
+ == transform_select_view.file_options[1]
)
last_row = len(transform_select_view.transform_type_options) - 1
@@ -161,6 +164,7 @@ def test_file_option_default_on_transform_change(transform_select_view, qtbot):
== transform_select_view.file_options[file_option_index]
)
+ # Change the transform type from affine to bspline
transform_select_view.cellWidget(0, 0).setCurrentIndex(2)
assert (
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 89068e7..ccc5818 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -3,14 +3,18 @@
import numpy as np
import pytest
+from brainglobe_atlasapi import BrainGlobeAtlas
from pytransform3d.rotations import active_matrix_from_angle
from brainglobe_registration.utils.utils import (
adjust_napari_image_layer,
+ calculate_areas,
calculate_rotated_bounding_box,
+ convert_atlas_labels,
find_layer_index,
get_image_layer_names,
open_parameter_file,
+ restore_atlas_labels,
)
@@ -123,3 +127,69 @@ def test_calculate_rotated_bounding_box(basis, rotation, expected_bounds):
result_shape = calculate_rotated_bounding_box(image_shape, rotation_matrix)
assert result_shape == expected_bounds
+
+
+def test_convert_atlas_labels_no_change():
+ mock_annotations = np.arange(1024).reshape((32, 32))
+
+ result, mapping = convert_atlas_labels(mock_annotations)
+
+ assert np.array_equal(result, mock_annotations)
+ assert len(mapping) == 0
+
+
+def test_convert_atlas_labels_high_labels():
+ mock_annotations = np.arange(2**16, 2**16 + 1024).reshape((32, 32))
+
+ result, mapping = convert_atlas_labels(mock_annotations)
+
+    # Since the labels are consecutive, starting at 2**16, there should be
+    # no change to the array and the mapping should be empty
+ assert np.array_equal(result, mock_annotations)
+ assert len(mapping) == 0
+
+
+def test_convert_atlas_labels():
+ rng = np.random.default_rng(42)
+
+ mock_annotations = rng.integers(
+ 2**32 - 1, size=(256, 256), dtype=np.uint32
+ )
+
+ result, mapping = convert_atlas_labels(mock_annotations)
+
+ max_value = 2**16
+ unique_values = np.unique(mock_annotations)
+ expected_mapping_count = unique_values[unique_values >= max_value].size
+
+ assert len(mapping) == expected_mapping_count
+
+ restored_image = restore_atlas_labels(result, mapping)
+
+ assert np.array_equal(restored_image, mock_annotations)
+
+
+def test_calculate_areas(tmp_path):
+ atlas = BrainGlobeAtlas("allen_mouse_100um")
+
+ mid_point = atlas.annotation.shape[0] // 2
+ mock_annotations = atlas.annotation[mid_point, :, :]
+ hemispheres = atlas.hemispheres[mid_point, :, :]
+
+ output_path = tmp_path / "areas.csv"
+
+ out_df = calculate_areas(atlas, mock_annotations, hemispheres, output_path)
+
+ assert output_path.exists()
+ assert out_df.columns.size == 4
+
+ # Based on regression testing, the following values are expected
+ assert out_df.loc[672, "structure_name"] == "Caudoputamen"
+ assert out_df.loc[672, "left_area_mm2"] == 1.98
+ assert out_df.loc[672, "right_area_mm2"] == 2.0
+ assert out_df.loc[672, "total_area_mm2"] == 3.98
+
+ assert out_df.loc[961, "structure_name"] == "Piriform area"
+ assert out_df.loc[961, "left_area_mm2"] == 1.27
+ assert out_df.loc[961, "right_area_mm2"] == 1.28
+ assert out_df.loc[961, "total_area_mm2"] == 2.55