diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..980408851 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +# Set update schedule for GitHub Actions + +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + # Check for updates to GitHub Actions every week + interval: "weekly" + commit-message: + # Prefix all commit messages with "CI: " + prefix: "CI" diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 000000000..ebc90fde1 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,23 @@ +# GitHub Action to automate the identification of common misspellings in text files. +# https://github.com/codespell-project/actions-codespell +# https://github.com/codespell-project/codespell +name: codespell +on: + # Triggers the workflow on push or pull request events + push: + branches: [ master ] + pull_request: + branches: [ master ] + +permissions: + contents: read + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: codespell-project/actions-codespell@94259cd8be02ad2903ba34a22d9c13de21a74461 # v2.0 + with: + check_filenames: true diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..9792a5609 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,25 @@ +name: CI (Lint) + +on: + # Triggers the workflow on push or pull request events + push: + branches: [ master ] + pull_request: + branches: + - "*" + +permissions: + contents: read + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 + with: + python-version: '3.9' + + - uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # v3.0.0 diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index cbc0e15ab..b246f729e 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -16,7 +16,7 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{matrix.python-version}} - + - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/run_tests_os.yml b/.github/workflows/run_tests_os.yml index d438efa61..770a66025 100644 --- a/.github/workflows/run_tests_os.yml +++ b/.github/workflows/run_tests_os.yml @@ -11,7 +11,7 @@ jobs: os: [ubuntu-latest, windows-latest, macos-latest] python-version: ["3.10"] runs-on: ${{ matrix.os }} - + steps: - uses: actions/checkout@v2 @@ -27,7 +27,7 @@ jobs: pip install pytest Cython pip install torch==2.0.0 -f https://download.pytorch.org/whl/cpu pip install . 
- + - name: Install dependencies on Windows / MacOS if: runner.os == 'Windows' || runner.os == 'macOS' run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..f9516df1a --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,32 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: "v4.4.0" + hooks: + - id: check-added-large-files + args: ['--maxkb=1024'] + - id: check-ast + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: trailing-whitespace + +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.7 + hooks: + # Run the linter. + - id: ruff + args: [ --fix ] + +- repo: https://github.com/asottile/pyupgrade + rev: v3.15.0 + hooks: + - id: pyupgrade + args: [--py39-plus] + +- repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell + args: [ + "--write-changes" + ] diff --git a/README.md b/README.md index 419fe17fd..0ada4052b 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Tool for segmentation of over 117 classes in CT images. It was trained on a wide ![Alt text](resources/imgs/overview_classes_2.png) -Created by the department of [Research and Analysis at University Hospital Basel](https://www.unispital-basel.ch/en/radiologie-nuklearmedizin/forschung-radiologie-nuklearmedizin). +Created by the department of [Research and Analysis at University Hospital Basel](https://www.unispital-basel.ch/en/radiologie-nuklearmedizin/forschung-radiologie-nuklearmedizin). If you use it please cite our [Radiology AI paper](https://pubs.rsna.org/doi/10.1148/ryai.230024). Please also cite [nnUNet](https://github.com/MIC-DKFZ/nnUNet) since TotalSegmentator is heavily based on it. @@ -14,7 +14,7 @@ If you use it please cite our [Radiology AI paper](https://pubs.rsna.org/doi/10. TotalSegmentator works on Ubuntu, Mac, and Windows and on CPU and GPU. -Install dependencies: +Install dependencies: * Python >= 3.9 * [Pytorch](http://pytorch.org/) >= 1.12.1 @@ -45,7 +45,7 @@ TotalSegmentator -i ct.nii.gz -o segmentations Next to the default task (`total`) there are more subtasks with more classes: -Openly available for any usage: +Openly available for any usage: * **total**: default task containing 117 main classes (see [here](https://github.com/wasserth/TotalSegmentator#class-details) for a list of classes) * **lung_vessels**: lung_vessels (cite [paper](https://www.sciencedirect.com/science/article/pii/S0720048X22001097)), lung_trachea_bronchia * **body**: body, body_trunc, body_extremities, skin @@ -56,7 +56,7 @@ Openly available for any usage: *: These models are not trained on the full totalsegmentator dataset but on some small other datasets. Therefore, expect them to work less robustly. -Available with a license (free licenses available for non-commercial usage [here](https://backend.totalsegmentator.com/license-academic/). For a commercial license contact jakob.wasserthal@usb.ch): +Available with a license (free licenses available for non-commercial usage [here](https://backend.totalsegmentator.com/license-academic/). 
For a commercial license contact jakob.wasserthal@usb.ch): * **heartchambers_highres**: myocardium, atrium_left, ventricle_left, atrium_right, ventricle_right, aorta, pulmonary_artery (trained on sub-millimeter resolution) * **appendicular_bones**: patella, tibia, fibula, tarsal, metatarsal, phalanges_feet, ulna, radius, carpal, metacarpal, phalanges_hand * **tissue_types**: subcutaneous_fat, skeletal_muscle, torso_fat @@ -87,17 +87,17 @@ docker run --gpus 'device=0' --ipc=host -v /absolute/path/to/my/data/directory:/ ### Running v1 -If you want to keep on using TotalSegmentator v1 (e.g. because you do not want to change your pipeline) you +If you want to keep on using TotalSegmentator v1 (e.g. because you do not want to change your pipeline) you can install it with the following command: ``` pip install TotalSegmentator==1.5.7 ``` The documentation for v1 can be found [here](https://github.com/wasserth/TotalSegmentator/tree/v1.5.7). Bugfixes for v1 are developed in the branch `v1_bugfixes`. -Our Radiology AI publication refers to TotalSegmentator v1. +Our Radiology AI publication refers to TotalSegmentator v1. ### Resource Requirements -Totalsegmentator has the following runtime and memory requirements (using an Nvidia RTX 3090 GPU): +Totalsegmentator has the following runtime and memory requirements (using an Nvidia RTX 3090 GPU): (1.5mm is the normal model and 3mm is the `--fast` model. With v2 the runtimes have increased a bit since we added more classes.) @@ -112,8 +112,8 @@ If you want to reduce memory consumption you can use the following options: ### Train/validation/test split -The exact split of the dataset can be found in the file `meta.csv` inside of the [dataset](https://doi.org/10.5281/zenodo.6802613). This was used for the validation in our paper. -The exact numbers of the results for the high-resolution model (1.5mm) can be found [here](resources/results_all_classes_v1.json). The paper shows these numbers in the supplementary materials Figure 11. +The exact split of the dataset can be found in the file `meta.csv` inside of the [dataset](https://doi.org/10.5281/zenodo.6802613). This was used for the validation in our paper. +The exact numbers of the results for the high-resolution model (1.5mm) can be found [here](resources/results_all_classes_v1.json). The paper shows these numbers in the supplementary materials Figure 11. ### Retrain model and run evaluation @@ -126,7 +126,7 @@ If you want to combine some subclasses (e.g. lung lobes) into one binary mask (e totalseg_combine_masks -i totalsegmentator_output_dir -o combined_mask.nii.gz -m lung ``` -Normally weights are automatically downloaded when running TotalSegmentator. If you want to download the weights with an extra command (e.g. when building a docker container) use this: +Normally weights are automatically downloaded when running TotalSegmentator. If you want to download the weights with an extra command (e.g. when building a docker container) use this: ``` totalseg_download_weights -t ``` @@ -155,7 +155,7 @@ pip install git+https://github.com/wasserth/TotalSegmentator.git ### Typical problems -**ITK loading Error** +**ITK loading Error** When you get the following error message ``` ITK ERROR: ITK only supports orthonormal direction cosines. No orthonormal definition was found! 
@@ -166,12 +166,12 @@ pip install SimpleITK==2.0.2 ``` Alternatively you can try -``` +``` fslorient -copysform2qform input_file fslreorient2std input_file output_file ``` -**Bad segmentations** +**Bad segmentations** When you get bad segmentation results check the following: * does your input image contain the original HU values or are the intensity values rescaled to a different range? * is the patient normally positioned in the image? (In axial view is the spine at the bottom of the image? In the coronal view is the head at the top of the image?) @@ -181,13 +181,13 @@ When you get bad segmentation results check the following: TotalSegmentator sends anonymous usage statistics to help us improve it further. You can deactivate it by setting `send_usage_stats` to `false` in `~/.totalsegmentator/config.json`. -### Reference +### Reference For more details see our [Radiology AI paper](https://pubs.rsna.org/doi/10.1148/ryai.230024) ([freely available preprint](https://arxiv.org/abs/2208.05868)). If you use this tool please cite it as follows ``` Wasserthal, J., Breit, H.-C., Meyer, M.T., Pradella, M., Hinck, D., Sauter, A.W., Heye, T., Boll, D., Cyriac, J., Yang, S., Bach, M., Segeroth, M., 2023. TotalSegmentator: Robust Segmentation of 104 Anatomic Structures in CT Images. Radiology: Artificial Intelligence. https://doi.org/10.1148/ryai.230024 ``` -Please also cite [nnUNet](https://github.com/MIC-DKFZ/nnUNet) since TotalSegmentator is heavily based on it. +Please also cite [nnUNet](https://github.com/MIC-DKFZ/nnUNet) since TotalSegmentator is heavily based on it. Moreover, we would really appreciate it if you let us know what you are using this tool for. You can also tell us what classes we should add in future releases. You can do so [here](https://github.com/wasserth/TotalSegmentator/issues/1). @@ -195,7 +195,7 @@ Moreover, we would really appreciate it if you let us know what you are using th The following table shows a list of all classes. -TA2 is a standardized way to name anatomy. Mostly the TotalSegmentator names follow this standard. +TA2 is a standardized way to name anatomy. Mostly the TotalSegmentator names follow this standard. For some classes they differ which you can see in the table below. [Here](resources/totalsegmentator_snomed_mapping.csv) you can find a mapping of the TotalSegmentator classes to SNOMED-CT codes. diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..4fac63c60 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,69 @@ +[tool.ruff] +# Exclude a variety of commonly ignored directories. +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", +] + +line-length = 550 +indent-width = 4 +target-version = "py39" + +[tool.ruff.lint] +# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. +# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or +# McCabe complexity (`C901`) by default. 
+select = [
+    "E4",  # Pycodestyle: Import
+    "E7",  # Pycodestyle: Statement
+    "E9",  # Pycodestyle: Runtime
+    "F"    # Pyflakes: All codes
+]
+ignore = [
+    "E402",  # module level import not at top of file
+    "E701",  # multiple statements on one line (colon)
+    "E721",  # do not compare types, use isinstance()
+    "E741",  # do not use variables named l, O, or I
+    "F401",  # module imported but unused
+    "F821",  # undefined name
+    "F841"   # local variable name is assigned to but never used
+]
+
+# Allow fixes for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
\ No newline at end of file
diff --git a/resources/convert_dataset_to_nnunet.py b/resources/convert_dataset_to_nnunet.py
index 58be51fd8..333061d3c 100644
--- a/resources/convert_dataset_to_nnunet.py
+++ b/resources/convert_dataset_to_nnunet.py
@@ -51,7 +51,7 @@ def combine_labels(ref_img, file_out, masks):
     ref_img = nib.load(ref_img)
     combined = np.zeros(ref_img.shape).astype(np.uint8)
     for idx, arg in enumerate(masks):
-        file_in = Path(arg) 
+        file_in = Path(arg)
         if file_in.exists():
             img = nib.load(file_in)
             combined[img.get_fdata() > 0] = idx+1
@@ -60,26 +60,26 @@ def combine_labels(ref_img, file_out, masks):
     nib.save(nib.Nifti1Image(combined.astype(np.uint8), ref_img.affine), file_out)
 
 
-if __name__ == "__main__": 
+if __name__ == "__main__":
     """
-    Convert the downloaded TotalSegmentator dataset (after unzipping it) to nnUNet format and 
+    Convert the downloaded TotalSegmentator dataset (after unzipping it) to nnUNet format and
     generate dataset.json and splits_final.json
 
-    example usage: 
+    example usage:
    python convert_dataset_to_nnunet.py /my_downloads/TotalSegmentator_dataset /nnunet/raw/Dataset100_TotalSegmentator_part1 class_map_part_organs
 
    You must set nnUNet_raw and nnUNet_preprocessed environment variables before running this (see nnUNet documentation).
    """
-    dataset_path = Path(sys.argv[1])  # directory containining all the subjects
+    dataset_path = Path(sys.argv[1])  # directory containing all the subjects
     nnunet_path = Path(sys.argv[2])  # directory of the new nnunet dataset
 
-    # TotalSegmentator is made up of 5 models. Choose which one you want to produce. Choose from:
+    # TotalSegmentator is made up of 5 models. Choose which one you want to produce.
Choose from: # class_map_part_organs - # class_map_part_vertebrae - # class_map_part_cardiac - # class_map_part_muscles + # class_map_part_vertebrae + # class_map_part_cardiac + # class_map_part_muscles # class_map_part_ribs - class_map_name = sys.argv[3] + class_map_name = sys.argv[3] class_map = class_map_5_parts[class_map_name] diff --git a/resources/evaluate.py b/resources/evaluate.py index 40924bfdd..b265b8c58 100644 --- a/resources/evaluate.py +++ b/resources/evaluate.py @@ -10,7 +10,7 @@ from tqdm import tqdm from p_tqdm import p_map # package from: https://github.com/deepmind/surface-distance -from surface_distance import compute_surface_distances, compute_surface_dice_at_tolerance +from surface_distance import compute_surface_distances, compute_surface_dice_at_tolerance from totalsegmentator.map_to_binary import class_map_5_parts @@ -19,7 +19,7 @@ def dice_score(y_true, y_pred): """ Binary Dice score. Same results as sklearn f1 binary. """ - intersect = np.sum(y_true * y_pred) + intersect = np.sum(y_true * y_pred) denominator = np.sum(y_true) + np.sum(y_pred) f1 = (2 * intersect) / (denominator + 1e-6) return f1 @@ -33,7 +33,7 @@ def calc_metrics(subject, gt_dir=None, pred_dir=None, class_map=None): for idx, roi_name in class_map.items(): gt = gt_all == idx pred = pred_all == idx - + if gt.max() > 0 and pred.max() == 0: r[f"dice-{roi_name}"] = 0 r[f"surface_dice_3-{roi_name}"] = 0 @@ -42,7 +42,7 @@ def calc_metrics(subject, gt_dir=None, pred_dir=None, class_map=None): sd = compute_surface_distances(gt, pred, [1.5, 1.5, 1.5]) r[f"surface_dice_3-{roi_name}"] = compute_surface_dice_at_tolerance(sd, 3.0) # gt.max() == 0 which means we can not calculate any score because roi not in the image - else: + else: r[f"dice-{roi_name}"] = np.NaN r[f"surface_dice_3-{roi_name}"] = np.NaN return r @@ -52,10 +52,10 @@ def calc_metrics(subject, gt_dir=None, pred_dir=None, class_map=None): """ Calculate Dice score and normalized surface distance for your nnU-Net predictions. 
- example usage: + example usage: python evaluate.py ground_truth_dir predictions_dir """ - gt_dir = Path(sys.argv[1]) # directory containining all the subjects + gt_dir = Path(sys.argv[1]) # directory containing all the subjects pred_dir = Path(sys.argv[2]) # directory of the new nnunet dataset # class_map = class_map_5_parts["class_map_part_organs"] @@ -63,7 +63,7 @@ def calc_metrics(subject, gt_dir=None, pred_dir=None, class_map=None): # class_map = class_map_5_parts["class_map_part_cardiac"] # class_map = class_map_5_parts["class_map_part_muscles"] # class_map = class_map_5_parts["class_map_part_ribs"] - class_map_name = sys.argv[3] + class_map_name = sys.argv[3] class_map = class_map_5_parts[class_map_name] subjects = [x.stem.split(".")[0] for x in gt_dir.glob("*.nii.gz")] @@ -72,7 +72,7 @@ def calc_metrics(subject, gt_dir=None, pred_dir=None, class_map=None): res = p_map(partial(calc_metrics, gt_dir=gt_dir, pred_dir=pred_dir, class_map=class_map), subjects, num_cpus=8, disable=True) res = pd.DataFrame(res) - + for metric in ["dice", "surface_dice_3"]: res_all_rois = [] for roi_name in class_map.values(): diff --git a/resources/improvements_in_v2.md b/resources/improvements_in_v2.md index 1d569195b..0174fe170 100644 --- a/resources/improvements_in_v2.md +++ b/resources/improvements_in_v2.md @@ -15,7 +15,7 @@ total: skull, thyroid_gland, prostate, brachiocephalic_vein_left, brachiocephalic_vein_right, brachiocephalic_trunk, common_carotid_artery_left, common_carotid_artery_right, atrial_appendage_left, subclavian_artery_left, subclavian_artery_right, vertebrae_S1, sternum, costal_cartilages, pulmonary_vein, superior_vena_cava, kidney_cyst_left, kidney_cyst_right, spinal_cord ``` -appendicular_bones: +appendicular_bones: ``` patella, tibia, fibula, tarsal, metatarsal, phalanges_feet, ulna, radius, carpal, metacarpal, phalanges_hand ``` @@ -34,7 +34,7 @@ The following classes were moved from the `total` task to the `heartchambers_hig ``` heart_myocardium, heart_atrium_left, heart_ventricle_left, heart_atrium_right, heart_ventricle_right, pulmonary_artery ``` -`total` now only contains the overal class `heart` instead. +`total` now only contains the overall class `heart` instead. Some of these new classes were available in some preliminary version as additional tasks in v1. Now they are properly added. @@ -59,7 +59,7 @@ List of classes where we corrected some systemic errors in the labels (e.g. myoc * kidney We increased the number of training images from 1139 to 1559. 
We added the following images: -* more whole body images where TotalSegmentator failed before +* more whole body images where TotalSegmentator failed before * images of feet and hands (these were not included so far) * more images of the head * more images with bleedings in the abdomen where TotalSegmentator failed before diff --git a/resources/release.sh b/resources/release.sh index ea1c61666..397fb0f35 100755 --- a/resources/release.sh +++ b/resources/release.sh @@ -1,12 +1,12 @@ #!/bin/bash set -e # Exit on error -# Info: Have to run from within the resources directory otherwise pathes incorrect -# +# Info: Have to run from within the resources directory otherwise paths incorrect +# # Run first: tests/test_locally.py # # use nnunetv2 env -# +# # Usage: ./release.sh -> will ask for new version number # go to root of package @@ -43,7 +43,7 @@ git push origin "v${new_version}" python setup.py sdist bdist_wheel twine upload --skip-existing dist/* -# Step 5: Build and Push Docker Image +# Step 5: Build and Push Docker Image # (random error on my local machine; have to run on server) # docker build -t wasserth/totalsegmentator:${new_version} . # docker push wasserth/totalsegmentator:${new_version} diff --git a/resources/server_setup.md b/resources/server_setup.md index c16639453..a2aa1526d 100644 --- a/resources/server_setup.md +++ b/resources/server_setup.md @@ -10,7 +10,7 @@ terraform destroy -auto-approve ## Helpful commands for setting up the cloud server Updating code on server: -``` +``` cd ~/dev/TotalSegmentator git pull @@ -20,32 +20,32 @@ docker rm $(sudo docker ps -a -q -f status=exited) docker build -t totalsegmentator:master . docker run -d --restart always -p 80:5000 --gpus 'device=0' --ipc=host --name totalsegmentator-server-job -v /home/ubuntu/store:/app/store totalsegmentator:master /app/run_server.sh -``` +``` Run docker TotalSegmentator for test locally -``` +``` docker run --gpus 'device=0' --ipc=host -v /home/ubuntu/test:/workspace totalsegmentator:master TotalSegmentator -i /workspace/ct3mm_0000.nii.gz -o /workspace/test_output --fast --preview -``` +``` Run docker flask server for test locally -``` +``` docker run -p 80:5000 --gpus 'device=0' --ipc=host -v /home/jakob/dev/TotalSegmentator/store:/app/store totalsegmentator:master /app/run_server.sh -``` +``` Can only be killed via docker -``` +``` docker kill $(docker ps -q) -``` +``` -Run docker on server for production +Run docker on server for production (will automatically start after reboot) Have to setup docker.service once so docker will be available at system start -``` +``` systemctl enable docker.service -``` +``` Then this docker container will always run -``` +``` docker run -d --restart always -p 80:5000 --gpus 'device=0' --ipc=host --name totalsegmentator-server-job -v /home/ubuntu/store:/app/store totalsegmentator:master /app/run_server.sh -``` +``` See running containers ``` @@ -55,7 +55,7 @@ docker container ls Stop docker ``` docker stop totalsegmentator-server-job -docker rm $(sudo docker ps -a -q -f status=exited) +docker rm $(sudo docker ps -a -q -f status=exited) ``` See stdout of running docker container @@ -82,24 +82,24 @@ docker restart totalsegmentator-server-job ## Other commands Backup to local harddrive -``` +``` rsync -avz @:/mnt/data/server-store /mnt/jay_hdd/backup -``` +``` Systemd commands Start or stop only once -``` +``` systemctl start/stop/restart totalsegmentator_server -``` -Permanently start programm (automatic restart on error or reboot): -``` +``` +Permanently start program 
(automatic restart on error or reboot):
+```
 systemctl enable/disable totalsegmentator_server
-``` 
+```
 Check status
-``` 
+```
 systemctl status totalsegmentator_server
-``` 
+```
 
 ## Old commands
diff --git a/tests/test_end_to_end.py b/tests/test_end_to_end.py
index 500b143fe..1900863b6 100644
--- a/tests/test_end_to_end.py
+++ b/tests/test_end_to_end.py
@@ -22,8 +22,8 @@ def test_prediction_multilabel(self):
         self.assertTrue(images_equal, f"multilabel prediction not correct (nr_of_diff_voxels: {nr_of_diff_voxels})")
 
     def test_prediction_liver_roi_subset(self):
-        img_ref = nib.load(f"tests/reference_files/example_seg_roi_subset.nii.gz").get_fdata()
-        img_new = nib.load(f"tests/unittest_prediction_roi_subset.nii.gz").get_fdata()
+        img_ref = nib.load("tests/reference_files/example_seg_roi_subset.nii.gz").get_fdata()
+        img_new = nib.load("tests/unittest_prediction_roi_subset.nii.gz").get_fdata()
         # prediction is not completely deterministic therefore allow for small differences
         nr_of_diff_voxels = (img_ref != img_new).sum()
         images_equal = nr_of_diff_voxels < 20
@@ -39,8 +39,8 @@ def test_prediction_fast(self):
         self.assertTrue(images_equal, f"{roi} fast prediction not correct (nr_of_diff_voxels: {nr_of_diff_voxels})")
 
     def test_preview(self):
-        preview_exists = os.path.exists(f"tests/unittest_prediction_fast/preview_total.png")
-        self.assertTrue(preview_exists, f"Preview was not generated")
+        preview_exists = os.path.exists("tests/unittest_prediction_fast/preview_total.png")
+        self.assertTrue(preview_exists, "Preview was not generated")
 
     def test_prediction_multilabel_fast(self):
         img_ref = nib.load("tests/reference_files/example_seg_fast.nii.gz").get_fdata()
@@ -70,12 +70,12 @@ def test_lung_vessels(self):
         self.assertTrue(images_equal, f"{roi} prediction not correct")
 
     def test_tissue_types_wo_license(self):
-        no_output_file = not os.path.exists(f"tests/unittest_no_license.nii.gz")
-        self.assertTrue(no_output_file, f"A output file was generated even though no license was set.")
+        no_output_file = not os.path.exists("tests/unittest_no_license.nii.gz")
+        self.assertTrue(no_output_file, "An output file was generated even though no license was set.")
 
     def test_tissue_types_wrong_license(self):
-        no_output_file = not os.path.exists(f"tests/unittest_wrong_license.nii.gz")
-        self.assertTrue(no_output_file, f"A output file was generated even though the license was wrong.")
+        no_output_file = not os.path.exists("tests/unittest_wrong_license.nii.gz")
+        self.assertTrue(no_output_file, "An output file was generated even though the license was wrong.")
 
     def test_tissue_types(self):
         for roi in ["subcutaneous_fat", "skeletal_muscle", "torso_fat"]:
@@ -92,9 +92,9 @@ def test_appendicular_bones(self):
         self.assertTrue(images_equal, f"{roi} prediction not correct")
 
     def test_statistics(self):
-        stats_ref = json.load(open("tests/reference_files/example_seg_fast/statistics.json", "r"))
+        stats_ref = json.load(open("tests/reference_files/example_seg_fast/statistics.json"))
         stats_ref = pd.DataFrame(stats_ref)
-        stats_new = json.load(open("tests/unittest_prediction_fast/statistics.json", "r"))
+        stats_new = json.load(open("tests/unittest_prediction_fast/statistics.json"))
         stats_new = pd.DataFrame(stats_new)
 
         stats_equal = np.allclose(stats_ref.loc["volume"].values, stats_new.loc["volume"].values, rtol=3e-2, atol=3e-2)
@@ -111,7 +111,7 @@ def test_statistics(self):
    #     stats_new = pd.DataFrame(stats_new)
    #     stats_new = stats_new.fillna(0)
    #     # very big margin, but some of the radiomics features can change a lot if only a few voxels
-    #
# of the segmentation change. So this test is only to check that radiomics ran sucessfully. + # # of the segmentation change. So this test is only to check that radiomics ran successfully. # stats_equal = np.allclose(stats_ref.values, stats_new.values, rtol=3e-1, atol=3e-1) # self.assertTrue(stats_equal, "radiomics is not correct") diff --git a/tests/test_locally.py b/tests/test_locally.py index 12ccdf315..8f1b34bf5 100755 --- a/tests/test_locally.py +++ b/tests/test_locally.py @@ -1,11 +1,9 @@ import sys from pathlib import Path -from pathlib import Path import pytest import os import re -import sys import glob import shutil import subprocess @@ -25,12 +23,12 @@ from resources.evaluate import calc_metrics """ -Run a complete prediction locally with GPU and evaluate Dice score + +Run a complete prediction locally with GPU and evaluate Dice score + CPU/GPU usage + RAM/GPU memory usage + runtime. (This is not possible on github actions due to missing GPU) Info: -To get the CT file and create the multilable groundtruth files use +To get the CT file and create the multilable groundtruth files use python ~/dev/jakob_scripts/multiseg/eval/get_data_for_test_locally.py Usage: @@ -108,11 +106,11 @@ def reset_monitors(): def start_monitors(): # Create separate threads to monitor memory usage memory_thread = threading.Thread(target=memory_monitor) - memory_thread.daemon = True + memory_thread.daemon = True memory_thread.start() gpu_memory_thread = threading.Thread(target=gpu_memory_monitor) - gpu_memory_thread.daemon = True + gpu_memory_thread.daemon = True gpu_memory_thread.start() cpu_util_thread = threading.Thread(target=cpu_utilization_monitor) @@ -120,17 +118,17 @@ def start_monitors(): cpu_util_thread.start() gpu_util_thread = threading.Thread(target=gpu_utilization_monitor) - gpu_util_thread.daemon = True + gpu_util_thread.daemon = True gpu_util_thread.start() def are_logs_similar(last_log, new_log, cols, tolerance_percent=0.04): if last_log is None or new_log is None: print("Cannot compare logs because one of them is None.") return False - + # For these columns the values differ a lot between runs so we allow a larger margin tolerance_percent_large_diff = 0.2 - cols_large_diff = ["runtime_3mm", + cols_large_diff = ["runtime_3mm", # "memory_gpu_15mm", "memory_gpu_3mm", "cpu_utilization_15mm", "cpu_utilization_3mm", "gpu_utilization_15mm", "gpu_utilization_3mm"] @@ -204,7 +202,7 @@ def are_logs_similar(last_log, new_log, cols, tolerance_percent=0.04): subjects = [s.name.split(".")[0] for s in img_dir.glob("*.nii.gz")] res = [calc_metrics(s, gt_dir, pred_dir, class_map["total"]) for s in subjects] res = pd.DataFrame(res) - + print("Aggregate metrics...") for metric in ["dice", "surface_dice_3"]: res_all_rois = [] @@ -241,7 +239,7 @@ def are_logs_similar(last_log, new_log, cols, tolerance_percent=0.04): memory_gpu["15mm"], memory_gpu["3mm"], cpu_utilization["15mm"], cpu_utilization["3mm"], gpu_utilization["15mm"], gpu_utilization["3mm"], - platform.python_version(), torch.__version__, + platform.python_version(), torch.__version__, float(torch.version.cuda), int(torch.backends.cudnn.version()), torch.cuda.get_device_name(0)] diff --git a/tests/tests.sh b/tests/tests.sh index 358f0447f..1ab0ef523 100755 --- a/tests/tests.sh +++ b/tests/tests.sh @@ -1,6 +1,6 @@ set -e -# To run these tests simply do +# To run these tests simply do # ./tests.sh diff --git a/tests/tests_REMOVE.py b/tests/tests_REMOVE.py index 70cc301d8..8766e4eab 100755 --- a/tests/tests_REMOVE.py +++ b/tests/tests_REMOVE.py @@ -10,7 
+10,7 @@ def cleanup(): files_to_remove = glob.glob('tests/unittest_prediction*') # files_to_remove.append('tests/statistics.json') - + for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) @@ -32,7 +32,7 @@ def cleanup(): # example_ct.nii.gz: 36s, 3.0GB totalsegmentator('tests/reference_files/example_ct_sm.nii.gz', 'tests/unittest_prediction_roi_subset', roi_subset=['liver', 'brain'], device="cpu") pytest.main(['-v', 'tests/test_end_to_end.py::test_end_to_end::test_prediction_liver_roi_subset']) - + # Test organ predictions - fast - statistics # 2 cpus: (statistics <1s) # example_ct_sm.nii.gz: 13s, 4.1GB @@ -45,7 +45,7 @@ def cleanup(): # Test organ predictions - fast - multilabel totalsegmentator('tests/reference_files/example_ct_sm.nii.gz', 'tests/unittest_prediction_fast.nii.gz', ml=True, fast=True, device="cpu") pytest.main(['-v', 'tests/test_end_to_end.py::test_end_to_end::test_prediction_multilabel_fast']) - + # Test organ predictions - fast - multilabel - force split totalsegmentator('tests/reference_files/example_ct.nii.gz', 'tests/unittest_prediction_fast_force_split.nii.gz', ml=True, fast=True, force_split=True, device="cpu") pytest.main(['-v', 'tests/test_end_to_end.py::test_end_to_end::test_prediction_multilabel_fast_force_split']) diff --git a/tests/update_test_files.sh b/tests/update_test_files.sh index bc4f43834..558735573 100755 --- a/tests/update_test_files.sh +++ b/tests/update_test_files.sh @@ -11,9 +11,9 @@ TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/referenc TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_fast.nii.gz --fast --ml -d cpu TotalSegmentator -i tests/reference_files/example_ct.nii.gz -o tests/reference_files/example_seg_fast_force_split.nii.gz --fast --ml -fs -d cpu TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_fast_body_seg.nii.gz --fast --ml -bs -d cpu -TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_lung_vessels -ta lung_vessels -d cpu +TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_lung_vessels -ta lung_vessels -d cpu TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_tissue_types -ta tissue_types -d cpu -l $1 -TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_appendicular_bones -ta appendicular_bones -d cpu +TotalSegmentator -i tests/reference_files/example_ct_sm.nii.gz -o tests/reference_files/example_seg_appendicular_bones -ta appendicular_bones -d cpu TotalSegmentator -i tests/reference_files/example_ct_dicom -o tests/reference_files/example_seg_dicom.nii.gz --fast --ml -d cpu # Manually check if segmentations in tests/reference_files/example_seg_fast_force_split.nii.gz look correct diff --git a/totalsegmentator/alignment.py b/totalsegmentator/alignment.py index c24467692..48715c36a 100644 --- a/totalsegmentator/alignment.py +++ b/totalsegmentator/alignment.py @@ -19,7 +19,7 @@ def as_closest_canonical_nifti(path_in, path_out): img_in = nib.load(path_in) img_out = nib.as_closest_canonical(img_in) nib.save(img_out, path_out) - + def undo_canonical(img_can, img_orig): """ diff --git a/totalsegmentator/bin/TotalSegmentator.py b/totalsegmentator/bin/TotalSegmentator.py index b1f21935e..aff94b4dc 100644 --- a/totalsegmentator/bin/TotalSegmentator.py +++ b/totalsegmentator/bin/TotalSegmentator.py @@ -13,23 +13,23 
@@ def main(): epilog="Written by Jakob Wasserthal. If you use this tool please cite https://pubs.rsna.org/doi/10.1148/ryai.230024") parser.add_argument("-i", metavar="filepath", dest="input", - help="CT nifti image or folder of dicom slices", + help="CT nifti image or folder of dicom slices", type=lambda p: Path(p).absolute(), required=True) parser.add_argument("-o", metavar="directory", dest="output", - help="Output directory for segmentation masks", + help="Output directory for segmentation masks", type=lambda p: Path(p).absolute(), required=True) parser.add_argument("-ot", "--output_type", choices=["nifti", "dicom"], help="Select if segmentations shall be saved as Nifti or as Dicom RT Struct image.", default="nifti") - + parser.add_argument("-ml", "--ml", action="store_true", help="Save one multilabel image for all classes", default=False) parser.add_argument("-nr", "--nr_thr_resamp", type=int, help="Nr of threads for resampling", default=1) - parser.add_argument("-ns", "--nr_thr_saving", type=int, help="Nr of threads for saving segmentations", + parser.add_argument("-ns", "--nr_thr_saving", type=int, help="Nr of threads for saving segmentations", default=6) parser.add_argument("-f", "--fast", action="store_true", help="Run faster lower resolution model (3mm)", @@ -38,22 +38,22 @@ def main(): parser.add_argument("-ff", "--fastest", action="store_true", help="Run even faster lower resolution model (6mm)", default=False) - parser.add_argument("-t", "--nora_tag", type=str, + parser.add_argument("-t", "--nora_tag", type=str, help="tag in nora as mask. Pass nora project id as argument.", default="None") - parser.add_argument("-p", "--preview", action="store_true", + parser.add_argument("-p", "--preview", action="store_true", help="Generate a png preview of segmentation", default=False) - # cerebral_bleed: Intracerebral hemorrhage + # cerebral_bleed: Intracerebral hemorrhage # liver_vessels: hepatic vessels parser.add_argument("-ta", "--task", choices=["total", "body", - "lung_vessels", "cerebral_bleed", "hip_implant", "coronary_arteries", + "lung_vessels", "cerebral_bleed", "hip_implant", "coronary_arteries", "pleural_pericard_effusion", "test", - "appendicular_bones", "tissue_types", "heartchambers_highres", + "appendicular_bones", "tissue_types", "heartchambers_highres", "face", "vertebrae_body"], - # future: liver_vessels, head, + # future: liver_vessels, head, help="Select which model to use. This determines what is predicted.", default="total") @@ -65,38 +65,38 @@ def main(): parser.add_argument("-rsr", "--roi_subset_robust", type=str, nargs="+", help="Like roi_subset but uses a slower but more robust model to find the rois.") - parser.add_argument("-s", "--statistics", action="store_true", + parser.add_argument("-s", "--statistics", action="store_true", help="Calc volume (in mm3) and mean intensity. Results will be in statistics.json", default=False) - parser.add_argument("-r", "--radiomics", action="store_true", + parser.add_argument("-r", "--radiomics", action="store_true", help="Calc radiomics features. Requires pyradiomics. Results will be in statistics_radiomics.json", default=False) - parser.add_argument("-sii", "--stats_include_incomplete", action="store_true", + parser.add_argument("-sii", "--stats_include_incomplete", action="store_true", help="Normally statistics are only calculated for ROIs which are not cut off by the beginning or end of image. 
Use this option to calc anyways.", default=False) - parser.add_argument("-cp", "--crop_path", help="Custom path to masks used for cropping. If not set will use output directory.", + parser.add_argument("-cp", "--crop_path", help="Custom path to masks used for cropping. If not set will use output directory.", type=lambda p: Path(p).absolute(), default=None) - parser.add_argument("-bs", "--body_seg", action="store_true", + parser.add_argument("-bs", "--body_seg", action="store_true", help="Do initial rough body segmentation and crop image to body region", default=False) - + parser.add_argument("-fs", "--force_split", action="store_true", help="Process image in 3 chunks for less memory consumption", default=False) - parser.add_argument("-ss", "--skip_saving", action="store_true", + parser.add_argument("-ss", "--skip_saving", action="store_true", help="Skip saving of segmentations for faster runtime if you are only interested in statistics.", default=False) # Used for server to make statistics file have the same classes as images are created - parser.add_argument("-ndm", "--no_derived_masks", action="store_true", + parser.add_argument("-ndm", "--no_derived_masks", action="store_true", help="Do not create derived masks (e.g. skin from body mask).", default=False) - parser.add_argument("-v1o", "--v1_order", action="store_true", + parser.add_argument("-v1o", "--v1_order", action="store_true", help="In multilabel file order classes as in v1. New v2 classes will be removed.", default=False) @@ -113,7 +113,7 @@ def main(): parser.add_argument("-v", "--verbose", action="store_true", help="Show more intermediate output", default=False) - parser.add_argument("-l", "--license_number", help="Set license number. Needed for some tasks. Only needed once, then stored in config file.", + parser.add_argument("-l", "--license_number", help="Set license number. Needed for some tasks. Only needed once, then stored in config file.", type=str, default=None) # Tests: diff --git a/totalsegmentator/bin/crop_to_body.py b/totalsegmentator/bin/crop_to_body.py index d157a96aa..47b153199 100644 --- a/totalsegmentator/bin/crop_to_body.py +++ b/totalsegmentator/bin/crop_to_body.py @@ -20,11 +20,11 @@ def main(): epilog="Written by Jakob Wasserthal. 
If you use this tool please cite https://pubs.rsna.org/doi/10.1148/ryai.230024") parser.add_argument("-i", metavar="filepath", dest="input", - help="CT nifti image", + help="CT nifti image", type=lambda p: Path(p).absolute(), required=True) parser.add_argument("-o", metavar="filepath", dest="output", - help="Cropped nifti image", + help="Cropped nifti image", type=lambda p: Path(p).absolute(), required=True) parser.add_argument("-t", "--only_trunc", action="store_true", help="Crop to trunc instead of entire body.", @@ -32,9 +32,9 @@ def main(): parser.add_argument("-nr", "--nr_thr_resamp", type=int, help="Nr of threads for resampling", default=1) - parser.add_argument("-ns", "--nr_thr_saving", type=int, help="Nr of threads for saving segmentations", + parser.add_argument("-ns", "--nr_thr_saving", type=int, help="Nr of threads for saving segmentations", default=6) - + parser.add_argument("-d", "--device", choices=["gpu", "cpu"], help="Device to run on (default: gpu).", default="gpu") @@ -64,8 +64,8 @@ def main(): if not quiet: print("Generating rough body segmentation...") body_seg, _ = nnUNet_predict_image(args.input, None, 300, model="3d_fullres", folds=[0], trainer="nnUNetTrainer", tta=False, multilabel_image=True, resample=6.0, - crop=None, crop_path=None, task_name="body", nora_tag="None", preview=False, - save_binary=False, nr_threads_resampling=args.nr_thr_resamp, nr_threads_saving=1, + crop=None, crop_path=None, task_name="body", nora_tag="None", preview=False, + save_binary=False, nr_threads_resampling=args.nr_thr_resamp, nr_threads_saving=1, crop_addon=crop_addon, quiet=quiet, verbose=verbose, test=0, device=device) if verbose: print(f"Rough body segmentation generated in {time.time()-st:.2f}s") diff --git a/totalsegmentator/bin/totalseg_combine_masks.py b/totalsegmentator/bin/totalseg_combine_masks.py index 3ee1544dd..33a268db1 100644 --- a/totalsegmentator/bin/totalseg_combine_masks.py +++ b/totalsegmentator/bin/totalseg_combine_masks.py @@ -24,14 +24,14 @@ def main(): epilog="Written by Jakob Wasserthal. 
If you use this tool please cite https://pubs.rsna.org/doi/10.1148/ryai.230024") parser.add_argument("-i", metavar="directory", dest="mask_dir", - help="TotalSegmentator output directory containing all the masks", + help="TotalSegmentator output directory containing all the masks", type=lambda p: Path(p).absolute(), required=True) parser.add_argument("-o", metavar="filepath", dest="output", - help="Output path for combined mask", + help="Output path for combined mask", type=lambda p: Path(p).absolute(), required=True) - parser.add_argument("-m", "--masks", type=str, choices=["lung", "lung_left", "lung_right", + parser.add_argument("-m", "--masks", type=str, choices=["lung", "lung_left", "lung_right", "vertebrae", "ribs", "vertebrae_ribs", "heart", "pelvis", "body"], help="The type of masks you want to combine", required=True) diff --git a/totalsegmentator/bin/totalseg_download_weights.py b/totalsegmentator/bin/totalseg_download_weights.py index 6fbd697aa..dde08dbe6 100644 --- a/totalsegmentator/bin/totalseg_download_weights.py +++ b/totalsegmentator/bin/totalseg_download_weights.py @@ -19,7 +19,7 @@ def main(): parser.add_argument("-t", "--task", choices=["total", "total_fast", "lung_vessels", "cerebral_bleed", "hip_implant", "coronary_arteries", "pleural_pericard_effusion", - "body", "body_fast", "vertebrae_body", + "body", "body_fast", "vertebrae_body", "heartchambers_highres", "appendicular_bones", "tissue_types", "face"], help="Task for which to download the weights", default="total") diff --git a/totalsegmentator/bin/totalseg_import_weights.py b/totalsegmentator/bin/totalseg_import_weights.py index 91c185e58..e42054e73 100644 --- a/totalsegmentator/bin/totalseg_import_weights.py +++ b/totalsegmentator/bin/totalseg_import_weights.py @@ -11,13 +11,13 @@ def main(): """ Import manually downloaded weights (zip file) to the right folder. - DEPRECATED! This is no longer needed in v2.0.0 and later. + DEPRECATED! This is no longer needed in v2.0.0 and later. 
""" parser = argparse.ArgumentParser(description="Import manually downloaded weights.", epilog="Written by Jakob Wasserthal.") parser.add_argument("-i", "--weights_file", - help="path to the weights zip file", + help="path to the weights zip file", type=lambda p: Path(p).absolute(), required=True) args = parser.parse_args() diff --git a/totalsegmentator/bin/totalseg_set_license.py b/totalsegmentator/bin/totalseg_set_license.py index 0620ea013..7d4d5e0ad 100644 --- a/totalsegmentator/bin/totalseg_set_license.py +++ b/totalsegmentator/bin/totalseg_set_license.py @@ -29,7 +29,7 @@ def main(): setup_totalseg() # create config file if not exists set_license_number(args.license_number) - print("License has been sucessfully saved.") + print("License has been successfully saved.") if __name__ == "__main__": diff --git a/totalsegmentator/config.py b/totalsegmentator/config.py index dbb2f8522..eac3cde33 100644 --- a/totalsegmentator/config.py +++ b/totalsegmentator/config.py @@ -56,7 +56,7 @@ def setup_totalseg(totalseg_id=None): totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) else: if totalseg_id is None: @@ -81,7 +81,7 @@ def set_license_number(license_number): totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) config["license_number"] = license_number with open(totalseg_config_file, "w") as f: @@ -94,7 +94,7 @@ def get_license_number(): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) license_number = config["license_number"] if "license_number" in config else "" else: @@ -104,25 +104,25 @@ def get_license_number(): def is_valid_license(license_number): try: - url = f"http://backend.totalsegmentator.com:80/" + url = "http://backend.totalsegmentator.com:80/" r = requests.post(url + "is_valid_license_number", json={"license_number": license_number}, timeout=5) if r.ok: return r.json()['status'] == "valid_license" else: - print(f"An internal server error occured. status code: {r.status_code}") + print(f"An internal server error occurred. status code: {r.status_code}") print(f"message: {r.json()['message']}") return False except Exception as e: - print(f"An Exception occured: {e}") + print(f"An Exception occurred: {e}") return False - + def has_valid_license(): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) if "license_number" in config: license_number = config["license_number"] @@ -130,10 +130,10 @@ def has_valid_license(): return "missing_license", "ERROR: A license number has not been set so far." else: return "missing_config_file", f"ERROR: Could not find config file: {totalseg_config_file}" - + if is_valid_license(license_number): return "yes", "SUCCESS: License is valid." - else: + else: return "invalid_license", f"ERROR: Invalid license number ({license_number}). Please check your license number or contact support." 
@@ -142,7 +142,7 @@ def has_valid_license_offline(): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) if "license_number" in config: license_number = config["license_number"] @@ -150,10 +150,10 @@ def has_valid_license_offline(): return "missing_license", "ERROR: A license number has not been set so far." else: return "missing_config_file", f"ERROR: Could not find config file: {totalseg_config_file}" - + if len(license_number) == 18: return "yes", "SUCCESS: License is valid." - else: + else: return "invalid_license", f"ERROR: Invalid license number ({license_number}). Please check your license number or contact support." @@ -161,7 +161,7 @@ def increase_prediction_counter(): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) config["prediction_counter"] += 1 with open(totalseg_config_file, "w") as f: @@ -180,7 +180,7 @@ def get_config_key(key_name): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) if key_name in config: return config[key_name] @@ -191,7 +191,7 @@ def set_config_key(key_name, value): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) config[key_name] = value with open(totalseg_config_file, "w") as f: @@ -203,13 +203,13 @@ def set_config_key(key_name, value): def send_usage_stats(config, params): if config is not None and config["send_usage_stats"]: - + params["roi_subset"] = "" if params["roi_subset"] is None else "-".join(params["roi_subset"]) license_number = get_license_number() try: st = time.time() - url = f"http://backend.totalsegmentator.com:80/" + url = "http://backend.totalsegmentator.com:80/" r = requests.post(url + "log_totalseg_run", json={"totalseg_id": config["totalseg_id"], "prediction_counter": config["prediction_counter"], @@ -234,5 +234,5 @@ def send_usage_stats(config, params): # print(f"message: {r.json()['message']}") # print(f"Request took {time.time()-st:.3f}s") except Exception as e: - # print(f"An Exception occured: {e}") + # print(f"An Exception occurred: {e}") pass diff --git a/totalsegmentator/cropping.py b/totalsegmentator/cropping.py index 7ece914f4..56dbc4123 100644 --- a/totalsegmentator/cropping.py +++ b/totalsegmentator/cropping.py @@ -11,7 +11,7 @@ def get_bbox_from_mask(mask, outside_value=-900, addon=0): if type(addon) is int: addon = [addon] * 3 - if (mask > outside_value).sum() == 0: + if (mask > outside_value).sum() == 0: print("WARNING: Could not crop because no foreground detected") minzidx, maxzidx = 0, mask.shape[0] minxidx, maxxidx = 0, mask.shape[1] @@ -39,7 +39,7 @@ def get_bbox_from_mask(mask, outside_value=-900, addon=0): def crop_to_bbox(image, bbox): """ - image: 3d nd.array + image: 3d ndarray bbox: list of lists [[minx_idx, maxx_idx], [miny_idx, maxy_idx], [minz_idx, maxz_idx]] Indices of bbox must be in voxel coordinates (not in world space) """ @@ -76,26 +76,26 @@ def crop_to_mask(img_in, mask_img, addon=[0,0,0], dtype=None, 
verbose=False): """ Crops a nifti image to a mask and adapts the affine accordingly. - img_in: nifti image - mask_img: nifti image + img_in: nifti image + mask_img: nifti image addon = addon in mm along each axis dtype: output dtype Returns a nifti image. """ - # This is needed for body mask with sometimes does not have the same shape as the + # This is needed for body mask with sometimes does not have the same shape as the # input image because it was generated on a lower resolution. - # (normally the body mask should be resampled to the original resolution, but it + # (normally the body mask should be resampled to the original resolution, but it # might have been generated by a different program) # This is quite slow for large images. Since normally not needed we remove it. - # - # print("Transforming crop mask to img space:") + # + # print("Transforming crop mask to img space:") # print(f" before: {mask_img.shape}") # mask_img = nibabel.processing.resample_from_to(mask_img, img_in, order=0) # print(f" after: {mask_img.shape}") mask = mask_img.get_fdata() - + addon = (np.array(addon) / img_in.header.get_zooms()).astype(int) # mm to voxels bbox = get_bbox_from_mask(mask, outside_value=0, addon=addon) @@ -143,4 +143,3 @@ def undo_crop_nifti(img_path, ref_img_path, bbox, out_path): img_out = undo_crop(img, ref_img, bbox) nib.save(img_out, out_path) - \ No newline at end of file diff --git a/totalsegmentator/excel.py b/totalsegmentator/excel.py index 68057abfa..ba3af5e16 100644 --- a/totalsegmentator/excel.py +++ b/totalsegmentator/excel.py @@ -1,4 +1,3 @@ - def set_xlsx_column_width_to_content(file_name): from openpyxl import load_workbook from openpyxl.utils import get_column_letter diff --git a/totalsegmentator/libs.py b/totalsegmentator/libs.py index 48acc03ad..8c3cbe6db 100644 --- a/totalsegmentator/libs.py +++ b/totalsegmentator/libs.py @@ -22,9 +22,9 @@ Helpers to suppress stdout prints from nnunet https://stackoverflow.com/questions/2828953/silence-the-stdout-of-a-function-in-python-without-trashing-sys-stdout-and-resto """ -class DummyFile(object): +class DummyFile: def write(self, x): pass - def flush(self): pass + def flush(self): pass @contextlib.contextmanager def nostdout(verbose=False): @@ -42,7 +42,7 @@ def download_model_with_license_and_unpack(task_name, config_dir): totalseg_dir = get_totalseg_dir() totalseg_config_file = totalseg_dir / "config.json" if totalseg_config_file.exists(): - with open(totalseg_config_file, "r") as f: + with open(totalseg_config_file) as f: config = json.load(f) license_number = config["license_number"] else: @@ -50,18 +50,18 @@ def download_model_with_license_and_unpack(task_name, config_dir): return False tempfile = config_dir / "tmp_download_file.zip" - url = f"http://backend.totalsegmentator.com:80/" + url = "http://backend.totalsegmentator.com:80/" # Download try: st = time.time() r = requests.post(url + "download_weights", json={"license_number": license_number, - "task": task_name}, + "task": task_name}, timeout=300, stream=True) r.raise_for_status() # Raise an exception for HTTP errors (4xx, 5xx) - + if r.ok: with open(tempfile, "wb") as f: # without progress bar @@ -82,7 +82,7 @@ def download_model_with_license_and_unpack(task_name, config_dir): if r.json()['status'] == "invalid_license": print(f"ERROR: Invalid license number ({license_number}). 
Please check your license number or contact support.") sys.exit(1) - + except Exception as e: raise e finally: @@ -97,7 +97,7 @@ def download_url_and_unpack(url, config_dir): # print("Disabling HTTP/1.0") # else: # import http.client - # # helps to solve incomplete read erros + # # helps to solve incomplete read errors # # https://stackoverflow.com/questions/37816596/restrict-request-to-only-ask-for-http-1-0-to-prevent-chunking-error # http.client.HTTPConnection._http_vsn = 10 # http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0' @@ -216,7 +216,7 @@ def download_pretrained_weights(task_id): # WEIGHTS_URL = url + "/static/totalseg_v2/Dataset300_body_6mm_1559subj.zip" WEIGHTS_URL = url + "/v2.0.0-weights/Dataset300_body_6mm_1559subj.zip" - # Models from other projects + # Models from other projects elif task_id == 258: weights_path = config_dir / "Dataset258_lung_vessels_248subj" # WEIGHTS_URL = "https://zenodo.org/record/7064718/files/Task258_lung_vessels_248subj.zip?download=1" @@ -378,7 +378,7 @@ def compress_nifti(file_in, file_out, dtype=np.int32, force_3d=True): def check_if_shape_and_affine_identical(img_1, img_2): - + if not np.array_equal(img_1.affine, img_2.affine): print("Affine in:") print(img_1.affine) diff --git a/totalsegmentator/map_to_binary.py b/totalsegmentator/map_to_binary.py index 5c4ec50d2..196101b80 100644 --- a/totalsegmentator/map_to_binary.py +++ b/totalsegmentator/map_to_binary.py @@ -262,12 +262,12 @@ 1: "vertebrae_body" }, "heartchambers_highres": { - 1: "heart_myocardium", - 2: "heart_atrium_left", - 3: "heart_ventricle_left", - 4: "heart_atrium_right", - 5: "heart_ventricle_right", - 6: "aorta", + 1: "heart_myocardium", + 2: "heart_atrium_left", + 3: "heart_ventricle_left", + 4: "heart_atrium_right", + 5: "heart_ventricle_right", + 6: "aorta", 7: "pulmonary_artery" }, "appendicular_bones": { @@ -306,8 +306,8 @@ commercial_models = { "heartchambers_highres": 301, - "appendicular_bones": 304, - "tissue_types": 481, + "appendicular_bones": 304, + "tissue_types": 481, "vertebrae_body": 302, "face": 303 } @@ -435,8 +435,8 @@ # (the extra rib would start from C7) # -> this has label rib_1 # - # Quite often only 11 ribs (12. ribs probably so small that not found). Those - # cases often wrongly segmented. + # Quite often only 11 ribs (12. ribs probably so small that not found). Those + # cases often wrongly segmented. 
"class_map_part_ribs": { 1: "rib_left_1", 2: "rib_left_2", diff --git a/totalsegmentator/map_to_total.py b/totalsegmentator/map_to_total.py index 841b9e454..1fa03eb3c 100644 --- a/totalsegmentator/map_to_total.py +++ b/totalsegmentator/map_to_total.py @@ -23,10 +23,10 @@ # "torso_fat": "body", "heart_myocardium": "heart", - "heart_atrium_left": "heart", - "heart_ventricle_left": "heart", - "heart_atrium_right": "heart", - "heart_ventricle_right": "heart", + "heart_atrium_left": "heart", + "heart_ventricle_left": "heart", + "heart_atrium_right": "heart", + "heart_ventricle_right": "heart", "pulmonary_artery": "heart", "face": "skull" diff --git a/totalsegmentator/nifti_ext_header.py b/totalsegmentator/nifti_ext_header.py index 5f11e7471..af24b16bd 100644 --- a/totalsegmentator/nifti_ext_header.py +++ b/totalsegmentator/nifti_ext_header.py @@ -15,7 +15,7 @@ def add_label_map_to_nifti(img_in, label_map): img: nifti image label_map: a dictionary with label ids and names | a list of names and a running id will be generated starting at 1 - + returns: nifti image """ data = img_in.get_fdata() @@ -28,8 +28,8 @@ def add_label_map_to_nifti(img_in, label_map): colors = [[255,0,0],[0,255,0],[0,0,255],[255,255,0],[255,0,255],[0,255,255],[255,128,0],[255,0,128],[128,255,128],[0,128,255],[128,128,128],[185,170,155]] xmlpre = ' ' - - body = f'' + + body = '' for label_id, label_name in label_map.items(): rgb = colors[label_id%len(colors)] body += f'\n' diff --git a/totalsegmentator/nnunet.py b/totalsegmentator/nnunet.py index 12976ae75..bd059f5f8 100644 --- a/totalsegmentator/nnunet.py +++ b/totalsegmentator/nnunet.py @@ -73,7 +73,7 @@ def _get_full_task_name(task_id: int, src: str="raw"): def contains_empty_img(imgs): """ - imgs: List of image pathes + imgs: List of image paths """ is_empty = True for img in imgs: @@ -88,7 +88,7 @@ def nnUNet_predict(dir_in, dir_out, task_id, model="3d_fullres", folds=None, """ Identical to bash function nnUNet_predict - folds: folds to use for prediction. Default is None which means that folds will be detected + folds: folds to use for prediction. Default is None which means that folds will be detected automatically in the model output folder. for all folds: None for only fold 0: [0] @@ -137,7 +137,7 @@ def nnUNetv2_predict(dir_in, dir_out, task_id, model="3d_fullres", folds=None, """ Identical to bash function nnUNetv2_predict - folds: folds to use for prediction. Default is None which means that folds will be detected + folds: folds to use for prediction. Default is None which means that folds will be detected automatically in the model output folder. 
for all folds: None for only fold 0: [0] @@ -213,7 +213,7 @@ def nnUNetv2_predict(dir_in, dir_out, task_id, model="3d_fullres", folds=None, # predictor.predict_from_files(dir_in, dir_out, # save_probabilities=save_probabilities, overwrite=not continue_prediction, # num_processes_preprocessing=npp, num_processes_segmentation_export=nps, - # folder_with_segs_from_prev_stage=prev_stage_predictions, + # folder_with_segs_from_prev_stage=prev_stage_predictions, # num_parts=num_parts, part_id=part_id) @@ -232,10 +232,10 @@ def save_segmentation_nifti(class_map_item, tmp_dir=None, file_out=None, nora_ta def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=None, - trainer="nnUNetTrainerV2", tta=False, multilabel_image=True, - resample=None, crop=None, crop_path=None, task_name="total", nora_tag="None", preview=False, + trainer="nnUNetTrainerV2", tta=False, multilabel_image=True, + resample=None, crop=None, crop_path=None, task_name="total", nora_tag="None", preview=False, save_binary=False, nr_threads_resampling=1, nr_threads_saving=6, force_split=False, - crop_addon=[3,3,3], roi_subset=None, output_type="nifti", + crop_addon=[3,3,3], roi_subset=None, output_type="nifti", statistics=False, quiet=False, verbose=False, test=0, skip_saving=False, device="cuda", exclude_masks_at_border=True, no_derived_masks=False, v1_order=False): @@ -273,7 +273,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N # Workaround to be able to access file_in on windows (see issue #106) # if platform.system() == "Windows": # file_in = file_in.NamedTemporaryFile(delete = False) - # file_in.close() + # file_in.close() # if not multilabel_image: # shutil.copy(file_in, file_out / "input_file.nii.gz") @@ -285,7 +285,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N if len(img_in_orig.shape) > 3: print(f"WARNING: Input image has {len(img_in_orig.shape)} dimensions. 
Only using first three dimensions.") img_in_orig = nib.Nifti1Image(img_in_orig.get_fdata()[:,:,:,0], img_in_orig.affine) - + # takes ~0.9s for medium image img_in = nib.Nifti1Image(img_in_orig.get_fdata(), img_in_orig.affine) # copy img_in_orig @@ -305,7 +305,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N img_in = as_closest_canonical(img_in) if resample is not None: - if not quiet: print(f"Resampling...") + if not quiet: print("Resampling...") st = time.time() img_in_shape = img_in.shape img_in_zooms = img_in.header.get_zooms() @@ -329,7 +329,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N if force_split: do_triple_split = True if do_triple_split: - if not quiet: print(f"Splitting into subparts...") + if not quiet: print("Splitting into subparts...") img_parts = ["s01", "s02", "s03"] third = img_in_rsp.shape[2] // 3 margin = 20 # set margin with fixed values to avoid rounding problem if using percentage of third @@ -342,7 +342,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N tmp_dir / "s03_0000.nii.gz") st = time.time() - if multimodel: # if running multiple models + if multimodel: # if running multiple models # only compute model parts containing the roi subset if roi_subset is not None: @@ -385,9 +385,9 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N nib.save(nib.Nifti1Image(seg_combined[img_part], img_in_rsp.affine), tmp_dir / f"{img_part}.nii.gz") elif test == 1: print("WARNING: Using reference seg instead of prediction for testing.") - shutil.copy(Path("tests") / "reference_files" / "example_seg.nii.gz", tmp_dir / f"s01.nii.gz") + shutil.copy(Path("tests") / "reference_files" / "example_seg.nii.gz", tmp_dir / "s01.nii.gz") else: - if not quiet: print(f"Predicting...") + if not quiet: print("Predicting...") if test == 0: with nostdout(verbose): # nnUNet_predict(tmp_dir, tmp_dir, task_id, model, folds, trainer, tta, @@ -399,8 +399,8 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N # shutil.copy(Path("tests") / "reference_files" / "example_seg_fast.nii.gz", tmp_dir / f"s01.nii.gz") elif test == 3: print("WARNING: Using reference seg instead of prediction for testing.") - shutil.copy(Path("tests") / "reference_files" / "example_seg_lung_vessels.nii.gz", tmp_dir / f"s01.nii.gz") - if not quiet: print(" Predicted in {:.2f}s".format(time.time() - st)) + shutil.copy(Path("tests") / "reference_files" / "example_seg_lung_vessels.nii.gz", tmp_dir / "s01.nii.gz") + if not quiet: print(f" Predicted in {time.time() - st:.2f}s") # Combine image subparts back to one image if do_triple_split: @@ -431,20 +431,20 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N if preview: from totalsegmentator.preview import generate_preview - # Generate preview before upsampling so it is faster and still in canonical space + # Generate preview before upsampling so it is faster and still in canonical space # for better orientation. 
if not quiet: print("Generating preview...") st = time.time() smoothing = 20 preview_dir = file_out.parent if multilabel_image else file_out generate_preview(img_in_rsp, preview_dir / f"preview_{task_name}.png", img_pred.get_fdata(), smoothing, task_name) - if not quiet: print(" Generated in {:.2f}s".format(time.time() - st)) + if not quiet: print(f" Generated in {time.time() - st:.2f}s") # Statistics calculated on the 3mm downsampled image are very similar to statistics # calculated on the original image. Volume often completely identical. For intensity # some more change but still minor. # - # Speed: + # Speed: # stats on 1.5mm: 37s # stats on 3.0mm: 4s -> great improvement if statistics: @@ -458,11 +458,11 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N if resample is not None: if not quiet: print("Resampling...") - if verbose: print(f" back to original shape: {img_in_shape}") + if verbose: print(f" back to original shape: {img_in_shape}") # Use force_affine otherwise output affine sometimes slightly off (which then is even increased # by undo_canonical) img_pred = change_spacing(img_pred, [resample, resample, resample], img_in_shape, - order=0, dtype=np.uint8, nr_cpus=nr_threads_resampling, + order=0, dtype=np.uint8, nr_cpus=nr_threads_resampling, force_affine=img_in.affine) if verbose: print("Undoing canonical...") @@ -490,7 +490,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N file_out.mkdir(exist_ok=True, parents=True) save_mask_as_rtstruct(img_data, selected_classes, file_in_dcm, file_out / "segmentations.dcm") else: - # Copy header to make output header exactly the same as input. But change dtype otherwise it will be + # Copy header to make output header exactly the same as input. But change dtype otherwise it will be # float or int and therefore the masks will need a lot more space. # (infos on header: https://nipy.org/nibabel/nifti_images.html) new_header = img_in_orig.header.copy() @@ -519,7 +519,7 @@ def nnUNet_predict_image(file_in, file_out, task_id, model="3d_fullres", folds=N file_out.mkdir(exist_ok=True, parents=True) if np.prod(img_data.shape) > 512*512*1000: - print(f"Shape of output image is very big. Setting nr_threads_saving=1 to save memory.") + print("Shape of output image is very big. Setting nr_threads_saving=1 to save memory.") nr_threads_saving = 1 # Code for single threaded execution (runtime:24s) diff --git a/totalsegmentator/postprocessing.py b/totalsegmentator/postprocessing.py index 437fc349c..5775b71a4 100644 --- a/totalsegmentator/postprocessing.py +++ b/totalsegmentator/postprocessing.py @@ -133,7 +133,7 @@ def extract_skin(ct_img, body_img): # Segment by density # Roughly the skin density range. 
Made large to make segmentation not have holes # (0 to 250 would have many small holes in skin) - density_mask = (ct > -200) & (ct < 250) + density_mask = (ct > -200) & (ct < 250) skin[~density_mask] = 0 # Fill holes diff --git a/totalsegmentator/preview.py b/totalsegmentator/preview.py index d7d5dcc72..0e913fcaa 100644 --- a/totalsegmentator/preview.py +++ b/totalsegmentator/preview.py @@ -22,7 +22,7 @@ roi_groups = { "total": [ ["humerus_left", "humerus_right", "scapula_left", "scapula_right", "clavicula_left", - "clavicula_right", "femur_left", "femur_right", "hip_left", "hip_right", "sacrum", + "clavicula_right", "femur_left", "femur_right", "hip_left", "hip_right", "sacrum", # "patella", "tibia", "fibula", "tarsal", "metatarsal", "phalanges_feet", "ulna", "radius", "carpal", "metacarpal", "phalanges_hand", "colon", "trachea", "skull"], ["spleen", "kidney_right", "kidney_left", "gallbladder", @@ -34,8 +34,8 @@ ["iliac_artery_left", "iliac_artery_right", "iliac_vena_left", "iliac_vena_right", "aorta", "inferior_vena_cava", "portal_vein_and_splenic_vein", "esophagus", - "brachiocephalic_trunk", "subclavian_artery_right", "subclavian_artery_left", - "common_carotid_artery_right", "common_carotid_artery_left", + "brachiocephalic_trunk", "subclavian_artery_right", "subclavian_artery_left", + "common_carotid_artery_right", "common_carotid_artery_left", "atrial_appendage_left"], ["small_bowel", "stomach", "lung_upper_lobe_left", "lung_upper_lobe_right"], @@ -54,7 +54,7 @@ "rib_right_12", "urinary_bladder", "duodenum", "gluteus_minimus_left", "gluteus_minimus_right", "sternum", "costal_cartilages"], ["liver", "autochthon_left", "autochthon_right", "iliopsoas_left", "iliopsoas_right", - # "heart_ventricle_left", "heart_ventricle_right", "pulmonary_artery", + # "heart_ventricle_left", "heart_ventricle_right", "pulmonary_artery", "pulmonary_vein", "superior_vena_cava", "brachiocephalic_vein_left", "brachiocephalic_vein_right"] ], @@ -93,7 +93,7 @@ ["aorta", "pulmonary_artery"] ], "appendicular_bones": [ - ["patella", "tibia", "fibula", "tarsal", "metatarsal", "phalanges_feet", + ["patella", "tibia", "fibula", "tarsal", "metatarsal", "phalanges_feet", "ulna", "radius", "carpal", "metacarpal", "phalanges_hand"] ], "tissue_types": [ @@ -107,7 +107,7 @@ # "aortic_branches_test": [ # ["brachiocephalic_trunk", "subclavian_artery_right", "subclavian_artery_left", "aorta", # "common_carotid_artery_right", "common_carotid_artery_left"], - # ["superior_vena_cava", + # ["superior_vena_cava", # "brachiocephalic_vein_left", "brachiocephalic_vein_right", "atrial_appendage_left"], # ["pulmonary_vein", "pulmonary_artery"], # ["heart_atrium_left", "heart_atrium_right", "thyroid_gland"] @@ -176,7 +176,7 @@ def plot_subject(ct_img, output_path, df=None, roi_data=None, smoothing=20, # scene.set_camera(position=(612., 331., 1782.), # decrease z: zoom a bit closer # focal_point=(612., 331., 228.), # view_up=(0.0, 1.0, 0.0)) - + scene.projection(proj_type="parallel") scene.reset_camera_tight(margin_factor=1.02) # need to do reset_camera=False in record for this to work in diff --git a/totalsegmentator/python_api.py b/totalsegmentator/python_api.py index dcdca0eaa..666938853 100644 --- a/totalsegmentator/python_api.py +++ b/totalsegmentator/python_api.py @@ -22,9 +22,9 @@ def show_license_info(): if status == "missing_license": # textwarp needed to remove the indentation of the multiline string print(textwrap.dedent("""\ - In contrast to the other tasks this task is not openly available. - It requires a license. 
For non-commercial usage a free license can be - acquired here: + In contrast to the other tasks this task is not openly available. + It requires a license. For non-commercial usage a free license can be + acquired here: https://backend.totalsegmentator.com/license-academic/ For commercial usage contact: jakob.wasserthal@usb.ch @@ -46,9 +46,9 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, statistics_exclude_masks_at_border=True, no_derived_masks=False, v1_order=False, fastest=False, roi_subset_robust=None): """ - Run TotalSegmentator from within python. + Run TotalSegmentator from within python. - For explanation of the arguments see description of command line + For explanation of the arguments see description of command line arguments in bin/TotalSegmentator. """ input = Path(input) @@ -56,7 +56,7 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, nora_tag = "None" if nora_tag is None else nora_tag - if not quiet: + if not quiet: print("\nIf you use this tool please cite: https://pubs.rsna.org/doi/10.1148/ryai.230024\n") # available devices: gpu | cpu | mps @@ -241,7 +241,7 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, # fast statistics are calculated on the downsampled image if statistics and fast: - statistics_fast = True + statistics_fast = True statistics = False else: statistics_fast = False @@ -265,7 +265,7 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, # Generate rough organ segmentation (6mm) for speed up if crop or roi_subset is used if crop is not None or roi_subset is not None: - + body_seg = False # can not be used together with body_seg download_pretrained_weights(298) st = time.time() @@ -278,14 +278,14 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, crop_spacing = 6.0 organ_seg, _ = nnUNet_predict_image(input, None, crop_model_task, model="3d_fullres", folds=[0], trainer="nnUNetTrainer_4000epochs_NoMirroring", tta=False, multilabel_image=True, resample=crop_spacing, - crop=None, crop_path=None, task_name="total", nora_tag="None", preview=False, - save_binary=False, nr_threads_resampling=nr_thr_resamp, nr_threads_saving=1, + crop=None, crop_path=None, task_name="total", nora_tag="None", preview=False, + save_binary=False, nr_threads_resampling=nr_thr_resamp, nr_threads_saving=1, crop_addon=crop_addon, output_type=output_type, statistics=False, quiet=quiet, verbose=verbose, test=0, skip_saving=False, device=device) class_map_inv = {v: k for k, v in class_map["total"].items()} crop_mask = np.zeros(organ_seg.shape, dtype=np.uint8) organ_seg_data = organ_seg.get_fdata() - # roi_subset_crop = [map_to_total[roi] if roi in map_to_total else roi for roi in roi_subset] + # roi_subset_crop = [map_to_total[roi] if roi in map_to_total else roi for roi in roi_subset] roi_subset_crop = crop if crop is not None else roi_subset for roi in roi_subset_crop: crop_mask[organ_seg_data == class_map_inv[roi]] = 1 @@ -301,8 +301,8 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, if not quiet: print("Generating rough body segmentation...") body_seg, _ = nnUNet_predict_image(input, None, 300, model="3d_fullres", folds=[0], trainer="nnUNetTrainer", tta=False, multilabel_image=True, resample=6.0, - crop=None, crop_path=None, task_name="body", nora_tag="None", preview=False, - save_binary=True, nr_threads_resampling=nr_thr_resamp, nr_threads_saving=1, + crop=None, crop_path=None, 
task_name="body", nora_tag="None", preview=False, + save_binary=True, nr_threads_resampling=nr_thr_resamp, nr_threads_saving=1, crop_addon=crop_addon, output_type=output_type, statistics=False, quiet=quiet, verbose=verbose, test=0, skip_saving=False, device=device) crop = body_seg @@ -311,10 +311,10 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, folds = [0] # None seg_img, ct_img = nnUNet_predict_image(input, output, task_id, model=model, folds=folds, trainer=trainer, tta=False, multilabel_image=ml, resample=resample, - crop=crop, crop_path=crop_path, task_name=task, nora_tag=nora_tag, preview=preview, - nr_threads_resampling=nr_thr_resamp, nr_threads_saving=nr_thr_saving, + crop=crop, crop_path=crop_path, task_name=task, nora_tag=nora_tag, preview=preview, + nr_threads_resampling=nr_thr_resamp, nr_threads_saving=nr_thr_saving, force_split=force_split, crop_addon=crop_addon, roi_subset=roi_subset, - output_type=output_type, statistics=statistics_fast, + output_type=output_type, statistics=statistics_fast, quiet=quiet, verbose=verbose, test=test, skip_saving=skip_saving, device=device, exclude_masks_at_border=statistics_exclude_masks_at_border, no_derived_masks=no_derived_masks, v1_order=v1_order) @@ -322,7 +322,7 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, config = increase_prediction_counter() send_usage_stats(config, {"task": task, "fast": fast, "preview": preview, - "multilabel": ml, "roi_subset": roi_subset, + "multilabel": ml, "roi_subset": roi_subset, "statistics": statistics, "radiomics": radiomics}) if statistics: @@ -338,7 +338,7 @@ def totalsegmentator(input, output, ml=False, nr_thr_resamp=1, nr_thr_saving=6, raise ValueError("Radiomics not supported for multilabel segmentation. Use without --ml option.") if img_type == "dicom": raise ValueError("Radiomics not supported for DICOM input. Use nifti input.") - if not quiet: print("Calculating radiomics...") + if not quiet: print("Calculating radiomics...") st = time.time() stats_dir = output.parent if ml else output get_radiomics_features_for_entire_dir(input, output, stats_dir / "statistics_radiomics.json") diff --git a/totalsegmentator/resampling.py b/totalsegmentator/resampling.py index fa7891ca0..e162d65e6 100644 --- a/totalsegmentator/resampling.py +++ b/totalsegmentator/resampling.py @@ -29,8 +29,8 @@ def resample_img(img, zoom=0.5, order=0, nr_cpus=-1): Resize numpy image array to new size. Faster than resample_img_nnunet. - Resample_img_nnunet maybe slighlty better quality on CT (but not sure). - + Resample_img_nnunet maybe slightly better quality on CT (but not sure). + Works for 2D and 3D and 4D images. """ def _process_gradient(grad_idx): @@ -38,10 +38,10 @@ def _process_gradient(grad_idx): dim = len(img.shape) - # Add dimesions to make each input 4D - if dim == 2: + # Add dimensions to make each input 4D + if dim == 2: img = img[..., None, None] - if dim == 3: + if dim == 3: img = img[..., None] nr_cpus = psutil.cpu_count() if nr_cpus == -1 else nr_cpus @@ -58,7 +58,7 @@ def _process_gradient(grad_idx): def resample_img_cucim(img, zoom=0.5, order=0, nr_cpus=-1): """ - Completely speedup of resampling compare to non-gpu version not as big, because much time is lost in + Completely speedup of resampling compare to non-gpu version not as big, because much time is lost in loading the file and then in copying to the GPU. For small image no significant speedup. 
@@ -91,7 +91,7 @@ def resample_img_nnunet(data, mask=None, original_spacing=1.0, target_spacing=2. [x,y,z], [x,y,z] """ from .resample_nnunet import resample_patient - + if type(original_spacing) is float: original_spacing = [original_spacing,] * 3 original_spacing = np.array(original_spacing) @@ -126,12 +126,12 @@ def change_spacing(img_in, new_spacing=1.25, target_shape=None, order=0, nr_cpus nnunet_resample=False, dtype=None, remove_negative=False, force_affine=None): """ Resample nifti image to the new spacing (uses resample_img() internally). - + img_in: nifti image new_spacing: float or sequence of float target_shape: sequence of int (optional) order: resample order (optional) - nnunet_resample: nnunet resampling will use order=0 sampling for z if very anisotropic. Sometimes results + nnunet_resample: nnunet resampling will use order=0 sampling for z if very anisotropic. Sometimes results in a little bit less blurry results dtype: output datatype remove_negative: set all negative values to 0. Useful if resampling introduced negative values. @@ -144,8 +144,8 @@ def change_spacing(img_in, new_spacing=1.25, target_shape=None, order=0, nr_cpus a shape which is +-1 compared to original shape, because of rounding of the shape to int. To avoid this the exact output shape can be provided. Then new_spacing will be ignored and the exact spacing will be calculated which is needed to get to target_shape. - In this case however the calculated spacing can be slighlty different from the desired new_spacing. This will - result in a slightly different affine. To avoid this the desired affine can be writen by force with "force_affine". + In this case however the calculated spacing can be slightly different from the desired new_spacing. This will + result in a slightly different affine. To avoid this the desired affine can be written by force with "force_affine". Note: Only works properly if affine is all 0 except for diagonal and offset (=no rotation and sheering) """ @@ -166,8 +166,8 @@ def change_spacing(img_in, new_spacing=1.25, target_shape=None, order=0, nr_cpus if target_shape is not None: # Find the right zoom to exactly reach the target_shape. # We also have to adapt the spacing to this new zoom. 
- zoom = np.array(target_shape) / old_shape - new_spacing = img_spacing / zoom + zoom = np.array(target_shape) / old_shape + new_spacing = img_spacing / zoom else: zoom = img_spacing / new_spacing @@ -198,7 +198,7 @@ def change_spacing(img_in, new_spacing=1.25, target_shape=None, order=0, nr_cpus new_data = resample_img_cucim(data, zoom=zoom, order=order, nr_cpus=nr_cpus) # gpu resampling else: new_data = resample_img(data, zoom=zoom, order=order, nr_cpus=nr_cpus) # cpu resampling - + if remove_negative: new_data[new_data < 1e-4] = 0 @@ -219,8 +219,8 @@ def change_spacing(img_in, new_spacing=1.25, target_shape=None, order=0, nr_cpus # if __name__ == "__main__": # args = sys.argv[1:] -# file_in = Path(args[0]) -# file_out = Path(args[1]) +# file_in = Path(args[0]) +# file_out = Path(args[1]) # # spacing in mm # x = float(args[2]) diff --git a/totalsegmentator/statistics.py b/totalsegmentator/statistics.py index 15a893569..5b5bd1866 100644 --- a/totalsegmentator/statistics.py +++ b/totalsegmentator/statistics.py @@ -20,7 +20,7 @@ def get_radiomics_features(seg_file, img_file="ct.nii.gz"): from radiomics import featureextractor standard_features = ['shape_Elongation', 'shape_Flatness', 'shape_LeastAxisLength', 'shape_MajorAxisLength', 'shape_Maximum2DDiameterColumn', 'shape_Maximum2DDiameterRow', 'shape_Maximum2DDiameterSlice', 'shape_Maximum3DDiameter', 'shape_MeshVolume', 'shape_MinorAxisLength', 'shape_Sphericity', 'shape_SurfaceArea', 'shape_SurfaceVolumeRatio', 'shape_VoxelVolume', 'firstorder_10Percentile', 'firstorder_90Percentile', 'firstorder_Energy', 'firstorder_Entropy', 'firstorder_InterquartileRange', 'firstorder_Kurtosis', 'firstorder_Maximum', 'firstorder_MeanAbsoluteDeviation', 'firstorder_Mean', 'firstorder_Median', 'firstorder_Minimum', 'firstorder_Range', 'firstorder_RobustMeanAbsoluteDeviation', 'firstorder_RootMeanSquared', 'firstorder_Skewness', 'firstorder_TotalEnergy', 'firstorder_Uniformity', 'firstorder_Variance', 'glcm_Autocorrelation', 'glcm_ClusterProminence', 'glcm_ClusterShade', 'glcm_ClusterTendency', 'glcm_Contrast', 'glcm_Correlation', 'glcm_DifferenceAverage', 'glcm_DifferenceEntropy', 'glcm_DifferenceVariance', 'glcm_Id', 'glcm_Idm', 'glcm_Idmn', 'glcm_Idn', 'glcm_Imc1', 'glcm_Imc2', 'glcm_InverseVariance', 'glcm_JointAverage', 'glcm_JointEnergy', 'glcm_JointEntropy', 'glcm_MCC', 'glcm_MaximumProbability', 'glcm_SumAverage', 'glcm_SumEntropy', 'glcm_SumSquares', 'gldm_DependenceEntropy', 'gldm_DependenceNonUniformity', 'gldm_DependenceNonUniformityNormalized', 'gldm_DependenceVariance', 'gldm_GrayLevelNonUniformity', 'gldm_GrayLevelVariance', 'gldm_HighGrayLevelEmphasis', 'gldm_LargeDependenceEmphasis', 'gldm_LargeDependenceHighGrayLevelEmphasis', 'gldm_LargeDependenceLowGrayLevelEmphasis', 'gldm_LowGrayLevelEmphasis', 'gldm_SmallDependenceEmphasis', 'gldm_SmallDependenceHighGrayLevelEmphasis', 'gldm_SmallDependenceLowGrayLevelEmphasis', 'glrlm_GrayLevelNonUniformity', 'glrlm_GrayLevelNonUniformityNormalized', 'glrlm_GrayLevelVariance', 'glrlm_HighGrayLevelRunEmphasis', 'glrlm_LongRunEmphasis', 'glrlm_LongRunHighGrayLevelEmphasis', 'glrlm_LongRunLowGrayLevelEmphasis', 'glrlm_LowGrayLevelRunEmphasis', 'glrlm_RunEntropy', 'glrlm_RunLengthNonUniformity', 'glrlm_RunLengthNonUniformityNormalized', 'glrlm_RunPercentage', 'glrlm_RunVariance', 'glrlm_ShortRunEmphasis', 'glrlm_ShortRunHighGrayLevelEmphasis', 'glrlm_ShortRunLowGrayLevelEmphasis', 'glszm_GrayLevelNonUniformity', 'glszm_GrayLevelNonUniformityNormalized', 'glszm_GrayLevelVariance', 
'glszm_HighGrayLevelZoneEmphasis', 'glszm_LargeAreaEmphasis', 'glszm_LargeAreaHighGrayLevelEmphasis', 'glszm_LargeAreaLowGrayLevelEmphasis', 'glszm_LowGrayLevelZoneEmphasis', 'glszm_SizeZoneNonUniformity', 'glszm_SizeZoneNonUniformityNormalized', 'glszm_SmallAreaEmphasis', 'glszm_SmallAreaHighGrayLevelEmphasis', 'glszm_SmallAreaLowGrayLevelEmphasis', 'glszm_ZoneEntropy', 'glszm_ZonePercentage', 'glszm_ZoneVariance', 'ngtdm_Busyness', 'ngtdm_Coarseness', 'ngtdm_Complexity', 'ngtdm_Contrast', 'ngtdm_Strength'] - + try: if len(np.unique(nib.load(seg_file).get_fdata())) > 1: settings = {} @@ -36,7 +36,7 @@ def get_radiomics_features(seg_file, img_file="ct.nii.gz"): extractor.enableFeatureClassByName("shape") extractor.enableFeatureClassByName("firstorder") features = extractor.execute(str(img_file), str(seg_file)) - + features = {k.replace("original_", ""): v for k, v in features.items() if k.startswith("original_")} else: print("WARNING: Entire mask is 0 or 1. Setting all features to 0") @@ -47,10 +47,10 @@ def get_radiomics_features(seg_file, img_file="ct.nii.gz"): # only keep subset of features # meaningful_features = ['shape_Elongation', 'shape_Flatness', 'shape_LeastAxisLength'] - # features = {k: v for k, v in features.items() if k in meaningful_features} + # features = {k: v for k, v in features.items() if k in meaningful_features} features = {k: round(float(v), 4) for k, v in features.items()} # round to 4 decimals and cast to python float - + return seg_file.name.split(".")[0], features @@ -113,9 +113,9 @@ def get_basic_statistics(seg: np.array, ct_file, file_out: Path, quiet: bool = F roi_mask = (data > 0).astype(np.uint8) # 0.16s # stats[mask_name]["intensity"] = ct[roi_mask > 0].mean().round(2) if roi_mask.sum() > 0 else 0.0 # 3.0s stats[mask_name]["intensity"] = np.average(ct, weights=roi_mask).round(2) if roi_mask.sum() > 0 else 0.0 # 0.9s - + # For nora json is good # For other people csv might be better -> not really because here only for one subject each -> use json with open(file_out, "w") as f: json.dump(stats, f, indent=4) - +
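The python_api.py hunks above are cosmetic, but they touch the main Python entry point, totalsegmentator(input, output, ml=False, ...). A minimal sketch of calling it directly, assuming a local CT volume; the file and folder names are placeholders, not part of the diff:

    from totalsegmentator.python_api import totalsegmentator

    # Default "total" task: the output folder should receive one mask per class.
    totalsegmentator("ct.nii.gz", "segmentations")

    # With ml=True a single multilabel NIfTI file is written instead
    # (radiomics/statistics options behave differently in this mode, see above).
    totalsegmentator("ct.nii.gz", "segmentation.nii.gz", ml=True)
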