From 4c7d7c342bf06801c40cd2d4f837073755aa5256 Mon Sep 17 00:00:00 2001 From: sahusiddharth <112792547+sahusiddharth@users.noreply.github.com> Date: Mon, 18 Mar 2024 18:49:33 +0530 Subject: [PATCH] Refactored-assert-statements-with-explicit-error-handling (#1825) * Refactored-assert-statements-with-explicit-error-handling Signed-off-by: sahusiddharth * fixed ruff Signed-off-by: sahusiddharth * fixed logic in mvtec Signed-off-by: sahusiddharth --------- Signed-off-by: sahusiddharth --- src/anomalib/callbacks/nncf/utils.py | 34 ++++++++++----- src/anomalib/data/base/dataset.py | 24 ++++++++--- src/anomalib/data/base/video.py | 22 +++++++--- src/anomalib/data/depth/folder_3d.py | 31 ++++++++----- src/anomalib/data/depth/mvtec_3d.py | 24 +++++++---- src/anomalib/data/errors.py | 19 ++++++++ src/anomalib/data/image/folder.py | 16 ++++--- src/anomalib/data/image/kolektor.py | 11 +++-- src/anomalib/data/image/mvtec.py | 18 ++++---- src/anomalib/data/utils/split.py | 17 ++++---- src/anomalib/data/utils/synthetic.py | 14 ++++-- .../deploy/inferencers/openvino_inferencer.py | 4 +- src/anomalib/engine/engine.py | 6 +-- src/anomalib/metrics/__init__.py | 43 +++++++++++-------- .../components/feature_extractors/torchfx.py | 5 ++- .../components/flow/all_in_one_block.py | 7 +-- .../models/image/cflow/lightning_model.py | 4 +- .../models/image/csflow/lightning_model.py | 5 ++- .../image/efficient_ad/lightning_model.py | 4 +- .../models/image/fastflow/lightning_model.py | 5 ++- .../models/image/ganomaly/lightning_model.py | 5 ++- .../models/image/padim/torch_model.py | 6 +-- .../reverse_distillation/lightning_model.py | 5 ++- .../models/image/uflow/feature_extraction.py | 5 ++- .../models/image/uflow/lightning_model.py | 5 ++- .../models/video/ai_vad/clip/model.py | 5 ++- src/anomalib/models/video/ai_vad/density.py | 4 +- src/anomalib/models/video/ai_vad/features.py | 6 +-- src/anomalib/utils/config.py | 5 ++- src/anomalib/utils/visualization/image.py | 15 +++++-- tests/helpers/data.py | 2 +- 31 files changed, 259 insertions(+), 117 deletions(-) create mode 100644 src/anomalib/data/errors.py diff --git a/src/anomalib/callbacks/nncf/utils.py b/src/anomalib/callbacks/nncf/utils.py index 7818d06b6b..5a41c59ce1 100644 --- a/src/anomalib/callbacks/nncf/utils.py +++ b/src/anomalib/callbacks/nncf/utils.py @@ -129,20 +129,26 @@ def compose_nncf_config(nncf_config: dict, enabled_options: list[str]) -> dict: # So, user can define `order_of_parts` in the optimisation_config # to specify the order of applying the parts. 
            order_of_parts = optimisation_parts["order_of_parts"]
-            assert isinstance(order_of_parts, list), 'The field "order_of_parts" in optimisation config should be a list'
+            if not isinstance(order_of_parts, list):
+                msg = 'The field "order_of_parts" in optimisation config should be a list'
+                raise TypeError(msg)
 
             for part in enabled_options:
-                assert (
-                    part in order_of_parts
-                ), f"The part {part} is selected, but it is absent in order_of_parts={order_of_parts}"
+                if part not in order_of_parts:
+                    msg = f"The part {part} is selected, but it is absent in order_of_parts={order_of_parts}"
+                    raise ValueError(msg)
 
             optimisation_parts_to_choose = [part for part in order_of_parts if part in enabled_options]
 
-    assert "base" in optimisation_parts, 'Error: the optimisation config does not contain the "base" part'
+    if "base" not in optimisation_parts:
+        msg = 'Error: the optimisation config does not contain the "base" part'
+        raise KeyError(msg)
     nncf_config_part = optimisation_parts["base"]
 
     for part in optimisation_parts_to_choose:
-        assert part in optimisation_parts, f'Error: the optimisation config does not contain the part "{part}"'
+        if part not in optimisation_parts:
+            msg = f'Error: the optimisation config does not contain the part "{part}"'
+            raise KeyError(msg)
         optimisation_part_dict = optimisation_parts[part]
         try:
             nncf_config_part = merge_dicts_and_lists_b_into_a(nncf_config_part, optimisation_part_dict)
@@ -205,9 +211,16 @@ def _err_str(_a: dict | list, _b: dict | list, _key: int | str | None = None) ->
             f" type(b) = {type(_b)}"
         )
 
-    assert isinstance(a, dict | list), f"Can merge only dicts and lists, whereas type(a)={type(a)}"
-    assert isinstance(b, dict | list), _err_str(a, b, cur_key)
-    assert isinstance(a, list) == isinstance(b, list), _err_str(a, b, cur_key)
+    if not isinstance(a, dict | list):
+        msg = f"Can merge only dicts and lists, whereas type(a)={type(a)}"
+        raise TypeError(msg)
+
+    if not isinstance(b, dict | list):
+        raise TypeError(_err_str(a, b, cur_key))
+
+    if isinstance(a, list) != isinstance(b, list):
+        raise TypeError(_err_str(a, b, cur_key))
+
     if isinstance(a, list) and isinstance(b, list):
         # the main diff w.r.t. mmcf.Config -- merging of lists
         return a + b
@@ -222,7 +235,8 @@ def _err_str(_a: dict | list, _b: dict | list, _key: int | str | None = None) ->
             a[k] = _merge_dicts_and_lists_b_into_a(a[k], b[k], new_cur_key)
             continue
 
-            assert not isinstance(b[k], dict | list), _err_str(a[k], b[k], new_cur_key)
+            if isinstance(b[k], dict | list):
+                raise TypeError(_err_str(a[k], b[k], new_cur_key))
 
             # suppose here that a[k] and b[k] are scalars, just overwrite
             a[k] = b[k]
diff --git a/src/anomalib/data/base/dataset.py b/src/anomalib/data/base/dataset.py
index 0635782b3f..14d239cc33 100644
--- a/src/anomalib/data/base/dataset.py
+++ b/src/anomalib/data/base/dataset.py
@@ -92,7 +92,9 @@ def subsample(self, indices: Sequence[int], inplace: bool = False) -> "AnomalibD
             inplace (bool): When true, the subsampling will be performed on the instance itself.
                 Defaults to ``False``.
         """
-        assert len(set(indices)) == len(indices), "No duplicates allowed in indices."
+        if len(set(indices)) != len(indices):
+            msg = "No duplicates allowed in indices."
+            raise ValueError(msg)
         dataset = self if inplace else copy.deepcopy(self)
         dataset.samples = self.samples.iloc[indices].reset_index(drop=True)
         return dataset
@@ -116,12 +118,18 @@ def samples(self, samples: DataFrame) -> None:
             samples (DataFrame): DataFrame with new samples.
""" # validate the passed samples by checking the - assert isinstance(samples, DataFrame), f"samples must be a pandas.DataFrame, found {type(samples)}" + if not isinstance(samples, DataFrame): + msg = f"samples must be a pandas.DataFrame, found {type(samples)}" + raise TypeError(msg) + expected_columns = _EXPECTED_COLUMNS_PERTASK[self.task] - assert all( - col in samples.columns for col in expected_columns - ), f"samples must have (at least) columns {expected_columns}, found {samples.columns}" - assert samples["image_path"].apply(lambda p: Path(p).exists()).all(), "missing file path(s) in samples" + if not all(col in samples.columns for col in expected_columns): + msg = f"samples must have (at least) columns {expected_columns}, found {samples.columns}" + raise ValueError(msg) + + if not samples["image_path"].apply(lambda p: Path(p).exists()).all(): + msg = "missing file path(s) in samples" + raise FileNotFoundError(msg) self._samples = samples.sort_values(by="image_path", ignore_index=True) @@ -193,7 +201,9 @@ def __add__(self, other_dataset: "AnomalibDataset") -> "AnomalibDataset": Returns: AnomalibDataset: Concatenated dataset. """ - assert isinstance(other_dataset, self.__class__), "Cannot concatenate datasets that are not of the same type." + if not isinstance(other_dataset, self.__class__): + msg = "Cannot concatenate datasets that are not of the same type." + raise TypeError(msg) dataset = copy.deepcopy(self) dataset.samples = pd.concat([self.samples, other_dataset.samples], ignore_index=True) return dataset diff --git a/src/anomalib/data/base/video.py b/src/anomalib/data/base/video.py index 72735d078c..7e9461a0a9 100644 --- a/src/anomalib/data/base/video.py +++ b/src/anomalib/data/base/video.py @@ -68,7 +68,9 @@ def __init__( def __len__(self) -> int: """Get length of the dataset.""" - assert isinstance(self.indexer, ClipsIndexer) + if not isinstance(self.indexer, ClipsIndexer): + msg = "self.indexer must be an instance of ClipsIndexer." + raise TypeError(msg) return self.indexer.num_clips() @property @@ -94,7 +96,9 @@ def _setup_clips(self) -> None: Should be called after each change to self._samples """ - assert callable(self.indexer_cls) + if not callable(self.indexer_cls): + msg = "self.indexer_cls must be callable." + raise TypeError(msg) self.indexer = self.indexer_cls( # pylint: disable=not-callable video_paths=list(self.samples.image_path), mask_paths=list(self.samples.mask_path), @@ -145,8 +149,9 @@ def __getitem__(self, index: int) -> dict[str, str | torch.Tensor]: Returns: dict[str, str | torch.Tensor]: Dictionary containing the mask, clip and file system information. """ - assert isinstance(self.indexer, ClipsIndexer) - + if not isinstance(self.indexer, ClipsIndexer): + msg = "self.indexer must be an instance of ClipsIndexer." + raise TypeError(msg) item = self.indexer.get_item(index) # include the untransformed image for visualization item["original_image"] = item["image"].to(torch.uint8) @@ -185,8 +190,13 @@ def _setup(self, _stage: str | None = None) -> None: Video datamodules are not compatible with synthetic anomaly generation. """ - assert self.train_data is not None - assert self.test_data is not None + if self.train_data is None: + msg = "self.train_data cannot be None." + raise ValueError(msg) + + if self.test_data is None: + msg = "self.test_data cannot be None." 
+            raise ValueError(msg)
 
         self.train_data.setup()
         self.test_data.setup()
diff --git a/src/anomalib/data/depth/folder_3d.py b/src/anomalib/data/depth/folder_3d.py
index a1fab24591..39d8ccab76 100644
--- a/src/anomalib/data/depth/folder_3d.py
+++ b/src/anomalib/data/depth/folder_3d.py
@@ -14,6 +14,7 @@
 
 from anomalib import TaskType
 from anomalib.data.base import AnomalibDataModule, AnomalibDepthDataset
+from anomalib.data.errors import MisMatchError
 from anomalib.data.utils import (
     DirType,
     LabelName,
@@ -24,7 +25,7 @@
 from anomalib.data.utils.path import _prepare_files_labels, validate_and_resolve_path
 
 
-def make_folder3d_dataset(
+def make_folder3d_dataset(  # noqa: C901
     normal_dir: str | Path,
     root: str | Path | None = None,
     abnormal_dir: str | Path | None = None,
@@ -74,7 +75,9 @@ def make_folder3d_dataset(
     abnormal_depth_dir = validate_and_resolve_path(abnormal_depth_dir, root) if abnormal_depth_dir else None
     normal_test_depth_dir = validate_and_resolve_path(normal_test_depth_dir, root) if normal_test_depth_dir else None
 
-    assert normal_dir.is_dir(), "A folder location must be provided in normal_dir."
+    if not normal_dir.is_dir():
+        msg = "A folder location must be provided in normal_dir."
+        raise ValueError(msg)
 
     filenames = []
     labels = []
@@ -129,17 +132,23 @@ def make_folder3d_dataset(
     ].image_path.to_numpy()
 
     # make sure every rgb image has a corresponding depth image and that the file exists
-    assert (
+    stems_match = (
         samples.loc[samples.label_index == LabelName.ABNORMAL]
         .apply(lambda x: Path(x.image_path).stem in Path(x.depth_path).stem, axis=1)
         .all()
-    ), "Mismatch between anomalous images and depth images. Make sure the mask files in 'xyz' \
-        folder follow the same naming convention as the anomalous images in the dataset \
-        (e.g. image: '000.png', depth: '000.tiff')."
+    )
+    if not stems_match:
+        msg = """Mismatch between anomalous images and depth images. Make sure the mask files
+        in 'xyz' folder follow the same naming convention as the anomalous images in the dataset
+        (e.g. image: '000.png', depth: '000.tiff')."""
+        raise MisMatchError(msg)
 
-    assert samples.depth_path.apply(
+    depth_files_exist = samples.depth_path.apply(
         lambda x: Path(x).exists() if not isna(x) else True,
-    ).all(), "missing depth image files"
+    ).all()
+    if not depth_files_exist:
+        msg = "Missing depth image files."
+        raise FileNotFoundError(msg)
 
     samples = samples.astype({"depth_path": "str"})
 
@@ -152,9 +161,11 @@ def make_folder3d_dataset(
         samples = samples.astype({"mask_path": "str"})
 
         # make sure all the files exist
-        assert samples.mask_path.apply(
+        if not samples.mask_path.apply(
             lambda x: Path(x).exists() if x != "" else True,
-        ).all(), f"missing mask files, mask_dir={mask_dir}"
+        ).all():
+            msg = f"Missing mask files. mask_dir={mask_dir}"
+            raise FileNotFoundError(msg)
     else:
         samples["mask_path"] = ""
 
diff --git a/src/anomalib/data/depth/mvtec_3d.py b/src/anomalib/data/depth/mvtec_3d.py
index da2c9a976e..7ac09cf5e7 100644
--- a/src/anomalib/data/depth/mvtec_3d.py
+++ b/src/anomalib/data/depth/mvtec_3d.py
@@ -29,6 +29,7 @@
 
 from anomalib import TaskType
 from anomalib.data.base import AnomalibDataModule, AnomalibDepthDataset
+from anomalib.data.errors import MisMatchError
 from anomalib.data.utils import (
     DownloadInfo,
     LabelName,
@@ -146,22 +147,27 @@ def make_mvtec_3d_dataset(
     samples = samples.astype({"image_path": "str", "mask_path": "str", "depth_path": "str"})
 
     # assert that the right mask files are associated with the right test images
-    assert (
+    masks_match = (
         samples.loc[samples.label_index == LabelName.ABNORMAL]
         .apply(lambda x: Path(x.image_path).stem in Path(x.mask_path).stem, axis=1)
         .all()
-    ), "Mismatch between anomalous images and ground truth masks. Make sure the mask files in 'ground_truth' \
-        folder follow the same naming convention as the anomalous images in the dataset (e.g. image: '000.png', \
-        mask: '000.png' or '000_mask.png')."
+    )
+    if not masks_match:
+        msg = """Mismatch between anomalous images and ground truth masks. Make sure the mask files
+        in 'ground_truth' folder follow the same naming convention as the anomalous images in
+        the dataset (e.g. image: '000.png', mask: '000.png' or '000_mask.png')."""
+        raise MisMatchError(msg)
 
-    # assert that the right depth image files are associated with the right test images
-    assert (
+    depths_match = (
         samples.loc[samples.label_index == LabelName.ABNORMAL]
         .apply(lambda x: Path(x.image_path).stem in Path(x.depth_path).stem, axis=1)
         .all()
-    ), "Mismatch between anomalous images and depth images. Make sure the mask files in 'xyz' \
-        folder follow the same naming convention as the anomalous images in the dataset (e.g. image: '000.png', \
-        depth: '000.tiff')."
+    )
+    if not depths_match:
+        msg = """Mismatch between anomalous images and depth images. Make sure the mask files in
+        'xyz' folder follow the same naming convention as the anomalous images in the dataset
+        (e.g. image: '000.png', depth: '000.tiff')."""
+        raise MisMatchError(msg)
 
     if split:
         samples = samples[samples.split == split].reset_index(drop=True)
diff --git a/src/anomalib/data/errors.py b/src/anomalib/data/errors.py
new file mode 100644
index 0000000000..97c956663c
--- /dev/null
+++ b/src/anomalib/data/errors.py
@@ -0,0 +1,19 @@
+"""Custom Exception Class for Mismatch Detection (MisMatchError)."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+class MisMatchError(Exception):
+    """Exception raised when a mismatch is detected.
+
+    Attributes:
+        message (str): Explanation of the error.
+    """
+
+    def __init__(self, message: str = "") -> None:
+        if message:
+            self.message = message
+        else:
+            self.message = "Mismatch detected."
+        super().__init__(self.message)
diff --git a/src/anomalib/data/image/folder.py b/src/anomalib/data/image/folder.py
index 4b52bd691a..afbb583618 100644
--- a/src/anomalib/data/image/folder.py
+++ b/src/anomalib/data/image/folder.py
@@ -15,6 +15,7 @@
 
 from anomalib import TaskType
 from anomalib.data.base import AnomalibDataModule, AnomalibDataset
+from anomalib.data.errors import MisMatchError
 from anomalib.data.utils import (
     DirType,
     LabelName,
@@ -102,7 +103,9 @@ def _resolve_path_and_convert_to_list(path: str | Path | Sequence[str | Path] |
     abnormal_dir = _resolve_path_and_convert_to_list(abnormal_dir)
     normal_test_dir = _resolve_path_and_convert_to_list(normal_test_dir)
     mask_dir = _resolve_path_and_convert_to_list(mask_dir)
-    assert len(normal_dir) > 0, "A folder location must be provided in normal_dir."
+    if len(normal_dir) == 0:
+        msg = "A folder location must be provided in normal_dir."
+        raise ValueError(msg)
 
     filenames = []
     labels = []
@@ -144,13 +147,16 @@ def _resolve_path_and_convert_to_list(path: str | Path | Sequence[str | Path] |
         samples = samples.astype({"mask_path": "str"})
 
         # make sure every rgb image has a corresponding mask image.
-        assert (
+        if not (
             samples.loc[samples.label_index == LabelName.ABNORMAL]
             .apply(lambda x: Path(x.image_path).stem in Path(x.mask_path).stem, axis=1)
             .all()
-        ), "Mismatch between anomalous images and mask images. Make sure the mask files \
-            folder follow the same naming convention as the anomalous images in the dataset \
-            (e.g. image: '000.png', mask: '000.png')."
+        ):
+            msg = """Mismatch between anomalous images and mask images. Make sure the mask files
+            in the mask folder follow the same naming convention as the anomalous images in the dataset
+            (e.g. image: '000.png', mask: '000.png')."""
+            raise MisMatchError(msg)
+
     else:
         samples["mask_path"] = ""
 
diff --git a/src/anomalib/data/image/kolektor.py b/src/anomalib/data/image/kolektor.py
index 871fb94e89..049c770c45 100644
--- a/src/anomalib/data/image/kolektor.py
+++ b/src/anomalib/data/image/kolektor.py
@@ -28,6 +28,7 @@
 
 from anomalib import TaskType
 from anomalib.data.base import AnomalibDataModule, AnomalibDataset
+from anomalib.data.errors import MisMatchError
 from anomalib.data.utils import (
     DownloadInfo,
     Split,
@@ -165,13 +166,15 @@ def make_kolektor_dataset(
     samples = samples[["path", "item", "split", "label", "image_path", "mask_path", "label_index"]]
 
     # assert that the right mask files are associated with the right test images
-    assert (
+    if not (
         samples.loc[samples.label_index == 1]
         .apply(lambda x: Path(x.image_path).stem in Path(x.mask_path).stem, axis=1)
         .all()
-    ), "Mismatch between anomalous images and ground truth masks. Make sure the mask files \
-        follow the same naming convention as the anomalous images in the dataset (e.g. image: 'Part0.jpg', \
-        mask: 'Part0_label.bmp')."
+    ):
+        msg = """Mismatch between anomalous images and ground truth masks. Make sure the mask files
+        follow the same naming convention as the anomalous images in the dataset
+        (e.g. image: 'Part0.jpg', mask: 'Part0_label.bmp')."""
+        raise MisMatchError(msg)
 
     # Get the dataframe for the required split
     if split:
diff --git a/src/anomalib/data/image/mvtec.py b/src/anomalib/data/image/mvtec.py
index c23add93ab..ffb657233f 100644
--- a/src/anomalib/data/image/mvtec.py
+++ b/src/anomalib/data/image/mvtec.py
@@ -35,6 +35,7 @@
 
 from anomalib import TaskType
 from anomalib.data.base import AnomalibDataModule, AnomalibDataset
+from anomalib.data.errors import MisMatchError
 from anomalib.data.utils import (
     DownloadInfo,
     LabelName,
@@ -154,14 +155,15 @@ def make_mvtec_dataset(
     ] = mask_samples.image_path.to_numpy()
 
     # assert that the right mask files are associated with the right test images
-    if len(samples.loc[samples.label_index == LabelName.ABNORMAL]):
-        assert (
-            samples.loc[samples.label_index == LabelName.ABNORMAL]
-            .apply(lambda x: Path(x.image_path).stem in Path(x.mask_path).stem, axis=1)
-            .all()
-        ), "Mismatch between anomalous images and ground truth masks. Make sure the mask files in 'ground_truth' \
-            folder follow the same naming convention as the anomalous images in the dataset (e.g. image: \
-            '000.png', mask: '000.png' or '000_mask.png')."
+    abnormal_samples = samples.loc[samples.label_index == LabelName.ABNORMAL]
+    if (
+        len(abnormal_samples)
+        and not abnormal_samples.apply(lambda x: Path(x.image_path).stem in Path(x.mask_path).stem, axis=1).all()
+    ):
+        msg = """Mismatch between anomalous images and ground truth masks. Make sure the mask files
+        in 'ground_truth' folder follow the same naming convention as the anomalous images in the
+        dataset (e.g. image: '000.png', mask: '000.png' or '000_mask.png')."""
+        raise MisMatchError(msg)
 
     if split:
         samples = samples[samples.split == split].reset_index(drop=True)
diff --git a/src/anomalib/data/utils/split.py b/src/anomalib/data/utils/split.py
index 27d1b4d770..aefa2bb1e6 100644
--- a/src/anomalib/data/utils/split.py
+++ b/src/anomalib/data/utils/split.py
@@ -86,10 +86,13 @@ def random_split(
     if isinstance(split_ratio, float):
         split_ratio = [1 - split_ratio, split_ratio]
 
-    assert (  # noqa: PT018
-        math.isclose(sum(split_ratio), 1) and sum(split_ratio) <= 1
-    ), f"split ratios must sum to 1, found {sum(split_ratio)}"
-    assert all(0 < ratio < 1 for ratio in split_ratio), f"all split ratios must be between 0 and 1, found {split_ratio}"
+    if not (math.isclose(sum(split_ratio), 1) and sum(split_ratio) <= 1):
+        msg = f"Split ratios must sum to 1, found {sum(split_ratio)}"
+        raise ValueError(msg)
+
+    if not all(0 < ratio < 1 for ratio in split_ratio):
+        msg = f"All split ratios must be between 0 and 1, found {split_ratio}"
+        raise ValueError(msg)
 
     # create list of source data
     if label_aware and "label_index" in dataset.samples:
@@ -108,10 +111,8 @@ def random_split(
             subset_idx = i % sum(subset_lengths)
             subset_lengths[subset_idx] += 1
     if 0 in subset_lengths:
-        msg = (
-            "Zero subset length encountered during splitting. This means one of your subsets might be"
-            " empty or devoid of either normal or anomalous images.",
-        )
+        msg = """Zero subset length encountered during splitting. 
This means one of your subsets + might be empty or devoid of either normal or anomalous images.""" logger.warning(msg) # perform random subsampling diff --git a/src/anomalib/data/utils/synthetic.py b/src/anomalib/data/utils/synthetic.py index e782024ebf..67b8dcef99 100644 --- a/src/anomalib/data/utils/synthetic.py +++ b/src/anomalib/data/utils/synthetic.py @@ -46,9 +46,17 @@ def make_synthetic_dataset( mask_dir (Path): Directory to which the ground truth anomaly masks will be written. anomalous_ratio (float): Fraction of source samples that will be converted into anomalous samples. """ - assert 1 not in source_samples.label_index.to_numpy(), "All source images must be normal." - assert image_dir.is_dir(), f"{image_dir} is not a folder." - assert mask_dir.is_dir(), f"{mask_dir} is not a folder" + if 1 in source_samples.label_index.to_numpy(): + msg = "All source images must be normal." + raise ValueError(msg) + + if not image_dir.is_dir(): + msg = f"{image_dir} is not a folder." + raise NotADirectoryError(msg) + + if not mask_dir.is_dir(): + msg = f"{mask_dir} is not a folder." + raise NotADirectoryError(msg) # filter relevant columns source_samples = source_samples.filter(["image_path", "label", "label_index", "mask_path", "split"]) diff --git a/src/anomalib/deploy/inferencers/openvino_inferencer.py b/src/anomalib/deploy/inferencers/openvino_inferencer.py index 1fcf846ee9..62d4e797d0 100644 --- a/src/anomalib/deploy/inferencers/openvino_inferencer.py +++ b/src/anomalib/deploy/inferencers/openvino_inferencer.py @@ -261,7 +261,9 @@ def post_process(self, predictions: np.ndarray, metadata: dict | DictConfig | No anomaly_maps=anomaly_map, metadata=metadata, ) - assert anomaly_map is not None + if anomaly_map is None: + msg = "Anomaly map cannot be None." + raise ValueError(msg) if "image_shape" in metadata and anomaly_map.shape != metadata["image_shape"]: image_height = metadata["image_shape"][0] diff --git a/src/anomalib/engine/engine.py b/src/anomalib/engine/engine.py index 2f2f14279b..48ad62bd41 100644 --- a/src/anomalib/engine/engine.py +++ b/src/anomalib/engine/engine.py @@ -725,9 +725,9 @@ def predict( anomalib predict --model Padim --data --ckpt_path ``` """ - assert ( - model or self.model - ), "`Engine.predict()` requires an `AnomalyModule` when it hasn't been passed in a previous run." + if not (model or self.model): + msg = "`Engine.predict()` requires an `AnomalyModule` when it hasn't been passed in a previous run." + raise ValueError(msg) if ckpt_path: ckpt_path = Path(ckpt_path).resolve() diff --git a/src/anomalib/metrics/__init__.py b/src/anomalib/metrics/__init__.py index 544e6fbf6f..86b007aba9 100644 --- a/src/anomalib/metrics/__init__.py +++ b/src/anomalib/metrics/__init__.py @@ -77,29 +77,33 @@ def _validate_metrics_dict(metrics: dict[str, dict[str, Any]]) -> None: - have key init_args" and its value is of type dict). 
""" - assert all( - isinstance(metric, str) for metric in metrics - ), f"All keys (metric names) must be strings, found {sorted(metrics.keys())}" - assert all( - isinstance(metric, dict | DictConfig) for metric in metrics.values() - ), f"All values must be dictionaries, found {list(metrics.values())}" - assert all("class_path" in metric and isinstance(metric["class_path"], str) for metric in metrics.values()), ( - "All internal dictionaries must have a 'class_path' key whose value is of type str, " - f"found {list(metrics.values())}" - ) - assert all( - "init_args" in metric and isinstance(metric["init_args"], dict | DictConfig) for metric in metrics.values() - ), ( - "All internal dictionaries must have a 'init_args' key whose value is of type dict, " - f"found {list(metrics.values())}" - ) + if not all(isinstance(metric, str) for metric in metrics): + msg = f"All keys (metric names) must be strings, found {sorted(metrics.keys())}" + raise TypeError(msg) + + if not all(isinstance(metric, DictConfig | dict) for metric in metrics.values()): + msg = f"All values must be dictionaries, found {list(metrics.values())}" + raise TypeError(msg) + + if not all("class_path" in metric and isinstance(metric["class_path"], str) for metric in metrics.values()): + msg = "All internal dictionaries must have a 'class_path' key whose value is of type str." + raise ValueError(msg) + + if not all( + "init_args" in metric and isinstance(metric["init_args"], dict) or isinstance(metric["init_args"], DictConfig) + for metric in metrics.values() + ): + msg = "All internal dictionaries must have a 'init_args' key whose value is of type dict." + raise ValueError(msg) def _get_class_from_path(class_path: str) -> Callable: """Get a class from a module assuming the string format is `package.subpackage.module.ClassName`.""" module_name, class_name = class_path.rsplit(".", 1) module = importlib.import_module(module_name) - assert hasattr(module, class_name), f"Class {class_name} not found in module {module_name}" + if not hasattr(module, class_name): + msg = f"Class {class_name} not found in module {module_name}" + raise AttributeError(msg) return getattr(module, class_name) @@ -178,7 +182,10 @@ def create_metric_collection( # fallback is using the names if isinstance(metrics, ListConfig | list): - assert all(isinstance(metric, str) for metric in metrics), f"All metrics must be strings, found {metrics}" + if not all(isinstance(metric, str) for metric in metrics): + msg = f"All metrics must be strings, found {metrics}" + raise TypeError(msg) + return metric_collection_from_names(metrics, prefix) if isinstance(metrics, DictConfig | dict): diff --git a/src/anomalib/models/components/feature_extractors/torchfx.py b/src/anomalib/models/components/feature_extractors/torchfx.py index 00f8ad4fbf..a8c42632ab 100644 --- a/src/anomalib/models/components/feature_extractors/torchfx.py +++ b/src/anomalib/models/components/feature_extractors/torchfx.py @@ -170,7 +170,10 @@ class can be provided and it will try to load the weights from the provided weig if isinstance(weights, WeightsEnum): # torchvision models feature_extractor = create_feature_extractor(model=backbone_model, return_nodes=return_nodes) elif weights is not None: - assert isinstance(weights, str), "Weights should point to a path" + if not isinstance(weights, str): + msg = "Weights should point to a path" + raise TypeError(msg) + model_weights = torch.load(weights) if "state_dict" in model_weights: model_weights = model_weights["state_dict"] diff --git 
a/src/anomalib/models/components/flow/all_in_one_block.py b/src/anomalib/models/components/flow/all_in_one_block.py index b108b20892..0f517c2552 100644 --- a/src/anomalib/models/components/flow/all_in_one_block.py +++ b/src/anomalib/models/components/flow/all_in_one_block.py @@ -134,9 +134,10 @@ def __init__( self.conditional = False self.condition_channels = 0 else: - assert tuple(dims_c[0][1:]) == tuple( - dims_in[0][1:], - ), f"Dimensions of input and condition don't agree: {dims_c} vs {dims_in}." + if tuple(dims_c[0][1:]) != tuple(dims_in[0][1:]): + msg = f"Dimensions of input and condition don't agree: {dims_c} vs {dims_in}." + raise ValueError(msg) + self.conditional = True self.condition_channels = sum(dc[0] for dc in dims_c) diff --git a/src/anomalib/models/image/cflow/lightning_model.py b/src/anomalib/models/image/cflow/lightning_model.py index 63593dda68..8efdba044d 100644 --- a/src/anomalib/models/image/cflow/lightning_model.py +++ b/src/anomalib/models/image/cflow/lightning_model.py @@ -150,7 +150,9 @@ def training_step(self, batch: dict[str, str | torch.Tensor], *args, **kwargs) - decoder = self.model.decoders[layer_idx].to(images.device) fiber_batches = embedding_length // self.model.fiber_batch_size # number of fiber batches - assert fiber_batches > 0, "Make sure we have enough fibers, otherwise decrease N or batch-size!" + if fiber_batches <= 0: + msg = "Make sure we have enough fibers, otherwise decrease N or batch-size!" + raise ValueError(msg) for batch_num in range(fiber_batches): # per-fiber processing opt.zero_grad() diff --git a/src/anomalib/models/image/csflow/lightning_model.py b/src/anomalib/models/image/csflow/lightning_model.py index 58a71d540b..2b83ee9a87 100644 --- a/src/anomalib/models/image/csflow/lightning_model.py +++ b/src/anomalib/models/image/csflow/lightning_model.py @@ -59,7 +59,10 @@ def __init__( self.model: CsFlowModel def _setup(self) -> None: - assert self.input_size is not None, "Csflow needs input size to build torch model." + if self.input_size is None: + msg = "Csflow needs input size to build torch model." + raise ValueError(msg) + self.model = CsFlowModel( input_size=self.input_size, cross_conv_hidden_channels=self.cross_conv_hidden_channels, diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py index 36cbba8dd6..6dc8c487ce 100644 --- a/src/anomalib/models/image/efficient_ad/lightning_model.py +++ b/src/anomalib/models/image/efficient_ad/lightning_model.py @@ -146,7 +146,9 @@ def teacher_channel_mean_std(self, dataloader: DataLoader) -> dict[str, torch.Te chanel_sum += torch.sum(y, dim=[0, 2, 3]) chanel_sum_sqr += torch.sum(y**2, dim=[0, 2, 3]) - assert n is not None + if n is None: + msg = "The value of 'n' cannot be None." + raise ValueError(msg) channel_mean = chanel_sum / n diff --git a/src/anomalib/models/image/fastflow/lightning_model.py b/src/anomalib/models/image/fastflow/lightning_model.py index caacf73ce7..23a8aabe25 100644 --- a/src/anomalib/models/image/fastflow/lightning_model.py +++ b/src/anomalib/models/image/fastflow/lightning_model.py @@ -59,7 +59,10 @@ def __init__( self.model: FastflowModel def _setup(self) -> None: - assert self.input_size is not None, "Fastflow needs input size to build torch model." + if self.input_size is None: + msg = "Fastflow needs input size to build torch model." 
+        if self.input_size is None:
+            msg = "Fastflow needs input size to build torch model."
+            raise ValueError(msg)
+
         self.model = FastflowModel(
             input_size=self.input_size,
             backbone=self.backbone,
diff --git a/src/anomalib/models/image/ganomaly/lightning_model.py b/src/anomalib/models/image/ganomaly/lightning_model.py
index 8279d65e1a..cdac166828 100644
--- a/src/anomalib/models/image/ganomaly/lightning_model.py
+++ b/src/anomalib/models/image/ganomaly/lightning_model.py
@@ -93,7 +93,10 @@ def __init__(
         self.model: GanomalyModel
 
     def _setup(self) -> None:
-        assert self.input_size is not None, "CSflow needs input size to build torch model."
+        if self.input_size is None:
+            msg = "GANomaly needs input size to build torch model."
+            raise ValueError(msg)
+
         self.model = GanomalyModel(
             input_size=self.input_size,
             num_input_channels=3,
diff --git a/src/anomalib/models/image/padim/torch_model.py b/src/anomalib/models/image/padim/torch_model.py
index 89b1a218cd..1bcb66c714 100644
--- a/src/anomalib/models/image/padim/torch_model.py
+++ b/src/anomalib/models/image/padim/torch_model.py
@@ -88,9 +88,9 @@ def __init__(
             )
             raise ValueError(msg)
 
-        assert (
-            0 < self.n_features <= self.n_features_original
-        ), f"for backbone {self.backbone}, 0 < n_features <= {self.n_features_original}, found {self.n_features}"
+        if not (0 < self.n_features <= self.n_features_original):
+            msg = f"For backbone {self.backbone}, 0 < n_features <= {self.n_features_original}, found {self.n_features}"
+            raise ValueError(msg)
 
         # Since idx is randomly selected, save it with model to get same results
         self.register_buffer(
diff --git a/src/anomalib/models/image/reverse_distillation/lightning_model.py b/src/anomalib/models/image/reverse_distillation/lightning_model.py
index 72484b0a21..262a0c4b52 100644
--- a/src/anomalib/models/image/reverse_distillation/lightning_model.py
+++ b/src/anomalib/models/image/reverse_distillation/lightning_model.py
@@ -57,7 +57,10 @@ def __init__(
         self.model: ReverseDistillationModel
 
     def _setup(self) -> None:
-        assert self.input_size is not None, "Input size is required for Reverse Distillation model."
+        if self.input_size is None:
+            msg = "Input size is required for Reverse Distillation model."
+            raise ValueError(msg)
+
         self.model = ReverseDistillationModel(
             backbone=self.backbone,
             pre_trained=self.pre_trained,
diff --git a/src/anomalib/models/image/uflow/feature_extraction.py b/src/anomalib/models/image/uflow/feature_extraction.py
index ec362d8710..fb35af3ec8 100644
--- a/src/anomalib/models/image/uflow/feature_extraction.py
+++ b/src/anomalib/models/image/uflow/feature_extraction.py
@@ -28,7 +28,10 @@ def get_feature_extractor(backbone: str, input_size: tuple[int, int] = (256, 256
     Returns:
         FeatureExtractorInterface: Feature extractor.
     """
-    assert backbone in AVAILABLE_EXTRACTORS, f"Feature extractor must be one of {AVAILABLE_EXTRACTORS}."
+    if backbone not in AVAILABLE_EXTRACTORS:
+        msg = f"Feature extractor must be one of {AVAILABLE_EXTRACTORS}."
+        raise ValueError(msg)
+
     if backbone in ["resnet18", "wide_resnet50_2"]:
         return FeatureExtractor(backbone, input_size, layers=("layer1", "layer2", "layer3"))
     if backbone == "mcait":
diff --git a/src/anomalib/models/image/uflow/lightning_model.py b/src/anomalib/models/image/uflow/lightning_model.py
index b7a66aec1a..58d62e6d72 100644
--- a/src/anomalib/models/image/uflow/lightning_model.py
+++ b/src/anomalib/models/image/uflow/lightning_model.py
@@ -61,7 +61,10 @@ def __init__(
         self.model: UflowModel
 
     def _setup(self) -> None:
-        assert self.input_size is not None, "Input size is required for UFlow model." 
+        if self.input_size is None:
+            msg = "Input size is required for UFlow model."
+            raise ValueError(msg)
+
         self.model = UflowModel(
             input_size=self.input_size,
             backbone=self.backbone,
diff --git a/src/anomalib/models/video/ai_vad/clip/model.py b/src/anomalib/models/video/ai_vad/clip/model.py
index fe041b8536..9f23afa8fc 100644
--- a/src/anomalib/models/video/ai_vad/clip/model.py
+++ b/src/anomalib/models/video/ai_vad/clip/model.py
@@ -444,7 +444,10 @@ def build_model(state_dict: dict):
         vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
         output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
         vision_patch_size = None
-        assert output_width**2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
+        if output_width**2 + 1 != state_dict["visual.attnpool.positional_embedding"].shape[0]:
+            msg = "The shape of the positional embedding does not match the computed output width."
+            raise ValueError(msg)
+
         image_resolution = output_width * 32
 
     embed_dim = state_dict["text_projection"].shape[1]
diff --git a/src/anomalib/models/video/ai_vad/density.py b/src/anomalib/models/video/ai_vad/density.py
index dbc0081fba..857c80cf6f 100644
--- a/src/anomalib/models/video/ai_vad/density.py
+++ b/src/anomalib/models/video/ai_vad/density.py
@@ -89,7 +89,9 @@ def __init__(
             self.appearance_estimator = GroupedKNNEstimator(n_neighbors_deep)
         if self.use_pose_features:
             self.pose_estimator = GroupedKNNEstimator(n_neighbors=n_neighbors_pose)
-        assert any((use_pose_features, use_deep_features, use_velocity_features))
+        if not any((use_pose_features, use_deep_features, use_velocity_features)):
+            msg = "At least one feature stream must be enabled."
+            raise ValueError(msg)
 
     def update(self, features: dict[FeatureType, torch.Tensor], group: str | None = None) -> None:
         """Update the density estimators for the different feature types.
diff --git a/src/anomalib/models/video/ai_vad/features.py b/src/anomalib/models/video/ai_vad/features.py
index 07281461eb..296769f799 100644
--- a/src/anomalib/models/video/ai_vad/features.py
+++ b/src/anomalib/models/video/ai_vad/features.py
@@ -46,9 +46,9 @@ def __init__(
         use_deep_features: bool = True,
     ) -> None:
         super().__init__()
-        assert (
-            use_velocity_features or use_pose_features or use_deep_features
-        ), "At least one feature stream must be enabled."
+        if not (use_velocity_features or use_pose_features or use_deep_features):
+            msg = "At least one feature stream must be enabled."
+            raise ValueError(msg)
 
         self.use_velocity_features = use_velocity_features
         self.use_pose_features = use_pose_features
diff --git a/src/anomalib/utils/config.py b/src/anomalib/utils/config.py
index 27f4605419..113522819e 100644
--- a/src/anomalib/utils/config.py
+++ b/src/anomalib/utils/config.py
@@ -67,7 +67,10 @@ def to_tuple(input_size: int | ListConfig) -> tuple[int, int]:
     if isinstance(input_size, int):
         ret_val = cast(tuple[int, int], (input_size,) * 2)
     elif isinstance(input_size, ListConfig | Sequence):
-        assert len(input_size) == 2, "Expected a single integer or tuple of length 2 for width and height."
+        if len(input_size) != 2:
+            msg = "Expected a single integer or tuple of length 2 for width and height." 
+            raise ValueError(msg)
+
         ret_val = cast(tuple[int, int], tuple(input_size))
     else:
         msg = f"Expected either int or ListConfig, got {type(input_size)}"
diff --git a/src/anomalib/utils/visualization/image.py b/src/anomalib/utils/visualization/image.py
index 11ec3cf4b5..b36c5dc7cc 100644
--- a/src/anomalib/utils/visualization/image.py
+++ b/src/anomalib/utils/visualization/image.py
@@ -74,7 +74,10 @@ def __init__(
             self.segmentations = (self.segmentations * 255).astype(np.uint8)
 
         if self.pred_boxes is not None:
-            assert self.box_labels is not None, "Box labels must be provided when box locations are provided."
+            if self.box_labels is None:
+                msg = "Box labels must be provided when box locations are provided."
+                raise ValueError(msg)
+
             self.normal_boxes = self.pred_boxes[~self.box_labels.astype(bool)]
             self.anomalous_boxes = self.pred_boxes[self.box_labels.astype(bool)]
 
@@ -199,7 +202,10 @@ def _visualize_full(self, image_result: ImageResult) -> np.ndarray:
         """
         image_grid = _ImageGrid()
         if self.task == TaskType.DETECTION:
-            assert image_result.pred_boxes is not None
+            if image_result.pred_boxes is None:
+                msg = "Predicted boxes are required for visualizing detection results."
+                raise ValueError(msg)
+
             image_grid.add_image(image_result.image, "Image")
             if image_result.gt_boxes is not None:
                 gt_image = draw_boxes(np.copy(image_result.image), image_result.gt_boxes, color=(255, 0, 0))
@@ -210,7 +216,10 @@ def _visualize_full(self, image_result: ImageResult) -> np.ndarray:
                 pred_image = draw_boxes(pred_image, image_result.anomalous_boxes, color=(255, 0, 0))
                 image_grid.add_image(pred_image, "Predictions")
         if self.task == TaskType.SEGMENTATION:
-            assert image_result.pred_mask is not None
+            if image_result.pred_mask is None:
+                msg = "A predicted mask is required for visualizing segmentation results."
+                raise ValueError(msg)
+
             image_grid.add_image(image_result.image, "Image")
             if image_result.gt_mask is not None:
                 image_grid.add_image(image=image_result.gt_mask, color_map="gray", title="Ground Truth")
diff --git a/tests/helpers/data.py b/tests/helpers/data.py
index 134a863312..51b683acab 100644
--- a/tests/helpers/data.py
+++ b/tests/helpers/data.py
@@ -166,7 +166,7 @@ def generate_video(
         masks: list[np.ndarray] = []
         state = 1 if first_label == LabelName.NORMAL else -1
         for _ in range(length):
-            state = state * -1 if np.random.random() < p_state_switch else state
+            state = state * -1 if self.rng.random() < p_state_switch else state
            label = LabelName.NORMAL if state == 1 else LabelName.ABNORMAL
             frame, mask = self.generate_image(label=label)
             frames.append(frame)
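
---

Note for reviewers: every hunk above applies the same pattern, so it can be checked once in isolation. Below is a minimal sketch of that pattern together with the new `MisMatchError`; the `validate_ratio` helper and the sample values are invented for demonstration and are not part of the patch.

```python
"""Sketch of the assert-to-raise refactoring pattern applied by this patch."""

from anomalib.data.errors import MisMatchError


def validate_ratio(ratio: float) -> None:
    """Illustrative helper (hypothetical, not in the codebase).

    Before: `assert 0 < ratio < 1, f"..."` -- the check is silently stripped
    under `python -O` and can only ever raise AssertionError.
    After: the check survives -O and raises a precise, catchable exception,
    with the message bound to `msg` first (the style enforced by ruff's EM rules).
    """
    if not 0 < ratio < 1:
        msg = f"Ratio must be between 0 and 1, found {ratio}"
        raise ValueError(msg)


try:
    validate_ratio(1.5)
except ValueError as exc:
    print(f"caught: {exc}")  # caught: Ratio must be between 0 and 1, found 1.5

# The new exception type falls back to a default message when none is supplied.
try:
    raise MisMatchError
except MisMatchError as exc:
    print(f"caught: {exc}")  # caught: Mismatch detected.
```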