Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking "Sign up for GitHub", you agree to our terms of service and privacy statement. We'll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🔨 v2 - Refactor: Add missing auxiliary attributes to AnomalibModule #2460

Next Next commit
Refactor post-processor to match the pre-processor and evaluator pattern
Signed-off-by: Samet Akcay <samet.akcay@intel.com>
• Loading branch information
samet-akcay committed Dec 9, 2024

Unverified

This user has not yet uploaded their public signing key.
commit ef2deb08fb790ce3c4740579174a5e7f0c8e84d6
56 changes: 47 additions & 9 deletions src/anomalib/models/components/base/anomaly_module.py
Original file line number Diff line number Diff line change
@@ -40,7 +40,7 @@ class AnomalibModule(ExportMixin, pl.LightningModule, ABC):
def __init__(
self,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__()
@@ -52,11 +52,11 @@ def __init__(
self.callbacks: list[Callback]

self.pre_processor = self._resolve_pre_processor(pre_processor)
self.post_processor = post_processor or self.default_post_processor()
self.post_processor = self._resolve_post_processor(post_processor)
self.evaluator = self._resolve_evaluator(evaluator)

self._input_size: tuple[int, int] | None = None
self._is_setup = False # flag to track if setup has been called from the trainer
self._is_setup = False

@property
def name(self) -> str:
@@ -214,15 +214,53 @@ def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> P
]),
)

def default_post_processor(self) -> PostProcessor | None:
"""Default post processor.
def _resolve_post_processor(self, post_processor: PostProcessor | bool) -> PostProcessor | None:
"""Resolve and validate which post-processor to use.

Override in subclass for model-specific post-processing behaviour.
Args:
post_processor: Post-processor configuration
- True -> use default post-processor
- False -> no post-processor
- PostProcessor -> use the provided post-processor

Returns:
Configured post-processor
"""
if isinstance(post_processor, PostProcessor):
return post_processor
if isinstance(post_processor, bool):
return self.configure_post_processor() if post_processor else None
msg = f"Invalid post-processor type: {type(post_processor)}"
raise TypeError(msg)

@classmethod
def configure_post_processor(cls) -> PostProcessor | None:
"""Configure the default post-processor based on the learning type.

Returns:
PostProcessor: Configured post-processor instance.

Raises:
NotImplementedError: If no default post-processor is available for the model's learning type.

Examples:
Get default post-processor:

>>> post_processor = AnomalibModule.configure_post_processor()

Create model with custom post-processor:

>>> custom_post_processor = CustomPostProcessor()
>>> model = PatchCore(post_processor=custom_post_processor)

Disable post-processing:

>>> model = PatchCore(post_processor=False)
"""
if self.learning_type == LearningType.ONE_CLASS:
if cls.learning_type == LearningType.ONE_CLASS:
return OneClassPostProcessor()
msg = f"No default post-processor available for model {self.__name__} with learning type {self.learning_type}. \
Please override the default_post_processor method in the model implementation."
msg = f"No default post-processor available for model with learning type {cls.learning_type}. \
Please override the configure_post_processor method in the model implementation."
raise NotImplementedError(msg)

def _resolve_evaluator(self, evaluator: Evaluator | bool) -> Evaluator | None:
2 changes: 1 addition & 1 deletion src/anomalib/models/image/cfa/lightning_model.py
Original file line number Diff line number Diff line change
@@ -59,7 +59,7 @@ def __init__(
num_hard_negative_features: int = 3,
radius: float = 1e-5,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/cflow/lightning_model.py
Original file line number Diff line number Diff line change
@@ -71,7 +71,7 @@ def __init__(
permute_soft: bool = False,
lr: float = 0.0001,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/csflow/lightning_model.py
Original file line number Diff line number Diff line change
@@ -48,7 +48,7 @@ def __init__(
clamp: int = 3,
num_channels: int = 3,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/dfkde/lightning_model.py
Original file line number Diff line number Diff line change
@@ -50,7 +50,7 @@ def __init__(
feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE,
max_training_points: int = 40000,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/dfm/lightning_model.py
Original file line number Diff line number Diff line change
@@ -54,7 +54,7 @@ def __init__(
pca_level: float = 0.97,
score_type: str = "fre",
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/draem/lightning_model.py
Original file line number Diff line number Diff line change
@@ -51,7 +51,7 @@ def __init__(
anomaly_source_path: str | None = None,
beta: float | tuple[float, float] = (0.1, 1.0),
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/dsr/lightning_model.py
Original file line number Diff line number Diff line change
@@ -53,7 +53,7 @@ def __init__(
latent_anomaly_strength: float = 0.2,
upsampling_train_ratio: float = 0.7,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/efficient_ad/lightning_model.py
Original file line number Diff line number Diff line change
@@ -76,7 +76,7 @@ def __init__(
padding: bool = False,
pad_maps: bool = True,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/fastflow/lightning_model.py
Original file line number Diff line number Diff line change
@@ -50,7 +50,7 @@ def __init__(
conv3x3_only: bool = False,
hidden_ratio: float = 1.0,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/fre/lightning_model.py
Original file line number Diff line number Diff line change
@@ -56,7 +56,7 @@ def __init__(
input_dim: int = 65536,
latent_dim: int = 220,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/ganomaly/lightning_model.py
Original file line number Diff line number Diff line change
@@ -71,7 +71,7 @@ def __init__(
beta1: float = 0.5,
beta2: float = 0.999,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
4 changes: 2 additions & 2 deletions src/anomalib/models/image/padim/lightning_model.py
Original file line number Diff line number Diff line change
@@ -50,7 +50,7 @@ def __init__(
pre_trained: bool = True,
n_features: int | None = None,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
@@ -132,6 +132,6 @@ def learning_type(self) -> LearningType:
return LearningType.ONE_CLASS

@staticmethod
def default_post_processor() -> OneClassPostProcessor:
def configure_post_processor() -> OneClassPostProcessor:
"""Return the default post-processor for PADIM."""
return OneClassPostProcessor()
4 changes: 2 additions & 2 deletions src/anomalib/models/image/patchcore/lightning_model.py
Original file line number Diff line number Diff line change
@@ -53,7 +53,7 @@ def __init__(
coreset_sampling_ratio: float = 0.1,
num_neighbors: int = 9,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
@@ -154,7 +154,7 @@ def learning_type(self) -> LearningType:
return LearningType.ONE_CLASS

@staticmethod
def default_post_processor() -> OneClassPostProcessor:
def configure_post_processor() -> OneClassPostProcessor:
"""Return the default post-processor for the model.

Returns:
Original file line number Diff line number Diff line change
@@ -48,7 +48,7 @@ def __init__(
anomaly_map_mode: AnomalyMapGenerationMode = AnomalyMapGenerationMode.ADD,
pre_trained: bool = True,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/stfpm/lightning_model.py
Original file line number Diff line number Diff line change
@@ -44,7 +44,7 @@ def __init__(
backbone: str = "resnet18",
layers: Sequence[str] = ("layer1", "layer2", "layer3"),
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
2 changes: 1 addition & 1 deletion src/anomalib/models/image/uflow/lightning_model.py
Original file line number Diff line number Diff line change
@@ -49,7 +49,7 @@ def __init__(
affine_subnet_channels_ratio: float = 1.0,
permute_soft: bool = False,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
"""Uflow model.
3 changes: 2 additions & 1 deletion src/anomalib/models/image/vlm_ad/lightning_model.py
Original file line number Diff line number Diff line change
@@ -102,7 +102,8 @@ def configure_transforms(image_size: tuple[int, int] | None = None) -> None:
if image_size is not None:
logger.warning("Ignoring image_size argument as each backend has its own transforms.")

def default_post_processor(self) -> PostProcessor | None: # noqa: PLR6301
@classmethod
def configure_post_processor(cls) -> PostProcessor | None:
"""Post processing is not required for this model."""
return None

4 changes: 2 additions & 2 deletions src/anomalib/models/image/winclip/lightning_model.py
Original file line number Diff line number Diff line change
@@ -56,7 +56,7 @@ def __init__(
scales: tuple = (2, 3),
few_shot_source: Path | str | None = None,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | None = None,
post_processor: PostProcessor | bool = True,
evaluator: Evaluator | bool = True,
) -> None:
super().__init__(pre_processor=pre_processor, post_processor=post_processor, evaluator=evaluator)
@@ -195,6 +195,6 @@ def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> P
return PreProcessor(val_transform=transform, test_transform=transform)

@staticmethod
def default_post_processor() -> OneClassPostProcessor:
def configure_post_processor() -> OneClassPostProcessor:
"""Return the default post-processor for WinCLIP."""
return OneClassPostProcessor()
5 changes: 3 additions & 2 deletions src/anomalib/models/video/ai_vad/lightning_model.py
Original file line number Diff line number Diff line change
@@ -81,9 +81,10 @@ def __init__(
n_neighbors_pose: int = 1,
n_neighbors_deep: int = 1,
pre_processor: PreProcessor | bool = True,
post_processor: PostProcessor | bool = True,
**kwargs,
) -> None:
super().__init__(pre_processor=pre_processor, **kwargs)
super().__init__(pre_processor=pre_processor, post_processor=post_processor, **kwargs)
self.model = AiVadModel(
box_score_thresh=box_score_thresh,
persons_only=persons_only,
@@ -179,6 +180,6 @@ def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> P
return PreProcessor() # A pre-processor with no transforms.

@staticmethod
def default_post_processor() -> PostProcessor:
def configure_post_processor() -> PostProcessor:
"""Return the default post-processor for AI-VAD."""
return OneClassPostProcessor()
Original file line number Diff line number Diff line change
@@ -74,6 +74,6 @@ def learning_type(self) -> LearningType:
return LearningType.ZERO_SHOT

@staticmethod
def default_post_processor() -> PostProcessor:
def configure_post_processor() -> PostProcessor:
"""Returns a dummy post-processor."""
return DummyPostProcessor()