From 80106b4e74f7f8a4a50fa6cf3cc3a2cb537ecbb4 Mon Sep 17 00:00:00 2001
From: abc-125 <63813435+abc-125@users.noreply.github.com>
Date: Sat, 20 Apr 2024 13:16:40 +0200
Subject: [PATCH 1/5] Remove batch_size from config and hardcode it in the
 model (it is the batch size for the imagenet dataloader, which should not
 be changed).

---
 configs/model/efficient_ad.yaml                            | 3 +--
 src/anomalib/models/image/efficient_ad/lightning_model.py | 5 +----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/configs/model/efficient_ad.yaml b/configs/model/efficient_ad.yaml
index abb5cb662d..1d7f70b7eb 100644
--- a/configs/model/efficient_ad.yaml
+++ b/configs/model/efficient_ad.yaml
@@ -7,12 +7,11 @@ model:
     weight_decay: 1.0e-05
     padding: false
     pad_maps: true
-    batch_size: 1

 metrics:
   pixel:
     - AUROC

 trainer:
-  max_epochs: 200
+  max_epochs: 1000
   max_steps: 70000

diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py
index 619d1c58cf..7bf3e89da9 100644
--- a/src/anomalib/models/image/efficient_ad/lightning_model.py
+++ b/src/anomalib/models/image/efficient_ad/lightning_model.py
@@ -58,8 +58,6 @@ class EfficientAd(AnomalyModule):
         pad_maps (bool): relevant if padding is set to False. In this case, pad_maps = True pads the
             output anomaly maps so that their size matches the size in the padding = True case.
             Defaults to ``True``.
-        batch_size (int): batch size for imagenet dataloader
-            Defaults to ``1``.
     """

     def __init__(
@@ -71,7 +69,6 @@ def __init__(
         weight_decay: float = 0.00001,
         padding: bool = False,
         pad_maps: bool = True,
-        batch_size: int = 1,
     ) -> None:
         super().__init__()

@@ -83,7 +80,7 @@ def __init__(
             padding=padding,
             pad_maps=pad_maps,
         )
-        self.batch_size = batch_size
+        self.batch_size = 1  # hardcoded batch_size for imagenet dataloader
         self.lr = lr
         self.weight_decay = weight_decay

From 3d806499c0a4e7d7889918d5f43af32130b3b8f6 Mon Sep 17 00:00:00 2001
From: abc-125 <63813435+abc-125@users.noreply.github.com>
Date: Sat, 20 Apr 2024 14:14:59 +0200
Subject: [PATCH 2/5] Better description

---
 src/anomalib/models/image/efficient_ad/lightning_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py
index 7bf3e89da9..d93721a182 100644
--- a/src/anomalib/models/image/efficient_ad/lightning_model.py
+++ b/src/anomalib/models/image/efficient_ad/lightning_model.py
@@ -80,7 +80,7 @@ def __init__(
             padding=padding,
             pad_maps=pad_maps,
         )
-        self.batch_size = 1  # hardcoded batch_size for imagenet dataloader
+        self.batch_size = 1  # imagenet dataloader batch size is 1 according to the paper
         self.lr = lr
         self.weight_decay = weight_decay
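A note for readers outside the code: the value pinned above is the batch size of the auxiliary Imagenette dataloader that EfficientAd uses for its student penalty term, so each training step draws exactly one ImageNet image regardless of the training batch size. A minimal sketch of how such a loader could be built (the dataset path, image sizes, and grayscale probability are illustrative assumptions, not values taken from this patch):

```python
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder

# Hypothetical Imagenette location; anomalib manages its own copy.
IMAGENETTE_DIR = "./datasets/imagenette"

# Assumed augmentations in the spirit of the EfficientAd paper: resize
# large, sometimes grayscale, then crop to the training resolution.
penalty_transform = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.RandomGrayscale(p=0.3),
    transforms.CenterCrop((256, 256)),
    transforms.ToTensor(),
])

imagenet_dataset = ImageFolder(IMAGENETTE_DIR, transform=penalty_transform)
# batch_size is pinned to 1, matching the hardcoded self.batch_size above.
imagenet_loader = DataLoader(imagenet_dataset, batch_size=1, shuffle=True)
```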
From 2d59cb160aaff8d38b445d4f30a592ca289d3427 Mon Sep 17 00:00:00 2001
From: abc-125 <63813435+abc-125@users.noreply.github.com>
Date: Mon, 22 Apr 2024 21:12:15 +0200
Subject: [PATCH 3/5] Removed ImageNet normalization, added a check for
 train_batch_size

---
 .../models/image/efficient_ad/lightning_model.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py
index d93721a182..03bc643af3 100644
--- a/src/anomalib/models/image/efficient_ad/lightning_model.py
+++ b/src/anomalib/models/image/efficient_ad/lightning_model.py
@@ -80,7 +80,7 @@ def __init__(
             padding=padding,
             pad_maps=pad_maps,
         )
-        self.batch_size = 1  # imagenet dataloader batch size is 1 according to the paper
+        self.batch_size = 1  # imagenet dataloader batch_size is 1 according to the paper
         self.lr = lr
         self.weight_decay = weight_decay

@@ -234,9 +234,18 @@ def configure_optimizers(self) -> torch.optim.Optimizer:
     def on_train_start(self) -> None:
         """Called before the first training epoch.

-        First sets up the pretrained teacher model, then prepares the imagenette data, and finally calculates or
-        loads the channel-wise mean and std of the training dataset and push to the model.
+        First checks that the EfficientAd-specific parameters are set correctly (a train_batch_size
+        of 1 and no ImageNet normalization in the transforms), then sets up the pretrained teacher
+        model, then prepares the Imagenette data, and finally calculates or loads the channel-wise
+        mean and std of the training dataset and pushes them to the model.
         """
+        if self.trainer.datamodule.train_batch_size != 1:
+            msg = "train_batch_size for EfficientAd should be 1."
+            raise ValueError(msg)
+        if self._transform and any(isinstance(transform, Normalize) for transform in self._transform.transforms):
+            msg = "Transforms for EfficientAd should not contain Normalize."
+            raise ValueError(msg)
+
         sample = next(iter(self.trainer.train_dataloader))
         image_size = sample["image"].shape[-2:]
         self.prepare_pretrained_model()
@@ -311,11 +320,10 @@ def learning_type(self) -> LearningType:
         return LearningType.ONE_CLASS

     def configure_transforms(self, image_size: tuple[int, int] | None = None) -> Transform:
-        """Default transform for Padim."""
+        """Default transform for EfficientAd. ImageNet normalization is applied in forward()."""
        image_size = image_size or (256, 256)
         return Compose(
             [
                 Resize(image_size, antialias=True),
-                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
             ],
         )

From 6898f473444db069b08eec4fc8e6208c549ef1a8 Mon Sep 17 00:00:00 2001
From: abc-125 <63813435+abc-125@users.noreply.github.com>
Date: Sat, 27 Apr 2024 09:35:40 +0200
Subject: [PATCH 4/5] Fix train_batch_size for testing EfficientAd

---
 tests/integration/model/test_models.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/integration/model/test_models.py b/tests/integration/model/test_models.py
index 8cf2a5a9e1..e49cc11e6b 100644
--- a/tests/integration/model/test_models.py
+++ b/tests/integration/model/test_models.py
@@ -204,7 +204,8 @@ def _get_objects(
             root=dataset_path / "mvtec",
             category="dummy",
             task=task_type,
-            train_batch_size=2,
+            # EfficientAd requires a train batch size of 1
+            train_batch_size=1 if model_name == "efficient_ad" else 2,
         )

     model = get_model(model_name, **extra_args)

From b83c24cb9668e2325180e87ccfb295fa2b728cc8 Mon Sep 17 00:00:00 2001
From: abc-125 <63813435+abc-125@users.noreply.github.com>
Date: Sat, 27 Apr 2024 09:52:47 +0200
Subject: [PATCH 5/5] Updated usage example

---
 src/anomalib/models/image/efficient_ad/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/anomalib/models/image/efficient_ad/README.md b/src/anomalib/models/image/efficient_ad/README.md
index f12bf91827..67da16fd95 100644
--- a/src/anomalib/models/image/efficient_ad/README.md
+++ b/src/anomalib/models/image/efficient_ad/README.md
@@ -18,7 +18,7 @@ Anomalies are detected as the difference in output feature maps between the teac

 ## Usage

-`python tools/train.py --model efficient_ad`
+`anomalib train --model EfficientAd --data anomalib.data.MVTec --data.train_batch_size 1`

 ## Benchmark
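The README line above covers the CLI. A rough Python-API equivalent, sketched under the assumption of anomalib v1's `Engine`, `MVTec`, and `EfficientAd` interfaces:

```python
from anomalib.data import MVTec
from anomalib.engine import Engine
from anomalib.models import EfficientAd

# train_batch_size must be 1: with PATCH 3/5 applied, on_train_start
# raises a ValueError for any other value.
datamodule = MVTec(train_batch_size=1)
model = EfficientAd()

Engine().fit(model=model, datamodule=datamodule)
```

Either way, ImageNet normalization should not be re-added through custom transforms: after PATCH 3/5 the model applies it internally in `forward`, and `on_train_start` rejects any `Normalize` in the transform stack.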