Fix the Anomaly Task to work & Add integration test for Anomaly #3007

Merged · 14 commits · Mar 13, 2024
Changes from 6 commits
1 change: 1 addition & 0 deletions .github/workflows/pre_merge.yaml
@@ -91,6 +91,7 @@ jobs:
- task: "instance_segmentation"
- task: "semantic_segmentation"
- task: "visual_prompting"
- task: "anomaly"
name: Integration-Test-${{ matrix.task }}-py310
# This is what will cancel the job concurrency
concurrency:
12 changes: 12 additions & 0 deletions pyproject.toml
@@ -89,6 +89,18 @@ mmlab = [
]
anomaly = [
"anomalib==1.0.0",
# These dependencies are pinned to avoid conflicts with the anomalib[core] extra.
"av>=10.0.0",
"einops>=0.3.2",
"freia>=0.2",
"imgaug==0.4.0",
"kornia>=0.6.6,<0.6.10",
"matplotlib>=3.4.3",
"opencv-python>=4.5.3.56",
"pandas>=1.1.0",
"open-clip-torch>=2.23.0",
# the latest torchmetrics release is incompatible with anomalib: https://github.com/Lightning-AI/torchmetrics/issues/1526
"torchmetrics==0.10.3",
]
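The torchmetrics pin is the load-bearing constraint in this group: anomalib 1.0.0 breaks on newer releases (see the linked issue). A minimal runtime sanity check, offered as a sketch rather than part of this PR, could catch an accidental upgrade:

```python
# Sketch (not part of the diff): fail fast if the environment drifted
# from the torchmetrics==0.10.3 pin required by anomalib 1.0.0.
import torchmetrics

if torchmetrics.__version__ != "0.10.3":
    raise RuntimeError(
        f"expected torchmetrics==0.10.3, found {torchmetrics.__version__}"
    )
```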

[project.scripts]
8 changes: 4 additions & 4 deletions src/otx/algo/anomaly/padim.py
@@ -15,26 +15,26 @@ class Padim(OTXAnomaly, OTXModel, AnomalibPadim):
"""OTX Padim model.

Args:
input_size (tuple[int, int], optional): Input size. Defaults to (256, 256).
backbone (str, optional): Feature extractor backbone. Defaults to "resnet18".
layers (list[str], optional): Feature extractor layers. Defaults to ["layer1", "layer2", "layer3"].
pre_trained (bool, optional): Pretrained backbone. Defaults to True.
n_features (int | None, optional): Number of features. Defaults to None.
num_classes (int, optional): Anomaly models do not use num_classes,
but OTXModel always receives it, so this argument is required.
"""

def __init__(
self,
input_size: tuple[int, int] = (256, 256),
backbone: str = "resnet18",
layers: list[str] = ["layer1", "layer2", "layer3"], # noqa: B006
pre_trained: bool = True,
n_features: int | None = None,
num_classes: int = 2,
) -> None:
OTXAnomaly.__init__(self)
OTXModel.__init__(self, num_classes=2)
OTXModel.__init__(self, num_classes=num_classes)
AnomalibPadim.__init__(
self,
input_size=input_size,
backbone=backbone,
layers=layers,
pre_trained=pre_trained,
8 changes: 4 additions & 4 deletions src/otx/algo/anomaly/stfpm.py
@@ -21,21 +21,21 @@ class Stfpm(OTXAnomaly, OTXModel, AnomalibStfpm):

Args:
layers (Sequence[str]): Feature extractor layers.
input_size (tuple[int, int]): Input size.
backbone (str, optional): Feature extractor backbone. Defaults to "resnet18".
num_classes (int, optional): Anomaly models do not use num_classes,
but OTXModel always receives it, so this argument is required.
"""

def __init__(
self,
layers: Sequence[str] = ["layer1", "layer2", "layer3"],
input_size: tuple[int, int] = (256, 256),
backbone: str = "resnet18",
num_classes: int = 2,
) -> None:
OTXAnomaly.__init__(self)
OTXModel.__init__(self, num_classes=2)
OTXModel.__init__(self, num_classes=num_classes)
AnomalibStfpm.__init__(
self,
input_size=input_size,
backbone=backbone,
layers=layers,
)
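Both constructors now forward the caller's num_classes instead of hard-coding 2. A minimal usage sketch (class paths taken from the diff headers; running it requires the otx package):

```python
from otx.algo.anomaly.padim import Padim
from otx.algo.anomaly.stfpm import Stfpm

# num_classes exists only for OTXModel compatibility; anomaly models are
# effectively binary (normal vs. anomalous), so 2 stays the sensible value.
padim = Padim(input_size=(256, 256), backbone="resnet18", num_classes=2)
stfpm = Stfpm(layers=["layer1", "layer2", "layer3"], num_classes=2)
```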
@@ -26,7 +26,7 @@ def update(self, preds: list[dict], target: list[dict]) -> None:
target (list[dict]): list of RLE encoded masks
"""
for item in preds:
bbox_detection, mask_detection = self._get_safe_item_values(item, warn=self.warn_on_many_detections)
bbox_detection, mask_detection = self._get_safe_item_values(item)
if bbox_detection is not None:
self.detection_box.append(bbox_detection)
if mask_detection is not None:
2 changes: 1 addition & 1 deletion src/otx/cli/cli.py
@@ -413,7 +413,7 @@ def instantiate_model(self, model_config: Namespace) -> tuple:
# Update num_classes
if not self.get_config_value(self.config_init, "disable_infer_num_classes", False):
num_classes = self.datamodule.label_info.num_classes
if num_classes != model_config.init_args.num_classes:
if hasattr(model_config.init_args, "num_classes") and num_classes != model_config.init_args.num_classes:
warning_msg = (
f"The `num_classes` in dataset is {num_classes} "
f"but, the `num_classes` of model is {model_config.init_args.num_classes}. "
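The hasattr guard matters because anomaly model configs omit num_classes entirely, so the old comparison failed on a missing attribute. A self-contained sketch of the pattern, with SimpleNamespace standing in for the parsed config:

```python
from types import SimpleNamespace

init_args = SimpleNamespace(backbone="resnet18")  # anomaly recipes carry no num_classes
num_classes = 2  # what the datamodule's label_info would report

# Without the hasattr check, init_args.num_classes would fail here.
if hasattr(init_args, "num_classes") and num_classes != init_args.num_classes:
    print("num_classes mismatch; overriding from the dataset")
```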
2 changes: 1 addition & 1 deletion src/otx/core/data/entity/anomaly/classification.py
@@ -53,7 +53,7 @@ def collate_fn(
) -> AnomalyClassificationDataBatch:
"""Collection function to collect `OTXDataEntity` into `OTXBatchDataEntity` in data loader."""
batch = super().collate_fn(entities)
images = tv_tensors.Image(data=torch.stack(batch.images, dim=0)) if stack_images else batch.images
images = tv_tensors.Image(data=torch.stack(tuple(batch.images), dim=0)) if stack_images else batch.images
return AnomalyClassificationDataBatch(
batch_size=batch.batch_size,
images=images,
2 changes: 1 addition & 1 deletion src/otx/core/data/entity/anomaly/detection.py
@@ -56,7 +56,7 @@ def collate_fn(
) -> AnomalyDetectionDataBatch:
"""Collection function to collect `OTXDataEntity` into `OTXBatchDataEntity` in data loader."""
batch = super().collate_fn(entities)
images = tv_tensors.Image(data=torch.stack(batch.images, dim=0)) if stack_images else batch.images
images = tv_tensors.Image(data=torch.stack(tuple(batch.images), dim=0)) if stack_images else batch.images
return AnomalyDetectionDataBatch(
batch_size=batch.batch_size,
images=images,
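Both collate functions get the same one-line fix: torch.stack expects a list or tuple of tensors, and tuple() normalizes whatever sequence type batch.images arrives as. A standalone sketch:

```python
import torch

images = [torch.rand(3, 8, 8) for _ in range(4)]  # stand-in for batch.images
# tuple() guarantees torch.stack sees a plain sequence of tensors.
stacked = torch.stack(tuple(images), dim=0)
assert stacked.shape == (4, 3, 8, 8)
```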
1 change: 1 addition & 0 deletions src/otx/core/metrics/accuracy.py
@@ -272,6 +272,7 @@ def __init__(
TorchmetricAcc(
task="multiclass",
num_classes=int(head_range[1] - head_range[0]),
top_k=1,
)
for head_range in head_logits_info.values()
]
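Passing top_k=1 pins each head's metric to strict top-1 accuracy instead of relying on the torchmetrics default. A standalone sketch, assuming a torchmetrics version with the task-based API:

```python
import torch
from torchmetrics.classification import Accuracy

acc = Accuracy(task="multiclass", num_classes=3, top_k=1)  # strict top-1
preds = torch.tensor([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1], [0.2, 0.1, 0.7]])
target = torch.tensor([0, 1, 2])
assert acc(preds, target) == 1.0  # every top-1 prediction is correct
```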
19 changes: 16 additions & 3 deletions src/otx/core/model/module/anomaly/anomaly_lightning.py
@@ -134,7 +134,7 @@ class OTXAnomaly:
def __init__(self) -> None:
self.optimizer: list[OptimizerCallable] | OptimizerCallable = None
self.scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = None
self.input_size: list[int] = [256, 256]
self.input_size: tuple[int, int] = (256, 256)
self.mean_values: tuple[float, float, float] = (0.0, 0.0, 0.0)
self.scale_values: tuple[float, float, float] = (1.0, 1.0, 1.0)
self.trainer: Trainer
@@ -147,6 +147,19 @@ def __init__(self) -> None:
self.image_metrics: AnomalibMetricCollection
self.pixel_metrics: AnomalibMetricCollection

@property
def input_size(self) -> tuple[int, int]:
"""Returns the input size of the model.

Returns:
tuple[int, int]: The input size of the model as a tuple of (height, width).
"""
return self._input_size

@input_size.setter
def input_size(self, value: tuple[int, int]) -> None:
self._input_size = value

@property
def task(self) -> AnomalibTaskType:
"""Return the task type of the model."""
@@ -342,13 +355,13 @@ def state_dict(self) -> dict[str, Any]:
"""
state_dict = super().state_dict() # type: ignore[misc]
# This is defined in OTXModel
state_dict["meta_info"] = self.meta_info # type: ignore[attr-defined]
state_dict["label_info"] = self.label_info # type: ignore[attr-defined]
return state_dict

def load_state_dict(self, ckpt: OrderedDict[str, Any], *args, **kwargs) -> None:
"""Pass the checkpoint to the anomaly model."""
ckpt = ckpt.get("state_dict", ckpt)
ckpt.pop("meta_info", None) # [TODO](ashwinvaidya17): Revisit this method when OTXModel is the lightning model
ckpt.pop("label_info", None) # [TODO](ashwinvaidya17): Revisit this method when OTXModel is the lightning model
return super().load_state_dict(ckpt, *args, **kwargs) # type: ignore[misc]

def forward(
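The new property pair replaces a plain attribute so every assignment in the cooperative __init__ chain, including the one Anomalib's base class performs, routes through a single accessor. A minimal standalone sketch of the pattern (the normalization in the setter is illustrative, not part of the PR):

```python
class Example:
    def __init__(self) -> None:
        self.input_size = (256, 256)  # plain assignment hits the setter below

    @property
    def input_size(self) -> tuple[int, int]:
        """(height, width) the model expects."""
        return self._input_size

    @input_size.setter
    def input_size(self, value: tuple[int, int]) -> None:
        self._input_size = tuple(value)  # e.g., normalize a YAML list to a tuple


model = Example()
model.input_size = [512, 512]  # a list, as parsed from a recipe file
assert model.input_size == (512, 512)
```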
7 changes: 4 additions & 3 deletions src/otx/engine/utils/auto_configurator.py
@@ -42,9 +42,9 @@
OTXTaskType.INSTANCE_SEGMENTATION: RECIPE_PATH / "instance_segmentation" / "maskrcnn_r50.yaml",
OTXTaskType.ACTION_CLASSIFICATION: RECIPE_PATH / "action" / "action_classification" / "x3d.yaml",
OTXTaskType.ACTION_DETECTION: RECIPE_PATH / "action" / "action_detection" / "x3d_fastrcnn.yaml",
OTXTaskType.ANOMALY_CLASSIFICATION: RECIPE_PATH / "anomaly" / "anomaly_classification" / "padim.yaml",
OTXTaskType.ANOMALY_SEGMENTATION: RECIPE_PATH / "anomaly" / "anomaly_segmentation" / "padim.yaml",
OTXTaskType.ANOMALY_DETECTION: RECIPE_PATH / "anomaly" / "anomaly_detection" / "padim.yaml",
OTXTaskType.ANOMALY_CLASSIFICATION: RECIPE_PATH / "anomaly_classification" / "padim.yaml",
OTXTaskType.ANOMALY_SEGMENTATION: RECIPE_PATH / "anomaly_segmentation" / "padim.yaml",
OTXTaskType.ANOMALY_DETECTION: RECIPE_PATH / "anomaly_detection" / "padim.yaml",
OTXTaskType.VISUAL_PROMPTING: RECIPE_PATH / "visual_prompting" / "sam_tiny_vit.yaml",
OTXTaskType.ZERO_SHOT_VISUAL_PROMPTING: RECIPE_PATH / "zero_shot_visual_prompting" / "sam_tiny_vit.yaml",
}
@@ -67,6 +67,7 @@
"common_semantic_segmentation_with_subset_dirs": [OTXTaskType.SEMANTIC_SEGMENTATION],
"kinetics": [OTXTaskType.ACTION_CLASSIFICATION],
"ava": [OTXTaskType.ACTION_DETECTION],
"mvtec": [OTXTaskType.ANOMALY_CLASSIFICATION, OTXTaskType.ANOMALY_DETECTION, OTXTaskType.ANOMALY_SEGMENTATION],
}

OVMODEL_PER_TASK = {
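The new mvtec entry lets auto-configuration map a detected dataset format to the anomaly task family. A trimmed, self-contained sketch of the lookup (the dict name is assumed; entries come from the diff; strings stand in for OTXTaskType members):

```python
# Stand-in for the mapping above; the real code uses OTXTaskType enum members.
TASK_PER_DATA_FORMAT = {
    "mvtec": ["ANOMALY_CLASSIFICATION", "ANOMALY_DETECTION", "ANOMALY_SEGMENTATION"],
}

candidates = TASK_PER_DATA_FORMAT["mvtec"]
print(candidates)  # all three anomaly tasks are valid for MVTec-format data
```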
10 changes: 6 additions & 4 deletions src/otx/recipe/anomaly_classification/padim.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.padim.Padim
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"
pre_trained: True
@@ -15,10 +12,15 @@ engine:

callback_monitor: step # this has no effect as Padim does not need to be trained

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
precision: 32
max_epochs: 1
limit_val_batches: 0 # set to 0 because the default dataloader has no validation set; as a result the model will not report correct performance numbers
callbacks:
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_CLASSIFICATION
config:
8 changes: 4 additions & 4 deletions src/otx/recipe/anomaly_classification/stfpm.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.stfpm.Stfpm
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"

@@ -21,14 +18,17 @@ engine:

callback_monitor: train_loss_epoch # val loss is unavailable because the default dataloader provides no validation set

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
max_epochs: 100
limit_val_batches: 0 # set to 0 because the default dataloader has no validation set; as a result the model will not report correct performance numbers
callbacks:
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
patience: 5
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_CLASSIFICATION
config:
10 changes: 6 additions & 4 deletions src/otx/recipe/anomaly_detection/padim.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.padim.Padim
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"
pre_trained: True
@@ -15,10 +12,15 @@ engine:

callback_monitor: step # this has no effect as Padim does not need to be trained

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
precision: 32
max_epochs: 1
limit_val_batches: 0 # set to 0 because the default dataloader has no validation set; as a result the model will not report correct performance numbers
callbacks:
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_DETECTION
config:
8 changes: 4 additions & 4 deletions src/otx/recipe/anomaly_detection/stfpm.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.stfpm.Stfpm
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"

@@ -21,14 +18,17 @@ engine:

callback_monitor: train_loss_epoch # val loss is unavailable because the default dataloader provides no validation set

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
max_epochs: 100
limit_val_batches: 0 # set to 0 because the default dataloader has no validation set; as a result the model will not report correct performance numbers
callbacks:
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
patience: 5
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_DETECTION
config:
10 changes: 6 additions & 4 deletions src/otx/recipe/anomaly_segmentation/padim.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.padim.Padim
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"
pre_trained: True
@@ -15,10 +12,15 @@ engine:

callback_monitor: step # this has no effect as Padim does not need to be trained

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
precision: 32
max_epochs: 1
limit_val_batches: 0 # set to 0 because the default dataloader has no validation set; as a result the model will not report correct performance numbers
callbacks:
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_SEGMENTATION
config:
5 changes: 4 additions & 1 deletion src/otx/recipe/anomaly_segmentation/stfpm.yaml
@@ -21,14 +21,17 @@ engine:

callback_monitor: train_loss_epoch # val loss is unavailable because the default dataloader provides no validation set

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
max_epochs: 100
limit_val_batches: 0 # set to 0 because the default dataloader has no validation set; as a result the model will not report correct performance numbers
callbacks:
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
patience: 5
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_SEGMENTATION
config:
@@ -24,6 +24,7 @@ metric:
init_args:
task: multiclass
num_classes: 1000
top_k: 1

engine:
task: MULTI_CLASS_CLS
@@ -24,6 +24,7 @@ metric:
init_args:
task: multiclass
num_classes: 1000
top_k: 1

engine:
task: MULTI_CLASS_CLS
@@ -27,6 +27,7 @@ metric:
init_args:
task: multiclass
num_classes: 1000
top_k: 1

engine:
task: MULTI_CLASS_CLS
@@ -25,6 +25,7 @@ metric:
init_args:
task: multiclass
num_classes: 1000
top_k: 1

engine:
task: MULTI_CLASS_CLS
@@ -31,6 +31,7 @@ metric:
init_args:
task: multiclass
num_classes: 1000
top_k: 1

engine:
task: MULTI_CLASS_CLS
@@ -33,6 +33,7 @@ metric:
init_args:
task: multiclass
num_classes: 1000
top_k: 1

engine:
task: MULTI_CLASS_CLS