diff --git a/CHANGELOG.md b/CHANGELOG.md
index ffbbde0233..79bd61519a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,11 +4,10 @@
 
 All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
-## [Unreleased]
+## [v0.4.0rc1] - 2023-02-07
 
 ### Added
 
-- Bump up PyTorch Lightning version to v.1.9.\* ()
 - Add ShanghaiTech Campus video anomaly detection dataset ()
 - Add `pyupgrade` to `pre-commit` configs, and refactor based on `pyupgrade` and `refurb` ()
 - Add [CFA](https://arxiv.org/abs/2206.04325) model implementation ()
@@ -30,6 +29,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- Bump PyTorch Lightning version to v.1.9.\* ()
 - Make input image normalization and center cropping configurable from config (https://github.com/openvinotoolkit/anomalib/pull/822)
 - Improve flexibility and configurability of subset splitting (https://github.com/openvinotoolkit/anomalib/pull/822)
 - Switch to new datamodules design (https://github.com/openvinotoolkit/anomalib/pull/822)
diff --git a/README.md b/README.md
index 9249ba1fa0..c819a27542 100644
--- a/README.md
+++ b/README.md
@@ -76,8 +76,6 @@ pip install -e .
 
 # Training
 
-## ⚠️ Anomalib < v.0.4.0
-
 By default [`python tools/train.py`](https://github.com/openvinotoolkit/anomalib/blob/main/tools/train.py) runs [PADIM](https://arxiv.org/abs/2011.08785) model on `leather` category from the [MVTec AD](https://www.mvtec.com/company/research/datasets/mvtec-ad) [(CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) dataset.
 
@@ -134,24 +132,11 @@ Tips:
 
 The backbone can be set in the config file, two examples below.
 
-Anomalib < v.0.4.0
-
 ```yaml
 model:
   name: cflow
   backbone: wide_resnet50_2
   pre_trained: true
-Anomalib > v.0.4.0 Beta - Subject to Change
-```
-
-Anomalib >= v.0.4.0
-
-```yaml
-model:
-  class_path: anomalib.models.Cflow
-  init_args:
-    backbone: wide_resnet50_2
-    pre_trained: true
 ```
 
 ## Custom Dataset
@@ -187,26 +172,8 @@ dataset:
     random_tile_count: 16
 ```
 
-## ⚠️ Anomalib > v.0.4.0 Beta - Subject to Change
-
-We introduce a new CLI approach that uses [PyTorch Lightning CLI](https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_cli.html). To train a model using the new CLI, one would call the following:
-
-```bash
-anomalib fit --config
-```
-
-For instance, to train a [PatchCore](https://github.com/openvinotoolkit/anomalib/tree/main/anomalib/models/patchcore) model, the following command would be run:
-
-```bash
-anomalib fit --config ./configs/model/patchcore.yaml
-```
-
-The new CLI approach offers a lot more flexibility, details of which are explained in the [documentation](https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_cli.html).
-
 # Inference
 
-## ⚠️ Anomalib < v.0.4.0
-
 Anomalib includes multiple tools, including Lightning, Gradio, and OpenVINO inferencers, for performing inference with a trained model.
 The following command can be used to run PyTorch Lightning inference from the command line:
diff --git a/anomalib/__init__.py b/anomalib/__init__.py
index 3d89d45619..3ec0b6c68a 100644
--- a/anomalib/__init__.py
+++ b/anomalib/__init__.py
@@ -3,4 +3,4 @@
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-__version__ = "0.4.0dev"
+__version__ = "0.4.0rc1"
diff --git a/anomalib/config/config.py b/anomalib/config/config.py
index 1d88a0d5b4..3c08a8a500 100644
--- a/anomalib/config/config.py
+++ b/anomalib/config/config.py
@@ -294,7 +294,7 @@ def get_configurable_parameters(
 
     # thresholding
     if "metrics" in config.keys():
-        # NOTE: Deprecate this after v0.4.0.
+        # NOTE: Deprecate this once the new CLI is implemented.
         if "adaptive" in config.metrics.threshold.keys():
             warn(
                 DeprecationWarning(
diff --git a/anomalib/models/components/feature_extractors/timm.py b/anomalib/models/components/feature_extractors/timm.py
index 363cde0320..e4ec500b9a 100644
--- a/anomalib/models/components/feature_extractors/timm.py
+++ b/anomalib/models/components/feature_extractors/timm.py
@@ -112,6 +112,6 @@ class FeatureExtractor(TimmFeatureExtractor):
     def __init__(self, *args, **kwargs):
         logger.warning(
             "FeatureExtractor is deprecated. Use TimmFeatureExtractor instead."
-            " Both FeatureExtractor and TimmFeatureExtractor will be removed in version 2023.1"
+            " Both FeatureExtractor and TimmFeatureExtractor will be removed in a future release."
         )
         super().__init__(*args, **kwargs)
diff --git a/configs/model/cfa.yaml b/configs/model/cfa.yaml
deleted file mode 100644
index 55c479a8b0..0000000000
--- a/configs/model/cfa.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/stfpm.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [224, 224]
-    train_batch_size: 4
-    test_batch_size: 4
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Cfa
-  init_args:
-    input_size: [224, 224]
-    backbone: wide_resnet50_2
-    gamma_c: 1
-    gamma_d: 1
-
-optimizer:
-  class_path: torch.optim.AdamW
-  init_args:
-    lr: 1e-3
-    weight_decay: 5e-4
-    amsgrad: true
-
-metrics:
-  adaptive_threshold: true
-  default_image_threshold: null
-  default_pixel_threshold: null
-  image_metric_names:
-    - AUROC
-  pixel_metric_names:
-    - AUROC
-    - AUPRO
-  normalization_method: min_max
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: False # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 5
-        monitor: pixel_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 30
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0
diff --git a/configs/model/cflow.yaml b/configs/model/cflow.yaml
deleted file mode 100644
index 8290475400..0000000000
--- a/configs/model/cflow.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model.cflow.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 16
-    test_batch_size: 16
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Cflow
-  init_args:
-    input_size: [256, 256]
-    backbone: wide_resnet50_2
-    pre_trained: true
-    layers:
-      - layer2
-      - layer3
-      - layer4
-    fiber_batch_size: 64
-    decoder: freia-cflow
-    condition_vector: 128
-    coupling_blocks: 8
-    clamp_alpha: 1.9
-    permute_soft: false
-    lr: 0.0001
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 2
-        monitor: pixel_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 50
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0
diff --git a/configs/model/dfkde.yaml b/configs/model/dfkde.yaml
deleted file mode 100644
index 86a427042f..0000000000
--- a/configs/model/dfkde.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/dfkde.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: classification
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Dfkde
-  init_args:
-    backbone: resnet18
-    pre_trained: true
-    layers:
-      - layer4
-    max_training_points: 40000
-    pre_processing: scale
-    n_components: 16
-    threshold_steepness: 0.05
-    threshold_offset: 12
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1 # Don't validate before extracting features.
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 1
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0 # Don't validate before extracting features.
diff --git a/configs/model/dfm.yaml b/configs/model/dfm.yaml
deleted file mode 100644
index 75d994895d..0000000000
--- a/configs/model/dfm.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model.dfm.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: classification
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Dfm
-  init_args:
-    backbone: resnet18
-    pre_trained: true
-    layer: layer3
-    pooling_kernel_size: 4
-    pca_level: 0.97
-    score_type: fre
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1 # Don't validate before extracting features.
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 1
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0 # Don't validate before extracting features.
diff --git a/configs/model/draem.yaml b/configs/model/draem.yaml
deleted file mode 100644
index 0e428e076e..0000000000
--- a/configs/model/draem.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model.draem.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 8
-    test_batch_size: 32
-    num_workers: 8
-    task: segmentation
-    transform_config_train: ./anomalib/models/draem/transform_config.yaml
-    transform_config_val: ./anomalib/models/draem/transform_config.yaml
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Draem
-  init_args:
-    anomaly_source_path: null # optional, e.g. ./datasets/dtd
-
-optimizer:
-  class_path: torch.optim.Adam
-  init_args:
-    lr: 0.0001
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 50
-        monitor: pixel_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 100
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0
diff --git a/configs/model/fastflow.yaml b/configs/model/fastflow.yaml
deleted file mode 100644
index 23b5aa3b65..0000000000
--- a/configs/model/fastflow.yaml
+++ /dev/null
@@ -1,111 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/fastflow.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Fastflow
-  init_args:
-    input_size: [256, 256]
-    backbone: resnet18 # options: [resnet18, wide_resnet50_2, cait_m48_448, deit_base_distilled_patch16_384]
-    pre_trained: true
-    flow_steps: 8 # options: [8, 8, 20, 20] - for each supported backbone
-    conv3x3_only: True # options: [True, False, False, False] - for each supported backbone
-    hidden_ratio: 1.0 # options: [1.0, 1.0, 0.16, 0.16] - for each supported backbone
-
-optimizer:
-  class_path: torch.optim._multi_tensor.adam.Adam
-  init_args:
-    lr: 0.001
-    weight_decay: 0.00001
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 3
-        monitor: pixel_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1 # Don't validate before extracting features.
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 500
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0 # Don't validate before extracting features.
diff --git a/configs/model/ganomaly.yaml b/configs/model/ganomaly.yaml
deleted file mode 100644
index a8e3a70dbd..0000000000
--- a/configs/model/ganomaly.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/ganomaly.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: classification
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Ganomaly
-  init_args:
-    batch_size: 32
-    input_size: [256, 256]
-    n_features: 64
-    latent_vec_size: 100
-    extra_layers: 0
-    add_final_conv_layer: true
-    wadv: 1
-    wcon: 50
-    wenc: 1
-    lr: 0.0002
-    beta1: 0.5
-    beta2: 0.999
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  image_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 3
-        monitor: image_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 2
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 100
-  max_steps: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0
diff --git a/configs/model/padim.yaml b/configs/model/padim.yaml
deleted file mode 100644
index b1c8e3f876..0000000000
--- a/configs/model/padim.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/padim.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Padim
-  init_args:
-    input_size:
-      - 256 # Height
-      - 256 # Width
-    backbone: resnet18
-    pre_trained: true
-    layers:
-      - layer1
-      - layer2
-      - layer3
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  logger: true
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1 # Don't validate before extracting features.
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  max_epochs: 1
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0 # Don't validate before extracting features.
diff --git a/configs/model/patchcore.yaml b/configs/model/patchcore.yaml
deleted file mode 100644
index b3d3f54f6b..0000000000
--- a/configs/model/patchcore.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model.patchcore.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [224, 224]
-    train_batch_size: 32
-    test_batch_size: 1
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Patchcore
-  init_args:
-    input_size:
-      - 224 # Height
-      - 224 # Width
-    backbone: wide_resnet50_2
-    pre_trained: true
-    layers:
-      - layer2
-      - layer3
-    coreset_sampling_ratio: 0.1
-    num_neighbors: 9
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1 # Don't validate before extracting features.
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 1
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0 # Don't validate before extracting features.
diff --git a/configs/model/reverse_distillation.yaml b/configs/model/reverse_distillation.yaml
deleted file mode 100644
index cf1abd1f07..0000000000
--- a/configs/model/reverse_distillation.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/reverse_distillation.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.ReverseDistillation
-  init_args:
-    input_size: [256, 256]
-    backbone: wide_resnet50_2
-    pre_trained: true
-    layers:
-      - layer1
-      - layer2
-      - layer3
-    anomaly_map_mode: multiply
-    # TODO: Learning rate params to be moved to optimizer section.
-    lr: 0.005
-    beta1: 0.5
-    beta2: 0.99
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 3
-        monitor: pixel_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 2
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 200
-  max_steps: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0
diff --git a/configs/model/stfpm.yaml b/configs/model/stfpm.yaml
deleted file mode 100644
index b9b2914643..0000000000
--- a/configs/model/stfpm.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# This is the config file for the new Anomalib CLI.
-# To use this, run the following command:
-# anomalib fit --config ./config/model/stfpm.yaml
-ckpt_path: null
-seed_everything: null
-
-data:
-  class_path: anomalib.data.MVTec
-  init_args:
-    root: ./datasets/MVTec
-    category: bottle
-    image_size: [256, 256]
-    train_batch_size: 32
-    test_batch_size: 32
-    num_workers: 8
-    task: segmentation
-    transform_config_train: null
-    transform_config_val: null
-    seed: 0
-    create_validation_set: false
-
-model:
-  class_path: anomalib.models.Stfpm
-  init_args:
-    input_size: [256, 256]
-    backbone: resnet18
-    pre_trained: true
-    layers:
-      - layer1
-      - layer2
-      - layer3
-
-optimizer:
-  class_path: torch.optim._multi_tensor.sgd.SGD
-  init_args:
-    lr: 0.4
-    momentum: 0.9
-    weight_decay: 0.0001
-
-post_processing:
-  normalization_method: min_max #
-  threshold_method: adaptive # options: [adaptive, manual]
-  manual_image_threshold: null
-  manual_pixel_threshold: null
-
-metrics:
-  image_metrics:
-    - F1Score
-    - AUROC
-  pixel_metrics:
-    - F1Score
-    - AUROC
-
-visualization:
-  show_images: False # show images on the screen
-  save_images: True # save images to the file system
-  log_images: False # log images to the available loggers (if any)
-  mode: full # options: ["full", "simple"]
-
-trainer:
-  callbacks:
-    - class_path: pytorch_lightning.callbacks.EarlyStopping
-      init_args:
-        patience: 5
-        monitor: pixel_AUROC
-        mode: max
-  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
-  accumulate_grad_batches: 1
-  amp_backend: native
-  auto_lr_find: false
-  auto_scale_batch_size: false
-  auto_select_gpus: false
-  benchmark: false
-  check_val_every_n_epoch: 1
-  default_root_dir: null
-  detect_anomaly: false
-  deterministic: false
-  devices: 1
-  enable_checkpointing: true
-  enable_model_summary: true
-  enable_progress_bar: true
-  fast_dev_run: false
-  gpus: null # Set automatically
-  gradient_clip_val: 0
-  ipus: null
-  limit_predict_batches: 1.0
-  limit_test_batches: 1.0
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  log_every_n_steps: 50
-  log_gpu_memory: null
-  max_epochs: 100
-  max_steps: -1
-  max_time: null
-  min_epochs: null
-  min_steps: null
-  move_metrics_to_cpu: false
-  multiple_trainloader_mode: max_size_cycle
-  num_nodes: 1
-  num_processes: null
-  num_sanity_val_steps: 0
-  overfit_batches: 0.0
-  plugins: null
-  precision: 32
-  profiler: null
-  reload_dataloaders_every_n_epochs: 0
-  replace_sampler_ddp: true
-  resume_from_checkpoint: null
-  strategy: null
-  sync_batchnorm: false
-  tpu_cores: null
-  track_grad_norm: -1
-  val_check_interval: 1.0